src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp

author:      ysr
date:        Mon, 03 May 2010 10:24:51 -0700
changeset:   1873:3bfae429e2cf
parent:      1583:05b775309e59
child:       1876:a8127dc669ba
permissions: -rw-r--r--

6948537: CMS: BOT walkers observe out-of-thin-air zeros on sun4v sparc/CMT
Summary: On sun4v/CMT avoid use of memset() in BOT updates so as to prevent concurrent BOT readers from seeing the phantom zeros arising from memset()'s use of BIS.
Reviewed-by: jmasa, johnc, minqi, poonam, tonyp
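
For context, a minimal sketch of the idea behind the fix (illustrative only, not the actual patch; the names bot_fill and use_memset_in_bot are hypothetical): on sun4v/CMT, memset() may be implemented with block-initializing stores (BIS), which can briefly expose zeroed cache lines to concurrent readers of the block offset table. The workaround is to fall back to an explicit store loop whenever such concurrent readers are possible.

#include <cstddef>
#include <cstring>

typedef unsigned char u_char;

// Fill BOT entries [start, start + count) with 'value'. When concurrent
// readers may be walking the table and memset() may use BIS (as on
// sun4v/CMT), use plain byte stores so no reader observes a transient 0.
static void bot_fill(u_char* start, size_t count, u_char value,
                     bool use_memset_in_bot) {
  if (use_memset_in_bot) {
    memset(start, value, count);   // safe only when no one reads concurrently
  } else {
    for (size_t i = 0; i < count; i++) {
      start[i] = value;            // never exposes "out-of-thin-air" zeros
    }
  }
}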

duke@435 1 /*
xdono@1014 2 * Copyright 2001-2009 Sun Microsystems, Inc. All Rights Reserved.
duke@435 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
duke@435 4 *
duke@435 5 * This code is free software; you can redistribute it and/or modify it
duke@435 6 * under the terms of the GNU General Public License version 2 only, as
duke@435 7 * published by the Free Software Foundation.
duke@435 8 *
duke@435 9 * This code is distributed in the hope that it will be useful, but WITHOUT
duke@435 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
duke@435 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
duke@435 12 * version 2 for more details (a copy is included in the LICENSE file that
duke@435 13 * accompanied this code).
duke@435 14 *
duke@435 15 * You should have received a copy of the GNU General Public License version
duke@435 16 * 2 along with this work; if not, write to the Free Software Foundation,
duke@435 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
duke@435 18 *
duke@435 19 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
duke@435 20 * CA 95054 USA or visit www.sun.com if you need additional information or
duke@435 21 * have any questions.
duke@435 22 *
duke@435 23 */
duke@435 24
duke@435 25 # include "incls/_precompiled.incl"
duke@435 26 # include "incls/_compactibleFreeListSpace.cpp.incl"
duke@435 27
duke@435 28 /////////////////////////////////////////////////////////////////////////
duke@435 29 //// CompactibleFreeListSpace
duke@435 30 /////////////////////////////////////////////////////////////////////////
duke@435 31
duke@435 32 // highest ranked free list lock rank
duke@435 33 int CompactibleFreeListSpace::_lockRank = Mutex::leaf + 3;
duke@435 34
duke@435 35 // Constructor
duke@435 36 CompactibleFreeListSpace::CompactibleFreeListSpace(BlockOffsetSharedArray* bs,
duke@435 37 MemRegion mr, bool use_adaptive_freelists,
duke@435 38 FreeBlockDictionary::DictionaryChoice dictionaryChoice) :
duke@435 39 _dictionaryChoice(dictionaryChoice),
duke@435 40 _adaptive_freelists(use_adaptive_freelists),
duke@435 41 _bt(bs, mr),
duke@435 42 // free list locks are in the range of values taken by _lockRank
duke@435 43 // This range currently is [_leaf+2, _leaf+3]
duke@435 44 // Note: this requires that CFLspace c'tors
duke@435 45 // are called serially in the order in which the locks
duke@435 46 // are acquired in the program text. This is true today.
duke@435 47 _freelistLock(_lockRank--, "CompactibleFreeListSpace._lock", true),
duke@435 48 _parDictionaryAllocLock(Mutex::leaf - 1, // == rank(ExpandHeap_lock) - 1
duke@435 49 "CompactibleFreeListSpace._dict_par_lock", true),
duke@435 50 _rescan_task_size(CardTableModRefBS::card_size_in_words * BitsPerWord *
duke@435 51 CMSRescanMultiple),
duke@435 52 _marking_task_size(CardTableModRefBS::card_size_in_words * BitsPerWord *
duke@435 53 CMSConcMarkMultiple),
duke@435 54 _collector(NULL)
duke@435 55 {
duke@435 56 _bt.set_space(this);
jmasa@698 57 initialize(mr, SpaceDecorator::Clear, SpaceDecorator::Mangle);
duke@435 58 // We have all of "mr", all of which we place in the dictionary
duke@435 59 // as one big chunk. We'll need to decide here which of several
duke@435 60 // possible alternative dictionary implementations to use. For
duke@435 61 // now the choice is easy, since we have only one working
duke@435 62 // implementation, namely, the simple binary tree (splaying
duke@435 63 // temporarily disabled).
duke@435 64 switch (dictionaryChoice) {
duke@435 65 case FreeBlockDictionary::dictionarySplayTree:
duke@435 66 case FreeBlockDictionary::dictionarySkipList:
duke@435 67 default:
duke@435 68 warning("dictionaryChoice: selected option not understood; using"
duke@435 69 " default BinaryTreeDictionary implementation instead.");
ysr@1580 70 case FreeBlockDictionary::dictionaryBinaryTree:
duke@435 71 _dictionary = new BinaryTreeDictionary(mr);
duke@435 72 break;
duke@435 73 }
duke@435 74 assert(_dictionary != NULL, "CMS dictionary initialization");
duke@435 75 // The indexed free lists are initially all empty and are lazily
duke@435 76 // filled in on demand. Initialize the array elements to NULL.
duke@435 77 initializeIndexedFreeListArray();
duke@435 78
duke@435 79 // Not using adaptive free lists assumes that allocation is first
duke@435 80 // from the linAB's. Also a cms perm gen which can be compacted
duke@435 81 // has to have the klass's klassKlass allocated at a lower
duke@435 82 // address in the heap than the klass so that the klassKlass is
duke@435 83 // moved to its new location before the klass is moved.
duke@435 84 // Set the _refillSize for the linear allocation blocks
duke@435 85 if (!use_adaptive_freelists) {
duke@435 86 FreeChunk* fc = _dictionary->getChunk(mr.word_size());
duke@435 87 // The small linAB initially has all the space and will allocate
duke@435 88 // a chunk of any size.
duke@435 89 HeapWord* addr = (HeapWord*) fc;
duke@435 90 _smallLinearAllocBlock.set(addr, fc->size() ,
duke@435 91 1024*SmallForLinearAlloc, fc->size());
duke@435 92 // Note that _unallocated_block is not updated here.
duke@435 93 // Allocations from the linear allocation block should
duke@435 94 // update it.
duke@435 95 } else {
duke@435 96 _smallLinearAllocBlock.set(0, 0, 1024*SmallForLinearAlloc,
duke@435 97 SmallForLinearAlloc);
duke@435 98 }
duke@435 99 // CMSIndexedFreeListReplenish should be at least 1
duke@435 100 CMSIndexedFreeListReplenish = MAX2((uintx)1, CMSIndexedFreeListReplenish);
duke@435 101 _promoInfo.setSpace(this);
duke@435 102 if (UseCMSBestFit) {
duke@435 103 _fitStrategy = FreeBlockBestFitFirst;
duke@435 104 } else {
duke@435 105 _fitStrategy = FreeBlockStrategyNone;
duke@435 106 }
duke@435 107 checkFreeListConsistency();
duke@435 108
duke@435 109 // Initialize locks for parallel case.
duke@435 110 if (ParallelGCThreads > 0) {
duke@435 111 for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
duke@435 112 _indexedFreeListParLocks[i] = new Mutex(Mutex::leaf - 1, // == ExpandHeap_lock - 1
duke@435 113 "a freelist par lock",
duke@435 114 true);
duke@435 115 if (_indexedFreeListParLocks[i] == NULL)
duke@435 116 vm_exit_during_initialization("Could not allocate a par lock");
duke@435 117 DEBUG_ONLY(
duke@435 118 _indexedFreeList[i].set_protecting_lock(_indexedFreeListParLocks[i]);
duke@435 119 )
duke@435 120 }
duke@435 121 _dictionary->set_par_lock(&_parDictionaryAllocLock);
duke@435 122 }
duke@435 123 }
duke@435 124
duke@435 125 // Like CompactibleSpace forward() but always calls cross_threshold() to
duke@435 126 // update the block offset table. Removed initialize_threshold call because
duke@435 127 // CFLS does not use a block offset array for contiguous spaces.
duke@435 128 HeapWord* CompactibleFreeListSpace::forward(oop q, size_t size,
duke@435 129 CompactPoint* cp, HeapWord* compact_top) {
duke@435 130 // q is alive
duke@435 131 // First check if we should switch compaction space
duke@435 132 assert(this == cp->space, "'this' should be current compaction space.");
duke@435 133 size_t compaction_max_size = pointer_delta(end(), compact_top);
duke@435 134 assert(adjustObjectSize(size) == cp->space->adjust_object_size_v(size),
duke@435 135 "virtual adjustObjectSize_v() method is not correct");
duke@435 136 size_t adjusted_size = adjustObjectSize(size);
duke@435 137 assert(compaction_max_size >= MinChunkSize || compaction_max_size == 0,
duke@435 138 "no small fragments allowed");
duke@435 139 assert(minimum_free_block_size() == MinChunkSize,
duke@435 140 "for de-virtualized reference below");
duke@435 141 // Can't leave a nonzero-size residual fragment smaller than MinChunkSize
duke@435 142 if (adjusted_size + MinChunkSize > compaction_max_size &&
duke@435 143 adjusted_size != compaction_max_size) {
duke@435 144 do {
duke@435 145 // switch to next compaction space
duke@435 146 cp->space->set_compaction_top(compact_top);
duke@435 147 cp->space = cp->space->next_compaction_space();
duke@435 148 if (cp->space == NULL) {
duke@435 149 cp->gen = GenCollectedHeap::heap()->prev_gen(cp->gen);
duke@435 150 assert(cp->gen != NULL, "compaction must succeed");
duke@435 151 cp->space = cp->gen->first_compaction_space();
duke@435 152 assert(cp->space != NULL, "generation must have a first compaction space");
duke@435 153 }
duke@435 154 compact_top = cp->space->bottom();
duke@435 155 cp->space->set_compaction_top(compact_top);
duke@435 156 // The correct adjusted_size may not be the same as that for this method
duke@435 157 // (i.e., cp->space may no longer be "this"), so adjust the size again.
duke@435 158 // Use the virtual method here, which was avoided above to save a virtual
duke@435 159 // dispatch.
duke@435 160 adjusted_size = cp->space->adjust_object_size_v(size);
duke@435 161 compaction_max_size = pointer_delta(cp->space->end(), compact_top);
duke@435 162 assert(cp->space->minimum_free_block_size() == 0, "just checking");
duke@435 163 } while (adjusted_size > compaction_max_size);
duke@435 164 }
duke@435 165
duke@435 166 // store the forwarding pointer into the mark word
duke@435 167 if ((HeapWord*)q != compact_top) {
duke@435 168 q->forward_to(oop(compact_top));
duke@435 169 assert(q->is_gc_marked(), "encoding the pointer should preserve the mark");
duke@435 170 } else {
duke@435 171 // if the object isn't moving we can just set the mark to the default
duke@435 172 // mark and handle it specially later on.
duke@435 173 q->init_mark();
duke@435 174 assert(q->forwardee() == NULL, "should be forwarded to NULL");
duke@435 175 }
duke@435 176
coleenp@548 177 VALIDATE_MARK_SWEEP_ONLY(MarkSweep::register_live_oop(q, adjusted_size));
duke@435 178 compact_top += adjusted_size;
duke@435 179
duke@435 180 // we need to update the offset table so that the beginnings of objects can be
duke@435 181 // found during scavenge. Note that we are updating the offset table based on
duke@435 182 // where the object will be once the compaction phase finishes.
duke@435 183
duke@435 184 // Always call cross_threshold(). A contiguous space can only call it when
duke@435 185 // the compaction_top exceeds the current threshold, but not for a
duke@435 186 // non-contiguous space.
duke@435 187 cp->threshold =
duke@435 188 cp->space->cross_threshold(compact_top - adjusted_size, compact_top);
duke@435 189 return compact_top;
duke@435 190 }
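
(Aside: forward() above is standard mark-compact forwarding with a per-object BOT update; a condensed, hypothetical restatement of the core step, omitting the space-switching and not-moving cases, is shown below.)

#include <cstddef>

struct Obj { Obj* forwardee; size_t size; };    // stand-in for an oop header

// Record where 'q' will live after compaction and advance the destination cursor.
// update_bot plays the role of cross_threshold()/single_block() above.
static char* forward_one(Obj* q, size_t adjusted_size, char* compact_top,
                         void (*update_bot)(char* blk_start, char* blk_end)) {
  q->forwardee = (Obj*)compact_top;             // forwarding pointer in the header
  char* new_top = compact_top + adjusted_size;  // bump the destination cursor
  update_bot(compact_top, new_top);             // record the block's start in the BOT
  return new_top;
}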
duke@435 191
duke@435 192 // A modified copy of OffsetTableContigSpace::cross_threshold() with _offsets -> _bt
duke@435 193 // and use of single_block instead of alloc_block. The name here is not really
duke@435 194 // appropriate - maybe a more general name could be invented for both the
duke@435 195 // contiguous and noncontiguous spaces.
duke@435 196
duke@435 197 HeapWord* CompactibleFreeListSpace::cross_threshold(HeapWord* start, HeapWord* the_end) {
duke@435 198 _bt.single_block(start, the_end);
duke@435 199 return end();
duke@435 200 }
duke@435 201
duke@435 202 // Initialize them to NULL.
duke@435 203 void CompactibleFreeListSpace::initializeIndexedFreeListArray() {
duke@435 204 for (size_t i = 0; i < IndexSetSize; i++) {
duke@435 205 // Note that on platforms where objects are double word aligned,
duke@435 206 // the odd array elements are not used. It is convenient, however,
duke@435 207 // to map directly from the object size to the array element.
duke@435 208 _indexedFreeList[i].reset(IndexSetSize);
duke@435 209 _indexedFreeList[i].set_size(i);
duke@435 210 assert(_indexedFreeList[i].count() == 0, "reset check failed");
duke@435 211 assert(_indexedFreeList[i].head() == NULL, "reset check failed");
duke@435 212 assert(_indexedFreeList[i].tail() == NULL, "reset check failed");
duke@435 213 assert(_indexedFreeList[i].hint() == IndexSetSize, "reset check failed");
duke@435 214 }
duke@435 215 }
duke@435 216
duke@435 217 void CompactibleFreeListSpace::resetIndexedFreeListArray() {
duke@435 218 for (int i = 1; i < IndexSetSize; i++) {
duke@435 219 assert(_indexedFreeList[i].size() == (size_t) i,
duke@435 220 "Indexed free list sizes are incorrect");
duke@435 221 _indexedFreeList[i].reset(IndexSetSize);
duke@435 222 assert(_indexedFreeList[i].count() == 0, "reset check failed");
duke@435 223 assert(_indexedFreeList[i].head() == NULL, "reset check failed");
duke@435 224 assert(_indexedFreeList[i].tail() == NULL, "reset check failed");
duke@435 225 assert(_indexedFreeList[i].hint() == IndexSetSize, "reset check failed");
duke@435 226 }
duke@435 227 }
duke@435 228
duke@435 229 void CompactibleFreeListSpace::reset(MemRegion mr) {
duke@435 230 resetIndexedFreeListArray();
duke@435 231 dictionary()->reset();
duke@435 232 if (BlockOffsetArrayUseUnallocatedBlock) {
duke@435 233 assert(end() == mr.end(), "We are compacting to the bottom of CMS gen");
duke@435 234 // Everything's allocated until proven otherwise.
duke@435 235 _bt.set_unallocated_block(end());
duke@435 236 }
duke@435 237 if (!mr.is_empty()) {
duke@435 238 assert(mr.word_size() >= MinChunkSize, "Chunk size is too small");
duke@435 239 _bt.single_block(mr.start(), mr.word_size());
duke@435 240 FreeChunk* fc = (FreeChunk*) mr.start();
duke@435 241 fc->setSize(mr.word_size());
duke@435 242 if (mr.word_size() >= IndexSetSize ) {
duke@435 243 returnChunkToDictionary(fc);
duke@435 244 } else {
duke@435 245 _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
duke@435 246 _indexedFreeList[mr.word_size()].returnChunkAtHead(fc);
duke@435 247 }
duke@435 248 }
duke@435 249 _promoInfo.reset();
duke@435 250 _smallLinearAllocBlock._ptr = NULL;
duke@435 251 _smallLinearAllocBlock._word_size = 0;
duke@435 252 }
duke@435 253
duke@435 254 void CompactibleFreeListSpace::reset_after_compaction() {
duke@435 255 // Reset the space to the new reality - one free chunk.
duke@435 256 MemRegion mr(compaction_top(), end());
duke@435 257 reset(mr);
duke@435 258 // Now refill the linear allocation block(s) if possible.
duke@435 259 if (_adaptive_freelists) {
duke@435 260 refillLinearAllocBlocksIfNeeded();
duke@435 261 } else {
duke@435 262 // Place as much of mr in the linAB as we can get,
duke@435 263 // provided it was big enough to go into the dictionary.
duke@435 264 FreeChunk* fc = dictionary()->findLargestDict();
duke@435 265 if (fc != NULL) {
duke@435 266 assert(fc->size() == mr.word_size(),
duke@435 267 "Why was the chunk broken up?");
duke@435 268 removeChunkFromDictionary(fc);
duke@435 269 HeapWord* addr = (HeapWord*) fc;
duke@435 270 _smallLinearAllocBlock.set(addr, fc->size() ,
duke@435 271 1024*SmallForLinearAlloc, fc->size());
duke@435 272 // Note that _unallocated_block is not updated here.
duke@435 273 }
duke@435 274 }
duke@435 275 }
duke@435 276
duke@435 277 // Walks the entire dictionary, returning a coterminal
duke@435 278 // chunk, if it exists. Use with caution since it involves
duke@435 279 // a potentially complete walk of a potentially large tree.
duke@435 280 FreeChunk* CompactibleFreeListSpace::find_chunk_at_end() {
duke@435 281
duke@435 282 assert_lock_strong(&_freelistLock);
duke@435 283
duke@435 284 return dictionary()->find_chunk_ends_at(end());
duke@435 285 }
duke@435 286
duke@435 287
duke@435 288 #ifndef PRODUCT
duke@435 289 void CompactibleFreeListSpace::initializeIndexedFreeListArrayReturnedBytes() {
duke@435 290 for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
duke@435 291 _indexedFreeList[i].allocation_stats()->set_returnedBytes(0);
duke@435 292 }
duke@435 293 }
duke@435 294
duke@435 295 size_t CompactibleFreeListSpace::sumIndexedFreeListArrayReturnedBytes() {
duke@435 296 size_t sum = 0;
duke@435 297 for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
duke@435 298 sum += _indexedFreeList[i].allocation_stats()->returnedBytes();
duke@435 299 }
duke@435 300 return sum;
duke@435 301 }
duke@435 302
duke@435 303 size_t CompactibleFreeListSpace::totalCountInIndexedFreeLists() const {
duke@435 304 size_t count = 0;
duke@435 305 for (int i = MinChunkSize; i < IndexSetSize; i++) {
duke@435 306 debug_only(
duke@435 307 ssize_t total_list_count = 0;
duke@435 308 for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
duke@435 309 fc = fc->next()) {
duke@435 310 total_list_count++;
duke@435 311 }
duke@435 312 assert(total_list_count == _indexedFreeList[i].count(),
duke@435 313 "Count in list is incorrect");
duke@435 314 )
duke@435 315 count += _indexedFreeList[i].count();
duke@435 316 }
duke@435 317 return count;
duke@435 318 }
duke@435 319
duke@435 320 size_t CompactibleFreeListSpace::totalCount() {
duke@435 321 size_t num = totalCountInIndexedFreeLists();
duke@435 322 num += dictionary()->totalCount();
duke@435 323 if (_smallLinearAllocBlock._word_size != 0) {
duke@435 324 num++;
duke@435 325 }
duke@435 326 return num;
duke@435 327 }
duke@435 328 #endif
duke@435 329
duke@435 330 bool CompactibleFreeListSpace::is_free_block(const HeapWord* p) const {
duke@435 331 FreeChunk* fc = (FreeChunk*) p;
duke@435 332 return fc->isFree();
duke@435 333 }
duke@435 334
duke@435 335 size_t CompactibleFreeListSpace::used() const {
duke@435 336 return capacity() - free();
duke@435 337 }
duke@435 338
duke@435 339 size_t CompactibleFreeListSpace::free() const {
duke@435 340 // "MT-safe, but not MT-precise"(TM), if you will: i.e.
duke@435 341 // if you do this while the structures are in flux you
duke@435 342 // may get an approximate answer only; for instance
duke@435 343 // because there is concurrent allocation either
duke@435 344 // directly by mutators or for promotion during a GC.
duke@435 345 // It's "MT-safe", however, in the sense that you are guaranteed
duke@435 346 // not to crash and burn, for instance, because of walking
duke@435 347 // pointers that could disappear as you were walking them.
duke@435 348 // The approximation is because the various components
duke@435 349 // that are read below are not read atomically (and
duke@435 350 // further the computation of totalSizeInIndexedFreeLists()
duke@435 351 // is itself a non-atomic computation). The normal use of
duke@435 352 // this is during a resize operation at the end of GC
duke@435 353 // and at that time you are guaranteed to get the
duke@435 354 // correct actual value. However, for instance, this is
duke@435 355 // also read completely asynchronously by the "perf-sampler"
duke@435 356 // that supports jvmstat, and you are apt to see the values
duke@435 357 // flicker in such cases.
duke@435 358 assert(_dictionary != NULL, "No _dictionary?");
duke@435 359 return (_dictionary->totalChunkSize(DEBUG_ONLY(freelistLock())) +
duke@435 360 totalSizeInIndexedFreeLists() +
duke@435 361 _smallLinearAllocBlock._word_size) * HeapWordSize;
duke@435 362 }
duke@435 363
duke@435 364 size_t CompactibleFreeListSpace::max_alloc_in_words() const {
duke@435 365 assert(_dictionary != NULL, "No _dictionary?");
duke@435 366 assert_locked();
duke@435 367 size_t res = _dictionary->maxChunkSize();
duke@435 368 res = MAX2(res, MIN2(_smallLinearAllocBlock._word_size,
duke@435 369 (size_t) SmallForLinearAlloc - 1));
duke@435 370 // XXX the following could potentially be pretty slow;
duke@435 371 // should one, pessimally for the rare cases when res
duke@435 372 // calculated above is less than IndexSetSize,
duke@435 373 // just return res calculated above? My reasoning was that
duke@435 374 // those cases will be so rare that the extra time spent doesn't
duke@435 375 // really matter....
duke@435 376 // Note: do not change the loop test i >= res + IndexSetStride
duke@435 377 // to i > res below, because i is unsigned and res may be zero.
duke@435 378 for (size_t i = IndexSetSize - 1; i >= res + IndexSetStride;
duke@435 379 i -= IndexSetStride) {
duke@435 380 if (_indexedFreeList[i].head() != NULL) {
duke@435 381 assert(_indexedFreeList[i].count() != 0, "Inconsistent FreeList");
duke@435 382 return i;
duke@435 383 }
duke@435 384 }
duke@435 385 return res;
duke@435 386 }
duke@435 387
ysr@1580 388 void CompactibleFreeListSpace::print_indexed_free_lists(outputStream* st)
ysr@1580 389 const {
ysr@1580 390 reportIndexedFreeListStatistics();
ysr@1580 391 st->print_cr("Layout of Indexed Freelists");
ysr@1580 392 st->print_cr("---------------------------");
ysr@1580 393 FreeList::print_labels_on(st, "size");
ysr@1580 394 for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
ysr@1580 395 _indexedFreeList[i].print_on(st);
ysr@1580 396 for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
ysr@1580 397 fc = fc->next()) {
ysr@1580 398 st->print_cr("\t[" PTR_FORMAT "," PTR_FORMAT ") %s",
ysr@1580 399 fc, (HeapWord*)fc + i,
ysr@1580 400 fc->cantCoalesce() ? "\t CC" : "");
ysr@1580 401 }
ysr@1580 402 }
ysr@1580 403 }
ysr@1580 404
ysr@1580 405 void CompactibleFreeListSpace::print_promo_info_blocks(outputStream* st)
ysr@1580 406 const {
ysr@1580 407 _promoInfo.print_on(st);
ysr@1580 408 }
ysr@1580 409
ysr@1580 410 void CompactibleFreeListSpace::print_dictionary_free_lists(outputStream* st)
ysr@1580 411 const {
ysr@1580 412 _dictionary->reportStatistics();
ysr@1580 413 st->print_cr("Layout of Freelists in Tree");
ysr@1580 414 st->print_cr("---------------------------");
ysr@1580 415 _dictionary->print_free_lists(st);
ysr@1580 416 }
ysr@1580 417
ysr@1580 418 class BlkPrintingClosure: public BlkClosure {
ysr@1580 419 const CMSCollector* _collector;
ysr@1580 420 const CompactibleFreeListSpace* _sp;
ysr@1580 421 const CMSBitMap* _live_bit_map;
ysr@1580 422 const bool _post_remark;
ysr@1580 423 outputStream* _st;
ysr@1580 424 public:
ysr@1580 425 BlkPrintingClosure(const CMSCollector* collector,
ysr@1580 426 const CompactibleFreeListSpace* sp,
ysr@1580 427 const CMSBitMap* live_bit_map,
ysr@1580 428 outputStream* st):
ysr@1580 429 _collector(collector),
ysr@1580 430 _sp(sp),
ysr@1580 431 _live_bit_map(live_bit_map),
ysr@1580 432 _post_remark(collector->abstract_state() > CMSCollector::FinalMarking),
ysr@1580 433 _st(st) { }
ysr@1580 434 size_t do_blk(HeapWord* addr);
ysr@1580 435 };
ysr@1580 436
ysr@1580 437 size_t BlkPrintingClosure::do_blk(HeapWord* addr) {
ysr@1580 438 size_t sz = _sp->block_size_no_stall(addr, _collector);
ysr@1580 439 assert(sz != 0, "Should always be able to compute a size");
ysr@1580 440 if (_sp->block_is_obj(addr)) {
ysr@1580 441 const bool dead = _post_remark && !_live_bit_map->isMarked(addr);
ysr@1580 442 _st->print_cr(PTR_FORMAT ": %s object of size " SIZE_FORMAT "%s",
ysr@1580 443 addr,
ysr@1580 444 dead ? "dead" : "live",
ysr@1580 445 sz,
ysr@1580 446 (!dead && CMSPrintObjectsInDump) ? ":" : ".");
ysr@1580 447 if (CMSPrintObjectsInDump && !dead) {
ysr@1580 448 oop(addr)->print_on(_st);
ysr@1580 449 _st->print_cr("--------------------------------------");
ysr@1580 450 }
ysr@1580 451 } else { // free block
ysr@1580 452 _st->print_cr(PTR_FORMAT ": free block of size " SIZE_FORMAT "%s",
ysr@1580 453 addr, sz, CMSPrintChunksInDump ? ":" : ".");
ysr@1580 454 if (CMSPrintChunksInDump) {
ysr@1580 455 ((FreeChunk*)addr)->print_on(_st);
ysr@1580 456 _st->print_cr("--------------------------------------");
ysr@1580 457 }
ysr@1580 458 }
ysr@1580 459 return sz;
ysr@1580 460 }
ysr@1580 461
ysr@1580 462 void CompactibleFreeListSpace::dump_at_safepoint_with_locks(CMSCollector* c,
ysr@1580 463 outputStream* st) {
ysr@1580 464 st->print_cr("\n=========================");
ysr@1580 465 st->print_cr("Block layout in CMS Heap:");
ysr@1580 466 st->print_cr("=========================");
ysr@1580 467 BlkPrintingClosure bpcl(c, this, c->markBitMap(), st);
ysr@1580 468 blk_iterate(&bpcl);
ysr@1580 469
ysr@1580 470 st->print_cr("\n=======================================");
ysr@1580 471 st->print_cr("Order & Layout of Promotion Info Blocks");
ysr@1580 472 st->print_cr("=======================================");
ysr@1580 473 print_promo_info_blocks(st);
ysr@1580 474
ysr@1580 475 st->print_cr("\n===========================");
ysr@1580 476 st->print_cr("Order of Indexed Free Lists");
ysr@1580 477 st->print_cr("===========================");
ysr@1580 478 print_indexed_free_lists(st);
ysr@1580 479
ysr@1580 480 st->print_cr("\n=================================");
ysr@1580 481 st->print_cr("Order of Free Lists in Dictionary");
ysr@1580 482 st->print_cr("=================================");
ysr@1580 483 print_dictionary_free_lists(st);
ysr@1580 484 }
ysr@1580 485
ysr@1580 486
duke@435 487 void CompactibleFreeListSpace::reportFreeListStatistics() const {
duke@435 488 assert_lock_strong(&_freelistLock);
duke@435 489 assert(PrintFLSStatistics != 0, "Reporting error");
duke@435 490 _dictionary->reportStatistics();
duke@435 491 if (PrintFLSStatistics > 1) {
duke@435 492 reportIndexedFreeListStatistics();
duke@435 493 size_t totalSize = totalSizeInIndexedFreeLists() +
duke@435 494 _dictionary->totalChunkSize(DEBUG_ONLY(freelistLock()));
duke@435 495 gclog_or_tty->print(" free=%ld frag=%1.4f\n", totalSize, flsFrag());
duke@435 496 }
duke@435 497 }
duke@435 498
duke@435 499 void CompactibleFreeListSpace::reportIndexedFreeListStatistics() const {
duke@435 500 assert_lock_strong(&_freelistLock);
duke@435 501 gclog_or_tty->print("Statistics for IndexedFreeLists:\n"
duke@435 502 "--------------------------------\n");
duke@435 503 size_t totalSize = totalSizeInIndexedFreeLists();
duke@435 504 size_t freeBlocks = numFreeBlocksInIndexedFreeLists();
duke@435 505 gclog_or_tty->print("Total Free Space: %d\n", totalSize);
duke@435 506 gclog_or_tty->print("Max Chunk Size: %d\n", maxChunkSizeInIndexedFreeLists());
duke@435 507 gclog_or_tty->print("Number of Blocks: %d\n", freeBlocks);
duke@435 508 if (freeBlocks != 0) {
duke@435 509 gclog_or_tty->print("Av. Block Size: %d\n", totalSize/freeBlocks);
duke@435 510 }
duke@435 511 }
duke@435 512
duke@435 513 size_t CompactibleFreeListSpace::numFreeBlocksInIndexedFreeLists() const {
duke@435 514 size_t res = 0;
duke@435 515 for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
duke@435 516 debug_only(
duke@435 517 ssize_t recount = 0;
duke@435 518 for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
duke@435 519 fc = fc->next()) {
duke@435 520 recount += 1;
duke@435 521 }
duke@435 522 assert(recount == _indexedFreeList[i].count(),
duke@435 523 "Incorrect count in list");
duke@435 524 )
duke@435 525 res += _indexedFreeList[i].count();
duke@435 526 }
duke@435 527 return res;
duke@435 528 }
duke@435 529
duke@435 530 size_t CompactibleFreeListSpace::maxChunkSizeInIndexedFreeLists() const {
duke@435 531 for (size_t i = IndexSetSize - 1; i != 0; i -= IndexSetStride) {
duke@435 532 if (_indexedFreeList[i].head() != NULL) {
duke@435 533 assert(_indexedFreeList[i].count() != 0, "Inconsistent FreeList");
duke@435 534 return (size_t)i;
duke@435 535 }
duke@435 536 }
duke@435 537 return 0;
duke@435 538 }
duke@435 539
duke@435 540 void CompactibleFreeListSpace::set_end(HeapWord* value) {
duke@435 541 HeapWord* prevEnd = end();
duke@435 542 assert(prevEnd != value, "unnecessary set_end call");
duke@435 543 assert(prevEnd == NULL || value >= unallocated_block(), "New end is below unallocated block");
duke@435 544 _end = value;
duke@435 545 if (prevEnd != NULL) {
duke@435 546 // Resize the underlying block offset table.
duke@435 547 _bt.resize(pointer_delta(value, bottom()));
ysr@1580 548 if (value <= prevEnd) {
ysr@1580 549 assert(value >= unallocated_block(), "New end is below unallocated block");
ysr@1580 550 } else {
ysr@1580 551 // Now, take this new chunk and add it to the free blocks.
ysr@1580 552 // Note that the BOT has not yet been updated for this block.
ysr@1580 553 size_t newFcSize = pointer_delta(value, prevEnd);
ysr@1580 554 // XXX This is REALLY UGLY and should be fixed up. XXX
ysr@1580 555 if (!_adaptive_freelists && _smallLinearAllocBlock._ptr == NULL) {
ysr@1580 556 // Mark the boundary of the new block in BOT
ysr@1580 557 _bt.mark_block(prevEnd, value);
ysr@1580 558 // put it all in the linAB
ysr@1580 559 if (ParallelGCThreads == 0) {
ysr@1580 560 _smallLinearAllocBlock._ptr = prevEnd;
ysr@1580 561 _smallLinearAllocBlock._word_size = newFcSize;
ysr@1580 562 repairLinearAllocBlock(&_smallLinearAllocBlock);
ysr@1580 563 } else { // ParallelGCThreads > 0
ysr@1580 564 MutexLockerEx x(parDictionaryAllocLock(),
ysr@1580 565 Mutex::_no_safepoint_check_flag);
ysr@1580 566 _smallLinearAllocBlock._ptr = prevEnd;
ysr@1580 567 _smallLinearAllocBlock._word_size = newFcSize;
ysr@1580 568 repairLinearAllocBlock(&_smallLinearAllocBlock);
ysr@1580 569 }
ysr@1580 570 // Births of chunks put into a LinAB are not recorded. Births
ysr@1580 571 // of chunks as they are allocated out of a LinAB are.
ysr@1580 572 } else {
ysr@1580 573 // Add the block to the free lists, if possible coalescing it
ysr@1580 574 // with the last free block, and update the BOT and census data.
ysr@1580 575 addChunkToFreeListsAtEndRecordingStats(prevEnd, newFcSize);
duke@435 576 }
duke@435 577 }
duke@435 578 }
duke@435 579 }
duke@435 580
duke@435 581 class FreeListSpace_DCTOC : public Filtering_DCTOC {
duke@435 582 CompactibleFreeListSpace* _cfls;
duke@435 583 CMSCollector* _collector;
duke@435 584 protected:
duke@435 585 // Override.
duke@435 586 #define walk_mem_region_with_cl_DECL(ClosureType) \
duke@435 587 virtual void walk_mem_region_with_cl(MemRegion mr, \
duke@435 588 HeapWord* bottom, HeapWord* top, \
duke@435 589 ClosureType* cl); \
duke@435 590 void walk_mem_region_with_cl_par(MemRegion mr, \
duke@435 591 HeapWord* bottom, HeapWord* top, \
duke@435 592 ClosureType* cl); \
duke@435 593 void walk_mem_region_with_cl_nopar(MemRegion mr, \
duke@435 594 HeapWord* bottom, HeapWord* top, \
duke@435 595 ClosureType* cl)
duke@435 596 walk_mem_region_with_cl_DECL(OopClosure);
duke@435 597 walk_mem_region_with_cl_DECL(FilteringClosure);
duke@435 598
duke@435 599 public:
duke@435 600 FreeListSpace_DCTOC(CompactibleFreeListSpace* sp,
duke@435 601 CMSCollector* collector,
duke@435 602 OopClosure* cl,
duke@435 603 CardTableModRefBS::PrecisionStyle precision,
duke@435 604 HeapWord* boundary) :
duke@435 605 Filtering_DCTOC(sp, cl, precision, boundary),
duke@435 606 _cfls(sp), _collector(collector) {}
duke@435 607 };
duke@435 608
duke@435 609 // We de-virtualize the block-related calls below, since we know that our
duke@435 610 // space is a CompactibleFreeListSpace.
duke@435 611 #define FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(ClosureType) \
duke@435 612 void FreeListSpace_DCTOC::walk_mem_region_with_cl(MemRegion mr, \
duke@435 613 HeapWord* bottom, \
duke@435 614 HeapWord* top, \
duke@435 615 ClosureType* cl) { \
duke@435 616 if (SharedHeap::heap()->n_par_threads() > 0) { \
duke@435 617 walk_mem_region_with_cl_par(mr, bottom, top, cl); \
duke@435 618 } else { \
duke@435 619 walk_mem_region_with_cl_nopar(mr, bottom, top, cl); \
duke@435 620 } \
duke@435 621 } \
duke@435 622 void FreeListSpace_DCTOC::walk_mem_region_with_cl_par(MemRegion mr, \
duke@435 623 HeapWord* bottom, \
duke@435 624 HeapWord* top, \
duke@435 625 ClosureType* cl) { \
duke@435 626 /* Skip parts that are before "mr", in case "block_start" sent us \
duke@435 627 back too far. */ \
duke@435 628 HeapWord* mr_start = mr.start(); \
duke@435 629 size_t bot_size = _cfls->CompactibleFreeListSpace::block_size(bottom); \
duke@435 630 HeapWord* next = bottom + bot_size; \
duke@435 631 while (next < mr_start) { \
duke@435 632 bottom = next; \
duke@435 633 bot_size = _cfls->CompactibleFreeListSpace::block_size(bottom); \
duke@435 634 next = bottom + bot_size; \
duke@435 635 } \
duke@435 636 \
duke@435 637 while (bottom < top) { \
duke@435 638 if (_cfls->CompactibleFreeListSpace::block_is_obj(bottom) && \
duke@435 639 !_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks( \
duke@435 640 oop(bottom)) && \
duke@435 641 !_collector->CMSCollector::is_dead_obj(oop(bottom))) { \
duke@435 642 size_t word_sz = oop(bottom)->oop_iterate(cl, mr); \
duke@435 643 bottom += _cfls->adjustObjectSize(word_sz); \
duke@435 644 } else { \
duke@435 645 bottom += _cfls->CompactibleFreeListSpace::block_size(bottom); \
duke@435 646 } \
duke@435 647 } \
duke@435 648 } \
duke@435 649 void FreeListSpace_DCTOC::walk_mem_region_with_cl_nopar(MemRegion mr, \
duke@435 650 HeapWord* bottom, \
duke@435 651 HeapWord* top, \
duke@435 652 ClosureType* cl) { \
duke@435 653 /* Skip parts that are before "mr", in case "block_start" sent us \
duke@435 654 back too far. */ \
duke@435 655 HeapWord* mr_start = mr.start(); \
duke@435 656 size_t bot_size = _cfls->CompactibleFreeListSpace::block_size_nopar(bottom); \
duke@435 657 HeapWord* next = bottom + bot_size; \
duke@435 658 while (next < mr_start) { \
duke@435 659 bottom = next; \
duke@435 660 bot_size = _cfls->CompactibleFreeListSpace::block_size_nopar(bottom); \
duke@435 661 next = bottom + bot_size; \
duke@435 662 } \
duke@435 663 \
duke@435 664 while (bottom < top) { \
duke@435 665 if (_cfls->CompactibleFreeListSpace::block_is_obj_nopar(bottom) && \
duke@435 666 !_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks( \
duke@435 667 oop(bottom)) && \
duke@435 668 !_collector->CMSCollector::is_dead_obj(oop(bottom))) { \
duke@435 669 size_t word_sz = oop(bottom)->oop_iterate(cl, mr); \
duke@435 670 bottom += _cfls->adjustObjectSize(word_sz); \
duke@435 671 } else { \
duke@435 672 bottom += _cfls->CompactibleFreeListSpace::block_size_nopar(bottom); \
duke@435 673 } \
duke@435 674 } \
duke@435 675 }
duke@435 676
duke@435 677 // (There are only two of these, rather than N, because the split is due
duke@435 678 // only to the introduction of the FilteringClosure, a local part of the
duke@435 679 // impl of this abstraction.)
duke@435 680 FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(OopClosure)
duke@435 681 FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(FilteringClosure)
duke@435 682
duke@435 683 DirtyCardToOopClosure*
duke@435 684 CompactibleFreeListSpace::new_dcto_cl(OopClosure* cl,
duke@435 685 CardTableModRefBS::PrecisionStyle precision,
duke@435 686 HeapWord* boundary) {
duke@435 687 return new FreeListSpace_DCTOC(this, _collector, cl, precision, boundary);
duke@435 688 }
duke@435 689
duke@435 690
duke@435 691 // Note on locking for the space iteration functions:
duke@435 692 // since the collector's iteration activities are concurrent with
duke@435 693 // allocation activities by mutators, absent a suitable mutual exclusion
duke@435 694 // mechanism the iterators may go awry. For instance a block being iterated
duke@435 695 // may suddenly be allocated or divided up and part of it allocated and
duke@435 696 // so on.
duke@435 697
duke@435 698 // Apply the given closure to each block in the space.
duke@435 699 void CompactibleFreeListSpace::blk_iterate_careful(BlkClosureCareful* cl) {
duke@435 700 assert_lock_strong(freelistLock());
duke@435 701 HeapWord *cur, *limit;
duke@435 702 for (cur = bottom(), limit = end(); cur < limit;
duke@435 703 cur += cl->do_blk_careful(cur));
duke@435 704 }
duke@435 705
duke@435 706 // Apply the given closure to each block in the space.
duke@435 707 void CompactibleFreeListSpace::blk_iterate(BlkClosure* cl) {
duke@435 708 assert_lock_strong(freelistLock());
duke@435 709 HeapWord *cur, *limit;
duke@435 710 for (cur = bottom(), limit = end(); cur < limit;
duke@435 711 cur += cl->do_blk(cur));
duke@435 712 }
duke@435 713
duke@435 714 // Apply the given closure to each oop in the space.
duke@435 715 void CompactibleFreeListSpace::oop_iterate(OopClosure* cl) {
duke@435 716 assert_lock_strong(freelistLock());
duke@435 717 HeapWord *cur, *limit;
duke@435 718 size_t curSize;
duke@435 719 for (cur = bottom(), limit = end(); cur < limit;
duke@435 720 cur += curSize) {
duke@435 721 curSize = block_size(cur);
duke@435 722 if (block_is_obj(cur)) {
duke@435 723 oop(cur)->oop_iterate(cl);
duke@435 724 }
duke@435 725 }
duke@435 726 }
duke@435 727
duke@435 728 // Apply the given closure to each oop in the intersection of the space and the memory region.
duke@435 729 void CompactibleFreeListSpace::oop_iterate(MemRegion mr, OopClosure* cl) {
duke@435 730 assert_lock_strong(freelistLock());
duke@435 731 if (is_empty()) {
duke@435 732 return;
duke@435 733 }
duke@435 734 MemRegion cur = MemRegion(bottom(), end());
duke@435 735 mr = mr.intersection(cur);
duke@435 736 if (mr.is_empty()) {
duke@435 737 return;
duke@435 738 }
duke@435 739 if (mr.equals(cur)) {
duke@435 740 oop_iterate(cl);
duke@435 741 return;
duke@435 742 }
duke@435 743 assert(mr.end() <= end(), "just took an intersection above");
duke@435 744 HeapWord* obj_addr = block_start(mr.start());
duke@435 745 HeapWord* t = mr.end();
duke@435 746
duke@435 747 SpaceMemRegionOopsIterClosure smr_blk(cl, mr);
duke@435 748 if (block_is_obj(obj_addr)) {
duke@435 749 // Handle first object specially.
duke@435 750 oop obj = oop(obj_addr);
duke@435 751 obj_addr += adjustObjectSize(obj->oop_iterate(&smr_blk));
duke@435 752 } else {
duke@435 753 FreeChunk* fc = (FreeChunk*)obj_addr;
duke@435 754 obj_addr += fc->size();
duke@435 755 }
duke@435 756 while (obj_addr < t) {
duke@435 757 HeapWord* obj = obj_addr;
duke@435 758 obj_addr += block_size(obj_addr);
duke@435 759 // If "obj_addr" is not greater than top, then the
duke@435 760 // entire object "obj" is within the region.
duke@435 761 if (obj_addr <= t) {
duke@435 762 if (block_is_obj(obj)) {
duke@435 763 oop(obj)->oop_iterate(cl);
duke@435 764 }
duke@435 765 } else {
duke@435 766 // "obj" extends beyond end of region
duke@435 767 if (block_is_obj(obj)) {
duke@435 768 oop(obj)->oop_iterate(&smr_blk);
duke@435 769 }
duke@435 770 break;
duke@435 771 }
duke@435 772 }
duke@435 773 }
duke@435 774
duke@435 775 // NOTE: In the following methods, in order to safely be able to
duke@435 776 // apply the closure to an object, we need to be sure that the
duke@435 777 // object has been initialized. We are guaranteed that an object
duke@435 778 // is initialized if we are holding the Heap_lock with the
duke@435 779 // world stopped.
duke@435 780 void CompactibleFreeListSpace::verify_objects_initialized() const {
duke@435 781 if (is_init_completed()) {
duke@435 782 assert_locked_or_safepoint(Heap_lock);
duke@435 783 if (Universe::is_fully_initialized()) {
duke@435 784 guarantee(SafepointSynchronize::is_at_safepoint(),
duke@435 785 "Required for objects to be initialized");
duke@435 786 }
duke@435 787 } // else make a concession at vm start-up
duke@435 788 }
duke@435 789
duke@435 790 // Apply the given closure to each object in the space
duke@435 791 void CompactibleFreeListSpace::object_iterate(ObjectClosure* blk) {
duke@435 792 assert_lock_strong(freelistLock());
duke@435 793 NOT_PRODUCT(verify_objects_initialized());
duke@435 794 HeapWord *cur, *limit;
duke@435 795 size_t curSize;
duke@435 796 for (cur = bottom(), limit = end(); cur < limit;
duke@435 797 cur += curSize) {
duke@435 798 curSize = block_size(cur);
duke@435 799 if (block_is_obj(cur)) {
duke@435 800 blk->do_object(oop(cur));
duke@435 801 }
duke@435 802 }
duke@435 803 }
duke@435 804
jmasa@952 805 // Apply the given closure to each live object in the space
jmasa@952 806 // The usage of CompactibleFreeListSpace
jmasa@952 807 // by the ConcurrentMarkSweepGeneration for concurrent GC's allows
jmasa@952 808 // objects in the space to hold references to objects that are no longer
jmasa@952 809 // valid. For example, an object may reference another object
jmasa@952 810 // that has already been swept up (collected). This method uses
jmasa@952 811 // obj_is_alive() to determine whether it is safe to apply the closure to
jmasa@952 812 // an object. See obj_is_alive() for details on how liveness of an
jmasa@952 813 // object is decided.
jmasa@952 814
jmasa@952 815 void CompactibleFreeListSpace::safe_object_iterate(ObjectClosure* blk) {
jmasa@952 816 assert_lock_strong(freelistLock());
jmasa@952 817 NOT_PRODUCT(verify_objects_initialized());
jmasa@952 818 HeapWord *cur, *limit;
jmasa@952 819 size_t curSize;
jmasa@952 820 for (cur = bottom(), limit = end(); cur < limit;
jmasa@952 821 cur += curSize) {
jmasa@952 822 curSize = block_size(cur);
jmasa@952 823 if (block_is_obj(cur) && obj_is_alive(cur)) {
jmasa@952 824 blk->do_object(oop(cur));
jmasa@952 825 }
jmasa@952 826 }
jmasa@952 827 }
jmasa@952 828
duke@435 829 void CompactibleFreeListSpace::object_iterate_mem(MemRegion mr,
duke@435 830 UpwardsObjectClosure* cl) {
ysr@1580 831 assert_locked(freelistLock());
duke@435 832 NOT_PRODUCT(verify_objects_initialized());
duke@435 833 Space::object_iterate_mem(mr, cl);
duke@435 834 }
duke@435 835
duke@435 836 // Callers of this iterator beware: The closure application should
duke@435 837 // be robust in the face of uninitialized objects and should (always)
duke@435 838 // return a correct size so that the next addr + size below gives us a
duke@435 839 // valid block boundary. [See for instance,
duke@435 840 // ScanMarkedObjectsAgainCarefullyClosure::do_object_careful()
duke@435 841 // in ConcurrentMarkSweepGeneration.cpp.]
duke@435 842 HeapWord*
duke@435 843 CompactibleFreeListSpace::object_iterate_careful(ObjectClosureCareful* cl) {
duke@435 844 assert_lock_strong(freelistLock());
duke@435 845 HeapWord *addr, *last;
duke@435 846 size_t size;
duke@435 847 for (addr = bottom(), last = end();
duke@435 848 addr < last; addr += size) {
duke@435 849 FreeChunk* fc = (FreeChunk*)addr;
duke@435 850 if (fc->isFree()) {
duke@435 851 // Since we hold the free list lock, which protects direct
duke@435 852 // allocation in this generation by mutators, a free object
duke@435 853 // will remain free throughout this iteration code.
duke@435 854 size = fc->size();
duke@435 855 } else {
duke@435 856 // Note that the object need not necessarily be initialized,
duke@435 857 // because (for instance) the free list lock does NOT protect
duke@435 858 // object initialization. The closure application below must
duke@435 859 // therefore be correct in the face of uninitialized objects.
duke@435 860 size = cl->do_object_careful(oop(addr));
duke@435 861 if (size == 0) {
duke@435 862 // An unparsable object found. Signal early termination.
duke@435 863 return addr;
duke@435 864 }
duke@435 865 }
duke@435 866 }
duke@435 867 return NULL;
duke@435 868 }
duke@435 869
duke@435 870 // Callers of this iterator beware: The closure application should
duke@435 871 // be robust in the face of uninitialized objects and should (always)
duke@435 872 // return a correct size so that the next addr + size below gives us a
duke@435 873 // valid block boundary. [See for instance,
duke@435 874 // ScanMarkedObjectsAgainCarefullyClosure::do_object_careful()
duke@435 875 // in ConcurrentMarkSweepGeneration.cpp.]
duke@435 876 HeapWord*
duke@435 877 CompactibleFreeListSpace::object_iterate_careful_m(MemRegion mr,
duke@435 878 ObjectClosureCareful* cl) {
duke@435 879 assert_lock_strong(freelistLock());
duke@435 880 // Can't use used_region() below because it may not necessarily
duke@435 881 // be the same as [bottom(),end()); although we could
duke@435 882 // use [used_region().start(),round_to(used_region().end(),CardSize)),
duke@435 883 // that appears too cumbersome, so we just do the simpler check
duke@435 884 // in the assertion below.
duke@435 885 assert(!mr.is_empty() && MemRegion(bottom(),end()).contains(mr),
duke@435 886 "mr should be non-empty and within used space");
duke@435 887 HeapWord *addr, *end;
duke@435 888 size_t size;
duke@435 889 for (addr = block_start_careful(mr.start()), end = mr.end();
duke@435 890 addr < end; addr += size) {
duke@435 891 FreeChunk* fc = (FreeChunk*)addr;
duke@435 892 if (fc->isFree()) {
duke@435 893 // Since we hold the free list lock, which protects direct
duke@435 894 // allocation in this generation by mutators, a free object
duke@435 895 // will remain free throughout this iteration code.
duke@435 896 size = fc->size();
duke@435 897 } else {
duke@435 898 // Note that the object need not necessarily be initialized,
duke@435 899 // because (for instance) the free list lock does NOT protect
duke@435 900 // object initialization. The closure application below must
duke@435 901 // therefore be correct in the face of uninitialized objects.
duke@435 902 size = cl->do_object_careful_m(oop(addr), mr);
duke@435 903 if (size == 0) {
duke@435 904 // An unparsable object found. Signal early termination.
duke@435 905 return addr;
duke@435 906 }
duke@435 907 }
duke@435 908 }
duke@435 909 return NULL;
duke@435 910 }
duke@435 911
duke@435 912
ysr@777 913 HeapWord* CompactibleFreeListSpace::block_start_const(const void* p) const {
duke@435 914 NOT_PRODUCT(verify_objects_initialized());
duke@435 915 return _bt.block_start(p);
duke@435 916 }
duke@435 917
duke@435 918 HeapWord* CompactibleFreeListSpace::block_start_careful(const void* p) const {
duke@435 919 return _bt.block_start_careful(p);
duke@435 920 }
duke@435 921
duke@435 922 size_t CompactibleFreeListSpace::block_size(const HeapWord* p) const {
duke@435 923 NOT_PRODUCT(verify_objects_initialized());
duke@435 924 assert(MemRegion(bottom(), end()).contains(p), "p not in space");
duke@435 925 // This must be volatile, or else there is a danger that the compiler
duke@435 926 // will compile the code below into a sometimes-infinite loop, by keeping
duke@435 927 // the value read the first time in a register.
duke@435 928 while (true) {
duke@435 929 // We must do this until we get a consistent view of the object.
coleenp@622 930 if (FreeChunk::indicatesFreeChunk(p)) {
coleenp@622 931 volatile FreeChunk* fc = (volatile FreeChunk*)p;
coleenp@622 932 size_t res = fc->size();
coleenp@622 933 // If the object is still a free chunk, return the size, else it
coleenp@622 934 // has been allocated so try again.
coleenp@622 935 if (FreeChunk::indicatesFreeChunk(p)) {
duke@435 936 assert(res != 0, "Block size should not be 0");
duke@435 937 return res;
duke@435 938 }
coleenp@622 939 } else {
coleenp@622 940 // must read from what 'p' points to in each loop.
coleenp@622 941 klassOop k = ((volatile oopDesc*)p)->klass_or_null();
coleenp@622 942 if (k != NULL) {
coleenp@622 943 assert(k->is_oop(true /* ignore mark word */), "Should really be klass oop.");
coleenp@622 944 oop o = (oop)p;
coleenp@622 945 assert(o->is_parsable(), "Should be parsable");
coleenp@622 946 assert(o->is_oop(true /* ignore mark word */), "Should be an oop.");
coleenp@622 947 size_t res = o->size_given_klass(k->klass_part());
coleenp@622 948 res = adjustObjectSize(res);
coleenp@622 949 assert(res != 0, "Block size should not be 0");
coleenp@622 950 return res;
coleenp@622 951 }
duke@435 952 }
duke@435 953 }
duke@435 954 }
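
(Aside: the loop above implements a lock-free "read, then re-validate" protocol: read the size, then re-check that the block is still a free chunk before trusting the value, retrying otherwise. A generic sketch of that pattern, with hypothetical helpers standing in for FreeChunk::indicatesFreeChunk() and the size accessors, follows.)

#include <cstddef>

// Returns a size that was consistent at some point during the call.
static size_t stable_block_size(const volatile void* p,
                                bool   (*is_free)(const volatile void*),
                                size_t (*free_size)(const volatile void*),
                                size_t (*object_size)(const volatile void*)) {
  while (true) {
    if (is_free(p)) {
      size_t res = free_size(p);   // may race with a concurrent allocation
      if (is_free(p)) {            // still free? then 'res' was a consistent read
        return res;
      }                            // else the chunk was just allocated; retry
    } else {
      size_t res = object_size(p); // 0 means the header is not yet published
      if (res != 0) {
        return res;
      }
    }
  }
}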
duke@435 955
duke@435 956 // A variant of the above that uses the Printezis bits for
duke@435 957 // unparsable but allocated objects. This avoids any possible
duke@435 958 // stalls waiting for mutators to initialize objects, and is
duke@435 959 // thus potentially faster than the variant above. However,
duke@435 960 // this variant may return a zero size for a block that is
duke@435 961 // under mutation and for which a consistent size cannot be
duke@435 962 // inferred without stalling; see CMSCollector::block_size_if_printezis_bits().
duke@435 963 size_t CompactibleFreeListSpace::block_size_no_stall(HeapWord* p,
duke@435 964 const CMSCollector* c)
duke@435 965 const {
duke@435 966 assert(MemRegion(bottom(), end()).contains(p), "p not in space");
duke@435 967 // This must be volatile, or else there is a danger that the compiler
duke@435 968 // will compile the code below into a sometimes-infinite loop, by keeping
duke@435 969 // the value read the first time in a register.
duke@435 970 DEBUG_ONLY(uint loops = 0;)
duke@435 971 while (true) {
duke@435 972 // We must do this until we get a consistent view of the object.
coleenp@622 973 if (FreeChunk::indicatesFreeChunk(p)) {
coleenp@622 974 volatile FreeChunk* fc = (volatile FreeChunk*)p;
coleenp@622 975 size_t res = fc->size();
coleenp@622 976 if (FreeChunk::indicatesFreeChunk(p)) {
duke@435 977 assert(res != 0, "Block size should not be 0");
duke@435 978 assert(loops == 0, "Should be 0");
duke@435 979 return res;
duke@435 980 }
duke@435 981 } else {
coleenp@622 982 // must read from what 'p' points to in each loop.
coleenp@622 983 klassOop k = ((volatile oopDesc*)p)->klass_or_null();
jmasa@953 984 if (k != NULL &&
jmasa@953 985 ((oopDesc*)p)->is_parsable() &&
jmasa@953 986 ((oopDesc*)p)->is_conc_safe()) {
coleenp@622 987 assert(k->is_oop(), "Should really be klass oop.");
coleenp@622 988 oop o = (oop)p;
coleenp@622 989 assert(o->is_oop(), "Should be an oop");
coleenp@622 990 size_t res = o->size_given_klass(k->klass_part());
coleenp@622 991 res = adjustObjectSize(res);
coleenp@622 992 assert(res != 0, "Block size should not be 0");
coleenp@622 993 return res;
coleenp@622 994 } else {
coleenp@622 995 return c->block_size_if_printezis_bits(p);
coleenp@622 996 }
duke@435 997 }
duke@435 998 assert(loops == 0, "Can loop at most once");
duke@435 999 DEBUG_ONLY(loops++;)
duke@435 1000 }
duke@435 1001 }
duke@435 1002
duke@435 1003 size_t CompactibleFreeListSpace::block_size_nopar(const HeapWord* p) const {
duke@435 1004 NOT_PRODUCT(verify_objects_initialized());
duke@435 1005 assert(MemRegion(bottom(), end()).contains(p), "p not in space");
duke@435 1006 FreeChunk* fc = (FreeChunk*)p;
duke@435 1007 if (fc->isFree()) {
duke@435 1008 return fc->size();
duke@435 1009 } else {
duke@435 1010 // Ignore mark word because this may be a recently promoted
duke@435 1011 // object whose mark word is used to chain together grey
duke@435 1012 // objects (the last one would have a null value).
duke@435 1013 assert(oop(p)->is_oop(true), "Should be an oop");
duke@435 1014 return adjustObjectSize(oop(p)->size());
duke@435 1015 }
duke@435 1016 }
duke@435 1017
duke@435 1018 // This implementation assumes that the property of "being an object" is
duke@435 1019 // stable. But being a free chunk may not be (because of parallel
duke@435 1020 // promotion.)
duke@435 1021 bool CompactibleFreeListSpace::block_is_obj(const HeapWord* p) const {
duke@435 1022 FreeChunk* fc = (FreeChunk*)p;
duke@435 1023 assert(is_in_reserved(p), "Should be in space");
duke@435 1024 // When doing a mark-sweep-compact of the CMS generation, this
duke@435 1025 // assertion may fail because prepare_for_compaction() uses
duke@435 1026 // space that is garbage to maintain information on ranges of
duke@435 1027 // live objects so that these live ranges can be moved as a whole.
duke@435 1028 // Comment out this assertion until that problem can be solved
duke@435 1029 // (i.e., that the block start calculation may look at objects
duke@435 1030 // at addresses below "p" in finding the object that contains "p",
duke@435 1031 // and those objects (if garbage) may have been modified to hold
duke@435 1032 // live range information.)
duke@435 1033 // assert(ParallelGCThreads > 0 || _bt.block_start(p) == p, "Should be a block boundary");
coleenp@622 1034 if (FreeChunk::indicatesFreeChunk(p)) return false;
coleenp@622 1035 klassOop k = oop(p)->klass_or_null();
duke@435 1036 if (k != NULL) {
duke@435 1037 // Ignore mark word because it may have been used to
duke@435 1038 // chain together promoted objects (the last one
duke@435 1039 // would have a null value).
duke@435 1040 assert(oop(p)->is_oop(true), "Should be an oop");
duke@435 1041 return true;
duke@435 1042 } else {
duke@435 1043 return false; // Was not an object at the start of collection.
duke@435 1044 }
duke@435 1045 }
duke@435 1046
duke@435 1047 // Check if the object is alive. This fact is checked either by consulting
duke@435 1048 // the main marking bitmap in the sweeping phase or, if it's a permanent
duke@435 1049 // generation and we're not in the sweeping phase, by checking the
duke@435 1050 // perm_gen_verify_bit_map where we store the "deadness" information if
duke@435 1051 // we did not sweep the perm gen in the most recent previous GC cycle.
duke@435 1052 bool CompactibleFreeListSpace::obj_is_alive(const HeapWord* p) const {
duke@435 1053 assert (block_is_obj(p), "The address should point to an object");
duke@435 1054
duke@435 1055 // If we're sweeping, we use object liveness information from the main bit map
duke@435 1056 // for both perm gen and old gen.
duke@435 1057 // We don't need to lock the bitmap (live_map or dead_map below), because
duke@435 1058 // EITHER we are in the middle of the sweeping phase, and the
duke@435 1059 // main marking bit map (live_map below) is locked,
duke@435 1060 // OR we're in other phases and perm_gen_verify_bit_map (dead_map below)
duke@435 1061 // is stable, because it's mutated only in the sweeping phase.
duke@435 1062 if (_collector->abstract_state() == CMSCollector::Sweeping) {
duke@435 1063 CMSBitMap* live_map = _collector->markBitMap();
duke@435 1064 return live_map->isMarked((HeapWord*) p);
duke@435 1065 } else {
duke@435 1066 // If we're not currently sweeping and we haven't swept the perm gen in
duke@435 1067 // the previous concurrent cycle then we may have dead but unswept objects
duke@435 1068 // in the perm gen. In this case, we use the "deadness" information
duke@435 1069 // that we had saved in perm_gen_verify_bit_map at the last sweep.
duke@435 1070 if (!CMSClassUnloadingEnabled && _collector->_permGen->reserved().contains(p)) {
duke@435 1071 if (_collector->verifying()) {
duke@435 1072 CMSBitMap* dead_map = _collector->perm_gen_verify_bit_map();
duke@435 1073 // Object is marked in the dead_map bitmap at the previous sweep
duke@435 1074 // when we know that it's dead; if the bitmap is not allocated then
duke@435 1075 // the object is alive.
duke@435 1076 return (dead_map->sizeInBits() == 0) // bit_map has been allocated
duke@435 1077 || !dead_map->par_isMarked((HeapWord*) p);
duke@435 1078 } else {
duke@435 1079 return false; // We can't say for sure if it's live, so we say that it's dead.
duke@435 1080 }
duke@435 1081 }
duke@435 1082 }
duke@435 1083 return true;
duke@435 1084 }
duke@435 1085
duke@435 1086 bool CompactibleFreeListSpace::block_is_obj_nopar(const HeapWord* p) const {
duke@435 1087 FreeChunk* fc = (FreeChunk*)p;
duke@435 1088 assert(is_in_reserved(p), "Should be in space");
duke@435 1089 assert(_bt.block_start(p) == p, "Should be a block boundary");
duke@435 1090 if (!fc->isFree()) {
duke@435 1091 // Ignore mark word because it may have been used to
duke@435 1092 // chain together promoted objects (the last one
duke@435 1093 // would have a null value).
duke@435 1094 assert(oop(p)->is_oop(true), "Should be an oop");
duke@435 1095 return true;
duke@435 1096 }
duke@435 1097 return false;
duke@435 1098 }
duke@435 1099
duke@435 1100 // "MT-safe but not guaranteed MT-precise" (TM); you may get an
duke@435 1101 // approximate answer if you don't hold the freelistlock when you call this.
duke@435 1102 size_t CompactibleFreeListSpace::totalSizeInIndexedFreeLists() const {
duke@435 1103 size_t size = 0;
duke@435 1104 for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
duke@435 1105 debug_only(
duke@435 1106 // We may be calling here without the lock in which case we
duke@435 1107 // won't do this modest sanity check.
duke@435 1108 if (freelistLock()->owned_by_self()) {
duke@435 1109 size_t total_list_size = 0;
duke@435 1110 for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
duke@435 1111 fc = fc->next()) {
duke@435 1112 total_list_size += i;
duke@435 1113 }
duke@435 1114 assert(total_list_size == i * _indexedFreeList[i].count(),
duke@435 1115 "Count in list is incorrect");
duke@435 1116 }
duke@435 1117 )
duke@435 1118 size += i * _indexedFreeList[i].count();
duke@435 1119 }
duke@435 1120 return size;
duke@435 1121 }
duke@435 1122
duke@435 1123 HeapWord* CompactibleFreeListSpace::par_allocate(size_t size) {
duke@435 1124 MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
duke@435 1125 return allocate(size);
duke@435 1126 }
duke@435 1127
duke@435 1128 HeapWord*
duke@435 1129 CompactibleFreeListSpace::getChunkFromSmallLinearAllocBlockRemainder(size_t size) {
duke@435 1130 return getChunkFromLinearAllocBlockRemainder(&_smallLinearAllocBlock, size);
duke@435 1131 }
duke@435 1132
duke@435 1133 HeapWord* CompactibleFreeListSpace::allocate(size_t size) {
duke@435 1134 assert_lock_strong(freelistLock());
duke@435 1135 HeapWord* res = NULL;
duke@435 1136 assert(size == adjustObjectSize(size),
duke@435 1137 "use adjustObjectSize() before calling into allocate()");
duke@435 1138
duke@435 1139 if (_adaptive_freelists) {
duke@435 1140 res = allocate_adaptive_freelists(size);
duke@435 1141 } else { // non-adaptive free lists
duke@435 1142 res = allocate_non_adaptive_freelists(size);
duke@435 1143 }
duke@435 1144
duke@435 1145 if (res != NULL) {
duke@435 1146 // check that res does lie in this space!
duke@435 1147 assert(is_in_reserved(res), "Not in this space!");
duke@435 1148 assert(is_aligned((void*)res), "alignment check");
duke@435 1149
duke@435 1150 FreeChunk* fc = (FreeChunk*)res;
duke@435 1151 fc->markNotFree();
duke@435 1152 assert(!fc->isFree(), "shouldn't be marked free");
coleenp@622 1153 assert(oop(fc)->klass_or_null() == NULL, "should look uninitialized");
duke@435 1154 // Verify that the block offset table shows this to
duke@435 1155 // be a single block, but not one which is unallocated.
duke@435 1156 _bt.verify_single_block(res, size);
duke@435 1157 _bt.verify_not_unallocated(res, size);
duke@435 1158 // mangle a just allocated object with a distinct pattern.
duke@435 1159 debug_only(fc->mangleAllocated(size));
duke@435 1160 }
duke@435 1161
duke@435 1162 return res;
duke@435 1163 }
duke@435 1164
duke@435 1165 HeapWord* CompactibleFreeListSpace::allocate_non_adaptive_freelists(size_t size) {
duke@435 1166 HeapWord* res = NULL;
duke@435 1167 // try and use linear allocation for smaller blocks
duke@435 1168 if (size < _smallLinearAllocBlock._allocation_size_limit) {
duke@435 1169 // if successful, the following also adjusts block offset table
duke@435 1170 res = getChunkFromSmallLinearAllocBlock(size);
duke@435 1171 }
duke@435 1172 // Else triage to indexed lists for smaller sizes
duke@435 1173 if (res == NULL) {
duke@435 1174 if (size < SmallForDictionary) {
duke@435 1175 res = (HeapWord*) getChunkFromIndexedFreeList(size);
duke@435 1176 } else {
duke@435 1177 // else get it from the big dictionary; if even this doesn't
duke@435 1178 // work we are out of luck.
duke@435 1179 res = (HeapWord*)getChunkFromDictionaryExact(size);
duke@435 1180 }
duke@435 1181 }
duke@435 1182
duke@435 1183 return res;
duke@435 1184 }
duke@435 1185
duke@435 1186 HeapWord* CompactibleFreeListSpace::allocate_adaptive_freelists(size_t size) {
duke@435 1187 assert_lock_strong(freelistLock());
duke@435 1188 HeapWord* res = NULL;
duke@435 1189 assert(size == adjustObjectSize(size),
duke@435 1190 "use adjustObjectSize() before calling into allocate()");
duke@435 1191
duke@435 1192 // Strategy
duke@435 1193 // if small
duke@435 1194 // exact size from the small-object indexed lists
duke@435 1195 // small or large linear allocation block (linAB) as appropriate
duke@435 1196 // take from lists of greater sized chunks
duke@435 1197 // else
duke@435 1198 // dictionary
duke@435 1199 // small or large linear allocation block if it has the space
duke@435 1200 // Try allocating exact size from indexTable first
duke@435 1201 if (size < IndexSetSize) {
duke@435 1202 res = (HeapWord*) getChunkFromIndexedFreeList(size);
duke@435 1203 if (res != NULL) {
duke@435 1204 assert(res != (HeapWord*)_indexedFreeList[size].head(),
duke@435 1205 "Not removed from free list");
duke@435 1206 // no block offset table adjustment is necessary on blocks in
duke@435 1207 // the indexed lists.
duke@435 1208
duke@435 1209 // Try allocating from the small LinAB
duke@435 1210 } else if (size < _smallLinearAllocBlock._allocation_size_limit &&
duke@435 1211 (res = getChunkFromSmallLinearAllocBlock(size)) != NULL) {
duke@435 1212 // if successful, the above also adjusts block offset table
duke@435 1213 // Note that this call will refill the LinAB to
duke@435 1214 // satisfy the request. This is different than
duke@435 1215 // evm.
duke@435 1216 // Don't record chunk off a LinAB? smallSplitBirth(size);
duke@435 1217
duke@435 1218 } else {
duke@435 1219 // Raid the exact free lists larger than size, even if they are not
duke@435 1220 // overpopulated.
duke@435 1221 res = (HeapWord*) getChunkFromGreater(size);
duke@435 1222 }
duke@435 1223 } else {
duke@435 1224 // Big objects get allocated directly from the dictionary.
duke@435 1225 res = (HeapWord*) getChunkFromDictionaryExact(size);
duke@435 1226 if (res == NULL) {
duke@435 1227 // Try hard not to fail since an allocation failure will likely
duke@435 1228 // trigger a synchronous GC. Try to get the space from the
duke@435 1229 // allocation blocks.
duke@435 1230 res = getChunkFromSmallLinearAllocBlockRemainder(size);
duke@435 1231 }
duke@435 1232 }
duke@435 1233
duke@435 1234 return res;
duke@435 1235 }
duke@435 1236
duke@435 1237 // A worst-case estimate of the space required (in HeapWords) to expand the heap
duke@435 1238 // when promoting obj.
duke@435 1239 size_t CompactibleFreeListSpace::expansionSpaceRequired(size_t obj_size) const {
duke@435 1240 // Depending on the object size, expansion may require refilling either a
duke@435 1241 // bigLAB or a smallLAB plus refilling a PromotionInfo object. MinChunkSize
duke@435 1242 // is added because the dictionary may over-allocate to avoid fragmentation.
duke@435 1243 size_t space = obj_size;
duke@435 1244 if (!_adaptive_freelists) {
duke@435 1245 space = MAX2(space, _smallLinearAllocBlock._refillSize);
duke@435 1246 }
duke@435 1247 space += _promoInfo.refillSize() + 2 * MinChunkSize;
duke@435 1248 return space;
duke@435 1249 }
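// A worked example with purely hypothetical numbers (not taken from this
// code): for obj_size = 100 words, a non-adaptive linAB refill size of 256
// words, a PromotionInfo refill of 128 words and MinChunkSize of 4 words,
// the estimate is MAX2(100, 256) + 128 + 2*4 = 392 heap words. The
// 2 * MinChunkSize slack allows for the dictionary handing back slightly
// larger chunks to avoid leaving fragments below the minimum chunk size.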
duke@435 1250
duke@435 1251 FreeChunk* CompactibleFreeListSpace::getChunkFromGreater(size_t numWords) {
duke@435 1252 FreeChunk* ret;
duke@435 1253
duke@435 1254 assert(numWords >= MinChunkSize, "Size is less than minimum");
duke@435 1255 assert(linearAllocationWouldFail() || bestFitFirst(),
duke@435 1256 "Should not be here");
duke@435 1257
duke@435 1258 size_t i;
duke@435 1259 size_t currSize = numWords + MinChunkSize;
duke@435 1260 assert(currSize % MinObjAlignment == 0, "currSize should be aligned");
duke@435 1261 for (i = currSize; i < IndexSetSize; i += IndexSetStride) {
duke@435 1262 FreeList* fl = &_indexedFreeList[i];
duke@435 1263 if (fl->head()) {
duke@435 1264 ret = getFromListGreater(fl, numWords);
duke@435 1265 assert(ret == NULL || ret->isFree(), "Should be returning a free chunk");
duke@435 1266 return ret;
duke@435 1267 }
duke@435 1268 }
duke@435 1269
duke@435 1270 currSize = MAX2((size_t)SmallForDictionary,
duke@435 1271 (size_t)(numWords + MinChunkSize));
duke@435 1272
duke@435 1273 /* Try to get a chunk that satisfies request, while avoiding
duke@435 1274 fragmentation that can't be handled. */
duke@435 1275 {
duke@435 1276 ret = dictionary()->getChunk(currSize);
duke@435 1277 if (ret != NULL) {
duke@435 1278 assert(ret->size() - numWords >= MinChunkSize,
duke@435 1279 "Chunk is too small");
duke@435 1280 _bt.allocated((HeapWord*)ret, ret->size());
duke@435 1281 /* Carve returned chunk. */
duke@435 1282 (void) splitChunkAndReturnRemainder(ret, numWords);
duke@435 1283 /* Label this as no longer a free chunk. */
duke@435 1284 assert(ret->isFree(), "This chunk should be free");
duke@435 1285 ret->linkPrev(NULL);
duke@435 1286 }
duke@435 1287 assert(ret == NULL || ret->isFree(), "Should be returning a free chunk");
duke@435 1288 return ret;
duke@435 1289 }
duke@435 1290 ShouldNotReachHere();
duke@435 1291 }
duke@435 1292
duke@435 1293 bool CompactibleFreeListSpace::verifyChunkInIndexedFreeLists(FreeChunk* fc)
duke@435 1294 const {
duke@435 1295 assert(fc->size() < IndexSetSize, "Size of chunk is too large");
duke@435 1296 return _indexedFreeList[fc->size()].verifyChunkInFreeLists(fc);
duke@435 1297 }
duke@435 1298
duke@435 1299 bool CompactibleFreeListSpace::verifyChunkInFreeLists(FreeChunk* fc) const {
duke@435 1300 if (fc->size() >= IndexSetSize) {
duke@435 1301 return dictionary()->verifyChunkInFreeLists(fc);
duke@435 1302 } else {
duke@435 1303 return verifyChunkInIndexedFreeLists(fc);
duke@435 1304 }
duke@435 1305 }
duke@435 1306
duke@435 1307 #ifndef PRODUCT
duke@435 1308 void CompactibleFreeListSpace::assert_locked() const {
duke@435 1309 CMSLockVerifier::assert_locked(freelistLock(), parDictionaryAllocLock());
duke@435 1310 }
ysr@1580 1311
ysr@1580 1312 void CompactibleFreeListSpace::assert_locked(const Mutex* lock) const {
ysr@1580 1313 CMSLockVerifier::assert_locked(lock);
ysr@1580 1314 }
duke@435 1315 #endif
duke@435 1316
duke@435 1317 FreeChunk* CompactibleFreeListSpace::allocateScratch(size_t size) {
duke@435 1318 // In the parallel case, the main thread holds the free list lock
duke@435 1319 // on behalf of the parallel threads.
duke@435 1320 FreeChunk* fc;
duke@435 1321 {
duke@435 1322 // If GC is parallel, this might be called by several threads.
duke@435 1323 // This should be rare enough that the locking overhead won't affect
duke@435 1324 // the sequential code.
duke@435 1325 MutexLockerEx x(parDictionaryAllocLock(),
duke@435 1326 Mutex::_no_safepoint_check_flag);
duke@435 1327 fc = getChunkFromDictionary(size);
duke@435 1328 }
duke@435 1329 if (fc != NULL) {
duke@435 1330 fc->dontCoalesce();
duke@435 1331 assert(fc->isFree(), "Should be free, but not coalescable");
duke@435 1332 // Verify that the block offset table shows this to
duke@435 1333 // be a single block, but not one which is unallocated.
duke@435 1334 _bt.verify_single_block((HeapWord*)fc, fc->size());
duke@435 1335 _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
duke@435 1336 }
duke@435 1337 return fc;
duke@435 1338 }
duke@435 1339
coleenp@548 1340 oop CompactibleFreeListSpace::promote(oop obj, size_t obj_size) {
duke@435 1341 assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
duke@435 1342 assert_locked();
duke@435 1343
duke@435 1344 // If we are tracking promotions, then first ensure space for
duke@435 1345 // promotion (including spooling space for saving the header if necessary);
duke@435 1346 // then allocate and copy, and finally track the promotion info if needed.
duke@435 1347 // When tracking (see PromotionInfo::track()), the mark word may
duke@435 1348 // be displaced and in this case restoration of the mark word
duke@435 1349 // occurs in the (oop_since_save_marks_)iterate phase.
duke@435 1350 if (_promoInfo.tracking() && !_promoInfo.ensure_spooling_space()) {
duke@435 1351 return NULL;
duke@435 1352 }
duke@435 1353 // Call the allocate(size_t, bool) form directly to avoid the
duke@435 1354 // additional call through the allocate(size_t) form. Having
duke@435 1355 // the compiler inline the call is problematic because allocate(size_t)
duke@435 1356 // is a virtual method.
duke@435 1357 HeapWord* res = allocate(adjustObjectSize(obj_size));
duke@435 1358 if (res != NULL) {
duke@435 1359 Copy::aligned_disjoint_words((HeapWord*)obj, res, obj_size);
duke@435 1360 // if we should be tracking promotions, do so.
duke@435 1361 if (_promoInfo.tracking()) {
duke@435 1362 _promoInfo.track((PromotedObject*)res);
duke@435 1363 }
duke@435 1364 }
duke@435 1365 return oop(res);
duke@435 1366 }
duke@435 1367
duke@435 1368 HeapWord*
duke@435 1369 CompactibleFreeListSpace::getChunkFromSmallLinearAllocBlock(size_t size) {
duke@435 1370 assert_locked();
duke@435 1371 assert(size >= MinChunkSize, "minimum chunk size");
duke@435 1372 assert(size < _smallLinearAllocBlock._allocation_size_limit,
duke@435 1373 "maximum from smallLinearAllocBlock");
duke@435 1374 return getChunkFromLinearAllocBlock(&_smallLinearAllocBlock, size);
duke@435 1375 }
duke@435 1376
duke@435 1377 HeapWord*
duke@435 1378 CompactibleFreeListSpace::getChunkFromLinearAllocBlock(LinearAllocBlock *blk,
duke@435 1379 size_t size) {
duke@435 1380 assert_locked();
duke@435 1381 assert(size >= MinChunkSize, "too small");
duke@435 1382 HeapWord* res = NULL;
duke@435 1383 // Try to do linear allocation from blk, making sure that it is non-empty.
duke@435 1384 if (blk->_word_size == 0) {
duke@435 1385 // We were probably unable to refill this block either in the prologue or
duke@435 1386 // after it was exhausted by the last linear allocation. Bail out until
duke@435 1387 // next time.
duke@435 1388 assert(blk->_ptr == NULL, "consistency check");
duke@435 1389 return NULL;
duke@435 1390 }
duke@435 1391 assert(blk->_word_size != 0 && blk->_ptr != NULL, "consistency check");
duke@435 1392 res = getChunkFromLinearAllocBlockRemainder(blk, size);
duke@435 1393 if (res != NULL) return res;
duke@435 1394
duke@435 1395 // about to exhaust this linear allocation block
duke@435 1396 if (blk->_word_size == size) { // exactly satisfied
duke@435 1397 res = blk->_ptr;
duke@435 1398 _bt.allocated(res, blk->_word_size);
duke@435 1399 } else if (size + MinChunkSize <= blk->_refillSize) {
ysr@1580 1400 size_t sz = blk->_word_size;
duke@435 1401 // Update _unallocated_block if the size is such that the chunk would be
duke@435 1402 // returned to the indexed free list. All other chunks in the indexed
duke@435 1403 // free lists are allocated from the dictionary, so _unallocated_block
duke@435 1404 // has already been adjusted for them. Do it here so that the cost
duke@435 1405 // is incurred just once, for all chunks added back to the indexed free lists.
ysr@1580 1406 if (sz < SmallForDictionary) {
ysr@1580 1407 _bt.allocated(blk->_ptr, sz);
duke@435 1408 }
duke@435 1409 // Return the chunk that isn't big enough, and then refill below.
ysr@1580 1410 addChunkToFreeLists(blk->_ptr, sz);
ysr@1580 1411 splitBirth(sz);
duke@435 1412 // Don't keep statistics on adding back a chunk from a LinAB.
duke@435 1413 } else {
duke@435 1414 // A refilled block would not satisfy the request.
duke@435 1415 return NULL;
duke@435 1416 }
duke@435 1417
duke@435 1418 blk->_ptr = NULL; blk->_word_size = 0;
duke@435 1419 refillLinearAllocBlock(blk);
duke@435 1420 assert(blk->_ptr == NULL || blk->_word_size >= size + MinChunkSize,
duke@435 1421 "block was replenished");
duke@435 1422 if (res != NULL) {
duke@435 1423 splitBirth(size);
duke@435 1424 repairLinearAllocBlock(blk);
duke@435 1425 } else if (blk->_ptr != NULL) {
duke@435 1426 res = blk->_ptr;
duke@435 1427 size_t blk_size = blk->_word_size;
duke@435 1428 blk->_word_size -= size;
duke@435 1429 blk->_ptr += size;
duke@435 1430 splitBirth(size);
duke@435 1431 repairLinearAllocBlock(blk);
duke@435 1432 // Update BOT last so that other (parallel) GC threads see a consistent
duke@435 1433 // view of the BOT and free blocks.
duke@435 1434 // Above must occur before BOT is updated below.
duke@435 1435 _bt.split_block(res, blk_size, size); // adjust block offset table
duke@435 1436 }
duke@435 1437 return res;
duke@435 1438 }
duke@435 1439
duke@435 1440 HeapWord* CompactibleFreeListSpace::getChunkFromLinearAllocBlockRemainder(
duke@435 1441 LinearAllocBlock* blk,
duke@435 1442 size_t size) {
duke@435 1443 assert_locked();
duke@435 1444 assert(size >= MinChunkSize, "too small");
duke@435 1445
duke@435 1446 HeapWord* res = NULL;
duke@435 1447 // This is the common case. Keep it simple.
duke@435 1448 if (blk->_word_size >= size + MinChunkSize) {
duke@435 1449 assert(blk->_ptr != NULL, "consistency check");
duke@435 1450 res = blk->_ptr;
duke@435 1451 // Note that the BOT is up-to-date for the linAB before allocation. It
duke@435 1452 // indicates the start of the linAB. The split_block() updates the
duke@435 1453 // BOT for the linAB after the allocation (indicates the start of the
duke@435 1454 // next chunk to be allocated).
duke@435 1455 size_t blk_size = blk->_word_size;
duke@435 1456 blk->_word_size -= size;
duke@435 1457 blk->_ptr += size;
duke@435 1458 splitBirth(size);
duke@435 1459 repairLinearAllocBlock(blk);
duke@435 1460 // Update BOT last so that other (parallel) GC threads see a consistent
duke@435 1461 // view of the BOT and free blocks.
duke@435 1462 // Above must occur before BOT is updated below.
duke@435 1463 _bt.split_block(res, blk_size, size); // adjust block offset table
duke@435 1464 _bt.allocated(res, size);
duke@435 1465 }
duke@435 1466 return res;
duke@435 1467 }
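// An illustrative sketch of the carving arithmetic above, with hypothetical
// numbers and a free-standing struct (the CFLS_ILLUSTRATION_ONLY guard is a
// hypothetical macro that is never defined, so this is not compiled):
// serving a 60-word request from a 1000-word linAB hands out the first 60
// words, leaves _word_size == 940, advances _ptr by 60 words, and only then
// updates the BOT so concurrent readers see a consistent picture.
#ifdef CFLS_ILLUSTRATION_ONLY
struct LinABSketch { char* ptr; size_t word_size; };
static char* carve_from_linab_sketch(LinABSketch* blk, size_t request_words,
                                     size_t min_chunk_words, size_t bytes_per_word) {
  if (blk->word_size < request_words + min_chunk_words) {
    return NULL;                          // remainder would be too small
  }
  char* res = blk->ptr;                   // hand out the front of the block
  blk->word_size -= request_words;        // shrink the remainder
  blk->ptr += request_words * bytes_per_word;
  return res;                             // the real code updates the BOT here
}
#endif // CFLS_ILLUSTRATION_ONLY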
duke@435 1468
duke@435 1469 FreeChunk*
duke@435 1470 CompactibleFreeListSpace::getChunkFromIndexedFreeList(size_t size) {
duke@435 1471 assert_locked();
duke@435 1472 assert(size < SmallForDictionary, "just checking");
duke@435 1473 FreeChunk* res;
duke@435 1474 res = _indexedFreeList[size].getChunkAtHead();
duke@435 1475 if (res == NULL) {
duke@435 1476 res = getChunkFromIndexedFreeListHelper(size);
duke@435 1477 }
duke@435 1478 _bt.verify_not_unallocated((HeapWord*) res, size);
ysr@1580 1479 assert(res == NULL || res->size() == size, "Incorrect block size");
duke@435 1480 return res;
duke@435 1481 }
duke@435 1482
duke@435 1483 FreeChunk*
ysr@1580 1484 CompactibleFreeListSpace::getChunkFromIndexedFreeListHelper(size_t size,
ysr@1580 1485 bool replenish) {
duke@435 1486 assert_locked();
duke@435 1487 FreeChunk* fc = NULL;
duke@435 1488 if (size < SmallForDictionary) {
duke@435 1489 assert(_indexedFreeList[size].head() == NULL ||
duke@435 1490 _indexedFreeList[size].surplus() <= 0,
duke@435 1491 "List for this size should be empty or under populated");
duke@435 1492 // Try best fit in exact lists before replenishing the list
duke@435 1493 if (!bestFitFirst() || (fc = bestFitSmall(size)) == NULL) {
duke@435 1494 // Replenish list.
duke@435 1495 //
duke@435 1496 // Things tried that failed.
duke@435 1497 // Tried allocating out of the two LinAB's first before
duke@435 1498 // replenishing lists.
duke@435 1499 // Tried small linAB of size 256 (size in indexed list)
duke@435 1500 // and replenishing indexed lists from the small linAB.
duke@435 1501 //
duke@435 1502 FreeChunk* newFc = NULL;
ysr@1580 1503 const size_t replenish_size = CMSIndexedFreeListReplenish * size;
duke@435 1504 if (replenish_size < SmallForDictionary) {
duke@435 1505 // Do not replenish from an underpopulated size.
duke@435 1506 if (_indexedFreeList[replenish_size].surplus() > 0 &&
duke@435 1507 _indexedFreeList[replenish_size].head() != NULL) {
ysr@1580 1508 newFc = _indexedFreeList[replenish_size].getChunkAtHead();
ysr@1580 1509 } else if (bestFitFirst()) {
duke@435 1510 newFc = bestFitSmall(replenish_size);
duke@435 1511 }
duke@435 1512 }
ysr@1580 1513 if (newFc == NULL && replenish_size > size) {
ysr@1580 1514 assert(CMSIndexedFreeListReplenish > 1, "ctl pt invariant");
ysr@1580 1515 newFc = getChunkFromIndexedFreeListHelper(replenish_size, false);
ysr@1580 1516 }
ysr@1580 1517 // Note: the stats update for the split-death of the block obtained above
ysr@1580 1518 // is recorded below, precisely when we know that we are actually going
ysr@1580 1519 // to split it into more than one piece.
duke@435 1520 if (newFc != NULL) {
ysr@1580 1521 if (replenish || CMSReplenishIntermediate) {
ysr@1580 1522 // Replenish this list and return one block to caller.
ysr@1580 1523 size_t i;
ysr@1580 1524 FreeChunk *curFc, *nextFc;
ysr@1580 1525 size_t num_blk = newFc->size() / size;
ysr@1580 1526 assert(num_blk >= 1, "Smaller than requested?");
ysr@1580 1527 assert(newFc->size() % size == 0, "Should be integral multiple of request");
ysr@1580 1528 if (num_blk > 1) {
ysr@1580 1529 // we are sure we will be splitting the block just obtained
ysr@1580 1530 // into multiple pieces; record the split-death of the original
ysr@1580 1531 splitDeath(replenish_size);
ysr@1580 1532 }
ysr@1580 1533 // carve up and link blocks 0, ..., num_blk - 2
ysr@1580 1534 // The last chunk is not added to the lists but is returned as the
ysr@1580 1535 // free chunk.
ysr@1580 1536 for (curFc = newFc, nextFc = (FreeChunk*)((HeapWord*)curFc + size),
ysr@1580 1537 i = 0;
ysr@1580 1538 i < (num_blk - 1);
ysr@1580 1539 curFc = nextFc, nextFc = (FreeChunk*)((HeapWord*)nextFc + size),
ysr@1580 1540 i++) {
ysr@1580 1541 curFc->setSize(size);
ysr@1580 1542 // Don't record this as a return in order to try and
ysr@1580 1543 // determine the "returns" from a GC.
ysr@1580 1544 _bt.verify_not_unallocated((HeapWord*) curFc, size); // verify the chunk being carved
ysr@1580 1545 _indexedFreeList[size].returnChunkAtTail(curFc, false);
ysr@1580 1546 _bt.mark_block((HeapWord*)curFc, size);
ysr@1580 1547 splitBirth(size);
ysr@1580 1548 // Don't record the initial population of the indexed list
ysr@1580 1549 // as a split birth.
ysr@1580 1550 }
ysr@1580 1551
ysr@1580 1552 // check that the arithmetic was OK above
ysr@1580 1553 assert((HeapWord*)nextFc == (HeapWord*)newFc + num_blk*size,
ysr@1580 1554 "inconsistency in carving newFc");
duke@435 1555 curFc->setSize(size);
duke@435 1556 _bt.mark_block((HeapWord*)curFc, size);
duke@435 1557 splitBirth(size);
ysr@1580 1558 fc = curFc;
ysr@1580 1559 } else {
ysr@1580 1560 // Return entire block to caller
ysr@1580 1561 fc = newFc;
duke@435 1562 }
duke@435 1563 }
duke@435 1564 }
duke@435 1565 } else {
duke@435 1566 // Get a free chunk from the free chunk dictionary to be returned to
duke@435 1567 // replenish the indexed free list.
duke@435 1568 fc = getChunkFromDictionaryExact(size);
duke@435 1569 }
ysr@1580 1570 // assert(fc == NULL || fc->isFree(), "Should be returning a free chunk");
duke@435 1571 return fc;
duke@435 1572 }
duke@435 1573
duke@435 1574 FreeChunk*
duke@435 1575 CompactibleFreeListSpace::getChunkFromDictionary(size_t size) {
duke@435 1576 assert_locked();
duke@435 1577 FreeChunk* fc = _dictionary->getChunk(size);
duke@435 1578 if (fc == NULL) {
duke@435 1579 return NULL;
duke@435 1580 }
duke@435 1581 _bt.allocated((HeapWord*)fc, fc->size());
duke@435 1582 if (fc->size() >= size + MinChunkSize) {
duke@435 1583 fc = splitChunkAndReturnRemainder(fc, size);
duke@435 1584 }
duke@435 1585 assert(fc->size() >= size, "chunk too small");
duke@435 1586 assert(fc->size() < size + MinChunkSize, "chunk too big");
duke@435 1587 _bt.verify_single_block((HeapWord*)fc, fc->size());
duke@435 1588 return fc;
duke@435 1589 }
duke@435 1590
duke@435 1591 FreeChunk*
duke@435 1592 CompactibleFreeListSpace::getChunkFromDictionaryExact(size_t size) {
duke@435 1593 assert_locked();
duke@435 1594 FreeChunk* fc = _dictionary->getChunk(size);
duke@435 1595 if (fc == NULL) {
duke@435 1596 return fc;
duke@435 1597 }
duke@435 1598 _bt.allocated((HeapWord*)fc, fc->size());
duke@435 1599 if (fc->size() == size) {
duke@435 1600 _bt.verify_single_block((HeapWord*)fc, size);
duke@435 1601 return fc;
duke@435 1602 }
duke@435 1603 assert(fc->size() > size, "getChunk() guarantee");
duke@435 1604 if (fc->size() < size + MinChunkSize) {
duke@435 1605 // Return the chunk to the dictionary and go get a bigger one.
duke@435 1606 returnChunkToDictionary(fc);
duke@435 1607 fc = _dictionary->getChunk(size + MinChunkSize);
duke@435 1608 if (fc == NULL) {
duke@435 1609 return NULL;
duke@435 1610 }
duke@435 1611 _bt.allocated((HeapWord*)fc, fc->size());
duke@435 1612 }
duke@435 1613 assert(fc->size() >= size + MinChunkSize, "tautology");
duke@435 1614 fc = splitChunkAndReturnRemainder(fc, size);
duke@435 1615 assert(fc->size() == size, "chunk is wrong size");
duke@435 1616 _bt.verify_single_block((HeapWord*)fc, size);
duke@435 1617 return fc;
duke@435 1618 }
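// A worked example with hypothetical sizes: for a 300-word request with a
// MinChunkSize of 4 words, a 302-word chunk from the dictionary cannot be
// split (the 2-word remainder would be below MinChunkSize), so it is
// returned and a chunk of at least 304 words is requested instead; that one
// can always be split into an exact 300-word block plus a remainder of at
// least MinChunkSize.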
duke@435 1619
duke@435 1620 void
duke@435 1621 CompactibleFreeListSpace::returnChunkToDictionary(FreeChunk* chunk) {
duke@435 1622 assert_locked();
duke@435 1623
duke@435 1624 size_t size = chunk->size();
duke@435 1625 _bt.verify_single_block((HeapWord*)chunk, size);
duke@435 1626 // adjust _unallocated_block downward, as necessary
duke@435 1627 _bt.freed((HeapWord*)chunk, size);
duke@435 1628 _dictionary->returnChunk(chunk);
ysr@1580 1629 #ifndef PRODUCT
ysr@1580 1630 if (CMSCollector::abstract_state() != CMSCollector::Sweeping) {
ysr@1580 1631 TreeChunk::as_TreeChunk(chunk)->list()->verify_stats();
ysr@1580 1632 }
ysr@1580 1633 #endif // PRODUCT
duke@435 1634 }
duke@435 1635
duke@435 1636 void
duke@435 1637 CompactibleFreeListSpace::returnChunkToFreeList(FreeChunk* fc) {
duke@435 1638 assert_locked();
duke@435 1639 size_t size = fc->size();
duke@435 1640 _bt.verify_single_block((HeapWord*) fc, size);
duke@435 1641 _bt.verify_not_unallocated((HeapWord*) fc, size);
duke@435 1642 if (_adaptive_freelists) {
duke@435 1643 _indexedFreeList[size].returnChunkAtTail(fc);
duke@435 1644 } else {
duke@435 1645 _indexedFreeList[size].returnChunkAtHead(fc);
duke@435 1646 }
ysr@1580 1647 #ifndef PRODUCT
ysr@1580 1648 if (CMSCollector::abstract_state() != CMSCollector::Sweeping) {
ysr@1580 1649 _indexedFreeList[size].verify_stats();
ysr@1580 1650 }
ysr@1580 1651 #endif // PRODUCT
duke@435 1652 }
duke@435 1653
duke@435 1654 // Add chunk to end of last block -- if it's the largest
duke@435 1655 // block -- and update BOT and census data. We would
duke@435 1656 // of course have preferred to coalesce it with the
duke@435 1657 // last block, but it's currently less expensive to find the
duke@435 1658 // largest block than it is to find the last.
duke@435 1659 void
duke@435 1660 CompactibleFreeListSpace::addChunkToFreeListsAtEndRecordingStats(
duke@435 1661 HeapWord* chunk, size_t size) {
duke@435 1662 // check that the chunk does lie in this space!
duke@435 1663 assert(chunk != NULL && is_in_reserved(chunk), "Not in this space!");
duke@435 1664 // One of the parallel gc task threads may be here
duke@435 1665 // whilst others are allocating.
duke@435 1666 Mutex* lock = NULL;
duke@435 1667 if (ParallelGCThreads != 0) {
duke@435 1668 lock = &_parDictionaryAllocLock;
duke@435 1669 }
duke@435 1670 FreeChunk* ec;
duke@435 1671 {
duke@435 1672 MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
duke@435 1673 ec = dictionary()->findLargestDict(); // get largest block
duke@435 1674 if (ec != NULL && ec->end() == chunk) {
duke@435 1675 // It's a coterminal block - we can coalesce.
duke@435 1676 size_t old_size = ec->size();
duke@435 1677 coalDeath(old_size);
duke@435 1678 removeChunkFromDictionary(ec);
duke@435 1679 size += old_size;
duke@435 1680 } else {
duke@435 1681 ec = (FreeChunk*)chunk;
duke@435 1682 }
duke@435 1683 }
duke@435 1684 ec->setSize(size);
duke@435 1685 debug_only(ec->mangleFreed(size));
duke@435 1686 if (size < SmallForDictionary) {
duke@435 1687 lock = _indexedFreeListParLocks[size];
duke@435 1688 }
duke@435 1689 MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
duke@435 1690 addChunkAndRepairOffsetTable((HeapWord*)ec, size, true);
duke@435 1691 // Record the birth under the lock, since the recording involves
duke@435 1692 // manipulation of the list on which the chunk lives; and
duke@435 1693 // if the chunk were allocated and were the last one on the list,
duke@435 1694 // the list could go away.
duke@435 1695 coalBirth(size);
duke@435 1696 }
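// A worked example with hypothetical sizes: if the largest dictionary block
// is 200 words and ends exactly where a 50-word chunk being added begins,
// the two are coterminal, so the 200-word block is removed (a coalesce-death
// at size 200) and a single 250-word free block is recorded in its place
// (a coalesce-birth at size 250), with the BOT repaired to describe one block.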
duke@435 1697
duke@435 1698 void
duke@435 1699 CompactibleFreeListSpace::addChunkToFreeLists(HeapWord* chunk,
duke@435 1700 size_t size) {
duke@435 1701 // check that the chunk does lie in this space!
duke@435 1702 assert(chunk != NULL && is_in_reserved(chunk), "Not in this space!");
duke@435 1703 assert_locked();
duke@435 1704 _bt.verify_single_block(chunk, size);
duke@435 1705
duke@435 1706 FreeChunk* fc = (FreeChunk*) chunk;
duke@435 1707 fc->setSize(size);
duke@435 1708 debug_only(fc->mangleFreed(size));
duke@435 1709 if (size < SmallForDictionary) {
duke@435 1710 returnChunkToFreeList(fc);
duke@435 1711 } else {
duke@435 1712 returnChunkToDictionary(fc);
duke@435 1713 }
duke@435 1714 }
duke@435 1715
duke@435 1716 void
duke@435 1717 CompactibleFreeListSpace::addChunkAndRepairOffsetTable(HeapWord* chunk,
duke@435 1718 size_t size, bool coalesced) {
duke@435 1719 assert_locked();
duke@435 1720 assert(chunk != NULL, "null chunk");
duke@435 1721 if (coalesced) {
duke@435 1722 // repair BOT
duke@435 1723 _bt.single_block(chunk, size);
duke@435 1724 }
duke@435 1725 addChunkToFreeLists(chunk, size);
duke@435 1726 }
duke@435 1727
duke@435 1728 // We _must_ find the purported chunk on our free lists;
duke@435 1729 // we assert if we don't.
duke@435 1730 void
duke@435 1731 CompactibleFreeListSpace::removeFreeChunkFromFreeLists(FreeChunk* fc) {
duke@435 1732 size_t size = fc->size();
duke@435 1733 assert_locked();
duke@435 1734 debug_only(verifyFreeLists());
duke@435 1735 if (size < SmallForDictionary) {
duke@435 1736 removeChunkFromIndexedFreeList(fc);
duke@435 1737 } else {
duke@435 1738 removeChunkFromDictionary(fc);
duke@435 1739 }
duke@435 1740 _bt.verify_single_block((HeapWord*)fc, size);
duke@435 1741 debug_only(verifyFreeLists());
duke@435 1742 }
duke@435 1743
duke@435 1744 void
duke@435 1745 CompactibleFreeListSpace::removeChunkFromDictionary(FreeChunk* fc) {
duke@435 1746 size_t size = fc->size();
duke@435 1747 assert_locked();
duke@435 1748 assert(fc != NULL, "null chunk");
duke@435 1749 _bt.verify_single_block((HeapWord*)fc, size);
duke@435 1750 _dictionary->removeChunk(fc);
duke@435 1751 // adjust _unallocated_block upward, as necessary
duke@435 1752 _bt.allocated((HeapWord*)fc, size);
duke@435 1753 }
duke@435 1754
duke@435 1755 void
duke@435 1756 CompactibleFreeListSpace::removeChunkFromIndexedFreeList(FreeChunk* fc) {
duke@435 1757 assert_locked();
duke@435 1758 size_t size = fc->size();
duke@435 1759 _bt.verify_single_block((HeapWord*)fc, size);
duke@435 1760 NOT_PRODUCT(
duke@435 1761 if (FLSVerifyIndexTable) {
duke@435 1762 verifyIndexedFreeList(size);
duke@435 1763 }
duke@435 1764 )
duke@435 1765 _indexedFreeList[size].removeChunk(fc);
duke@435 1766 debug_only(fc->clearNext());
duke@435 1767 debug_only(fc->clearPrev());
duke@435 1768 NOT_PRODUCT(
duke@435 1769 if (FLSVerifyIndexTable) {
duke@435 1770 verifyIndexedFreeList(size);
duke@435 1771 }
duke@435 1772 )
duke@435 1773 }
duke@435 1774
duke@435 1775 FreeChunk* CompactibleFreeListSpace::bestFitSmall(size_t numWords) {
duke@435 1776 /* A hint is the next larger size that has a surplus.
duke@435 1777 Start search at a size large enough to guarantee that
duke@435 1778 the excess is >= MinChunkSize. */
duke@435 1779 size_t start = align_object_size(numWords + MinChunkSize);
duke@435 1780 if (start < IndexSetSize) {
duke@435 1781 FreeList* it = _indexedFreeList;
duke@435 1782 size_t hint = _indexedFreeList[start].hint();
duke@435 1783 while (hint < IndexSetSize) {
duke@435 1784 assert(hint % MinObjAlignment == 0, "hint should be aligned");
duke@435 1785 FreeList *fl = &_indexedFreeList[hint];
duke@435 1786 if (fl->surplus() > 0 && fl->head() != NULL) {
duke@435 1787 // Found a list with surplus, reset original hint
duke@435 1788 // and split out a free chunk which is returned.
duke@435 1789 _indexedFreeList[start].set_hint(hint);
duke@435 1790 FreeChunk* res = getFromListGreater(fl, numWords);
duke@435 1791 assert(res == NULL || res->isFree(),
duke@435 1792 "Should be returning a free chunk");
duke@435 1793 return res;
duke@435 1794 }
duke@435 1795 hint = fl->hint(); /* keep looking */
duke@435 1796 }
duke@435 1797 /* None found. */
duke@435 1798 it[start].set_hint(IndexSetSize);
duke@435 1799 }
duke@435 1800 return NULL;
duke@435 1801 }
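// A worked example with hypothetical sizes: for a 20-word request with a
// MinChunkSize of 4 words, the search starts at the 24-word list. If that
// list's hint is 32 but the 32-word list has no surplus, its own hint (say
// 48) is followed, and so on, until a list with both a surplus and a
// non-empty head is found; the starting list's hint is then reset to that
// size and a chunk is split off for the caller.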
duke@435 1802
duke@435 1803 /* Requires fl->size >= numWords + MinChunkSize */
duke@435 1804 FreeChunk* CompactibleFreeListSpace::getFromListGreater(FreeList* fl,
duke@435 1805 size_t numWords) {
duke@435 1806 FreeChunk *curr = fl->head();
duke@435 1807 size_t oldNumWords = curr->size();
duke@435 1808 assert(numWords >= MinChunkSize, "Word size is too small");
duke@435 1809 assert(curr != NULL, "List is empty");
duke@435 1810 assert(oldNumWords >= numWords + MinChunkSize,
duke@435 1811 "Size of chunks in the list is too small");
duke@435 1812
duke@435 1813 fl->removeChunk(curr);
duke@435 1814 // recorded indirectly by splitChunkAndReturnRemainder -
duke@435 1815 // smallSplit(oldNumWords, numWords);
duke@435 1816 FreeChunk* new_chunk = splitChunkAndReturnRemainder(curr, numWords);
duke@435 1817 // Does anything have to be done for the remainder in terms of
duke@435 1818 // fixing the card table?
duke@435 1819 assert(new_chunk == NULL || new_chunk->isFree(),
duke@435 1820 "Should be returning a free chunk");
duke@435 1821 return new_chunk;
duke@435 1822 }
duke@435 1823
duke@435 1824 FreeChunk*
duke@435 1825 CompactibleFreeListSpace::splitChunkAndReturnRemainder(FreeChunk* chunk,
duke@435 1826 size_t new_size) {
duke@435 1827 assert_locked();
duke@435 1828 size_t size = chunk->size();
duke@435 1829 assert(size > new_size, "Split from a smaller block?");
duke@435 1830 assert(is_aligned(chunk), "alignment problem");
duke@435 1831 assert(size == adjustObjectSize(size), "alignment problem");
duke@435 1832 size_t rem_size = size - new_size;
duke@435 1833 assert(rem_size == adjustObjectSize(rem_size), "alignment problem");
duke@435 1834 assert(rem_size >= MinChunkSize, "Free chunk smaller than minimum");
duke@435 1835 FreeChunk* ffc = (FreeChunk*)((HeapWord*)chunk + new_size);
duke@435 1836 assert(is_aligned(ffc), "alignment problem");
duke@435 1837 ffc->setSize(rem_size);
duke@435 1838 ffc->linkNext(NULL);
duke@435 1839 ffc->linkPrev(NULL); // Mark as a free block for other (parallel) GC threads.
duke@435 1840 // Above must occur before BOT is updated below.
duke@435 1841 // adjust block offset table
duke@435 1842 _bt.split_block((HeapWord*)chunk, chunk->size(), new_size);
duke@435 1843 if (rem_size < SmallForDictionary) {
duke@435 1844 bool is_par = (SharedHeap::heap()->n_par_threads() > 0);
duke@435 1845 if (is_par) _indexedFreeListParLocks[rem_size]->lock();
duke@435 1846 returnChunkToFreeList(ffc);
duke@435 1847 split(size, rem_size);
duke@435 1848 if (is_par) _indexedFreeListParLocks[rem_size]->unlock();
duke@435 1849 } else {
duke@435 1850 returnChunkToDictionary(ffc);
duke@435 1851 split(size, rem_size);
duke@435 1852 }
duke@435 1853 chunk->setSize(new_size);
duke@435 1854 return chunk;
duke@435 1855 }
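// A worked example with hypothetical sizes: splitting a 300-word free chunk
// for a 100-word request carves the remainder at chunk + 100, sizes it at
// 200 words, links it as free, updates the BOT, puts the 200-word remainder
// back on the appropriate free list or dictionary (recording the split in
// the census), and only then shrinks the original chunk to 100 words before
// handing it back to the caller; the ordering keeps concurrent readers from
// seeing the remainder before it looks like a well-formed free block.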
duke@435 1856
duke@435 1857 void
duke@435 1858 CompactibleFreeListSpace::sweep_completed() {
duke@435 1859 // Now that space is probably plentiful, refill linear
duke@435 1860 // allocation blocks as needed.
duke@435 1861 refillLinearAllocBlocksIfNeeded();
duke@435 1862 }
duke@435 1863
duke@435 1864 void
duke@435 1865 CompactibleFreeListSpace::gc_prologue() {
duke@435 1866 assert_locked();
duke@435 1867 if (PrintFLSStatistics != 0) {
duke@435 1868 gclog_or_tty->print("Before GC:\n");
duke@435 1869 reportFreeListStatistics();
duke@435 1870 }
duke@435 1871 refillLinearAllocBlocksIfNeeded();
duke@435 1872 }
duke@435 1873
duke@435 1874 void
duke@435 1875 CompactibleFreeListSpace::gc_epilogue() {
duke@435 1876 assert_locked();
duke@435 1877 if (PrintGCDetails && Verbose && !_adaptive_freelists) {
duke@435 1878 if (_smallLinearAllocBlock._word_size == 0)
duke@435 1879 warning("CompactibleFreeListSpace(epilogue):: Linear allocation failure");
duke@435 1880 }
duke@435 1881 assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");
duke@435 1882 _promoInfo.stopTrackingPromotions();
duke@435 1883 repairLinearAllocationBlocks();
duke@435 1884 // Print Space's stats
duke@435 1885 if (PrintFLSStatistics != 0) {
duke@435 1886 gclog_or_tty->print("After GC:\n");
duke@435 1887 reportFreeListStatistics();
duke@435 1888 }
duke@435 1889 }
duke@435 1890
duke@435 1891 // Iteration support, mostly delegated from a CMS generation
duke@435 1892
duke@435 1893 void CompactibleFreeListSpace::save_marks() {
duke@435 1894 // mark the "end" of the used space at the time of this call;
duke@435 1895 // note, however, that promoted objects from this point
duke@435 1896 // on are tracked in the _promoInfo below.
duke@435 1897 set_saved_mark_word(BlockOffsetArrayUseUnallocatedBlock ?
duke@435 1898 unallocated_block() : end());
duke@435 1899 // inform allocator that promotions should be tracked.
duke@435 1900 assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");
duke@435 1901 _promoInfo.startTrackingPromotions();
duke@435 1902 }
duke@435 1903
duke@435 1904 bool CompactibleFreeListSpace::no_allocs_since_save_marks() {
duke@435 1905 assert(_promoInfo.tracking(), "No preceding save_marks?");
duke@435 1906 guarantee(SharedHeap::heap()->n_par_threads() == 0,
duke@435 1907 "Shouldn't be called (yet) during parallel part of gc.");
duke@435 1908 return _promoInfo.noPromotions();
duke@435 1909 }
duke@435 1910
duke@435 1911 #define CFLS_OOP_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix) \
duke@435 1912 \
duke@435 1913 void CompactibleFreeListSpace:: \
duke@435 1914 oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk) { \
duke@435 1915 assert(SharedHeap::heap()->n_par_threads() == 0, \
duke@435 1916 "Shouldn't be called (yet) during parallel part of gc."); \
duke@435 1917 _promoInfo.promoted_oops_iterate##nv_suffix(blk); \
duke@435 1918 /* \
duke@435 1919 * This also restores any displaced headers and removes the elements from \
duke@435 1920 * the iteration set as they are processed, so that we have a clean slate \
duke@435 1921 * at the end of the iteration. Note, thus, that if new objects are \
duke@435 1922 * promoted as a result of the iteration they are iterated over as well. \
duke@435 1923 */ \
duke@435 1924 assert(_promoInfo.noPromotions(), "_promoInfo inconsistency"); \
duke@435 1925 }
duke@435 1926
duke@435 1927 ALL_SINCE_SAVE_MARKS_CLOSURES(CFLS_OOP_SINCE_SAVE_MARKS_DEFN)
duke@435 1928
duke@435 1929 //////////////////////////////////////////////////////////////////////////////
duke@435 1930 // We go over the list of promoted objects, removing each from the list,
duke@435 1931 // and applying the closure (this may, in turn, add more elements to
duke@435 1932 // the tail of the promoted list, and these newly added objects will
duke@435 1933 // also be processed) until the list is empty.
duke@435 1934 // To aid verification and debugging, in the non-product builds
duke@435 1935 // we actually forward _promoHead each time we process a promoted oop.
duke@435 1936 // Note that this is not necessary in general (i.e. when we don't need to
duke@435 1937 // call PromotionInfo::verify()) because oop_iterate can only add to the
duke@435 1938 // end of _promoTail, and never needs to look at _promoHead.
duke@435 1939
duke@435 1940 #define PROMOTED_OOPS_ITERATE_DEFN(OopClosureType, nv_suffix) \
duke@435 1941 \
duke@435 1942 void PromotionInfo::promoted_oops_iterate##nv_suffix(OopClosureType* cl) { \
duke@435 1943 NOT_PRODUCT(verify()); \
duke@435 1944 PromotedObject *curObj, *nextObj; \
duke@435 1945 for (curObj = _promoHead; curObj != NULL; curObj = nextObj) { \
duke@435 1946 if ((nextObj = curObj->next()) == NULL) { \
duke@435 1947 /* protect ourselves against additions due to closure application \
duke@435 1948 below by resetting the list. */ \
duke@435 1949 assert(_promoTail == curObj, "Should have been the tail"); \
duke@435 1950 _promoHead = _promoTail = NULL; \
duke@435 1951 } \
duke@435 1952 if (curObj->hasDisplacedMark()) { \
duke@435 1953 /* restore displaced header */ \
duke@435 1954 oop(curObj)->set_mark(nextDisplacedHeader()); \
duke@435 1955 } else { \
duke@435 1956 /* restore prototypical header */ \
duke@435 1957 oop(curObj)->init_mark(); \
duke@435 1958 } \
duke@435 1959 /* The "promoted_mark" should now not be set */ \
duke@435 1960 assert(!curObj->hasPromotedMark(), \
duke@435 1961 "Should have been cleared by restoring displaced mark-word"); \
duke@435 1962 NOT_PRODUCT(_promoHead = nextObj); \
duke@435 1963 if (cl != NULL) oop(curObj)->oop_iterate(cl); \
duke@435 1964 if (nextObj == NULL) { /* start at head of list reset above */ \
duke@435 1965 nextObj = _promoHead; \
duke@435 1966 } \
duke@435 1967 } \
duke@435 1968 assert(noPromotions(), "post-condition violation"); \
duke@435 1969 assert(_promoHead == NULL && _promoTail == NULL, "emptied promoted list");\
duke@435 1970 assert(_spoolHead == _spoolTail, "emptied spooling buffers"); \
duke@435 1971 assert(_firstIndex == _nextIndex, "empty buffer"); \
duke@435 1972 }
duke@435 1973
duke@435 1974 // This should have been ALL_SINCE_...() just like the others,
duke@435 1975 // but, because the body of the method above is somewhat longer,
duke@435 1976 // the MSVC compiler cannot cope; as a workaround, we split the
duke@435 1977 // macro into its 3 constituent parts below (see original macro
duke@435 1978 // definition in specializedOopClosures.hpp).
duke@435 1979 SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES_YOUNG(PROMOTED_OOPS_ITERATE_DEFN)
duke@435 1980 PROMOTED_OOPS_ITERATE_DEFN(OopsInGenClosure,_v)
duke@435 1981
duke@435 1982
duke@435 1983 void CompactibleFreeListSpace::object_iterate_since_last_GC(ObjectClosure* cl) {
duke@435 1984 // ugghh... how would one do this efficiently for a non-contiguous space?
duke@435 1985 guarantee(false, "NYI");
duke@435 1986 }
duke@435 1987
ysr@447 1988 bool CompactibleFreeListSpace::linearAllocationWouldFail() const {
duke@435 1989 return _smallLinearAllocBlock._word_size == 0;
duke@435 1990 }
duke@435 1991
duke@435 1992 void CompactibleFreeListSpace::repairLinearAllocationBlocks() {
duke@435 1993 // Fix up linear allocation blocks to look like free blocks
duke@435 1994 repairLinearAllocBlock(&_smallLinearAllocBlock);
duke@435 1995 }
duke@435 1996
duke@435 1997 void CompactibleFreeListSpace::repairLinearAllocBlock(LinearAllocBlock* blk) {
duke@435 1998 assert_locked();
duke@435 1999 if (blk->_ptr != NULL) {
duke@435 2000 assert(blk->_word_size != 0 && blk->_word_size >= MinChunkSize,
duke@435 2001 "Minimum block size requirement");
duke@435 2002 FreeChunk* fc = (FreeChunk*)(blk->_ptr);
duke@435 2003 fc->setSize(blk->_word_size);
duke@435 2004 fc->linkPrev(NULL); // mark as free
duke@435 2005 fc->dontCoalesce();
duke@435 2006 assert(fc->isFree(), "just marked it free");
duke@435 2007 assert(fc->cantCoalesce(), "just marked it uncoalescable");
duke@435 2008 }
duke@435 2009 }
duke@435 2010
duke@435 2011 void CompactibleFreeListSpace::refillLinearAllocBlocksIfNeeded() {
duke@435 2012 assert_locked();
duke@435 2013 if (_smallLinearAllocBlock._ptr == NULL) {
duke@435 2014 assert(_smallLinearAllocBlock._word_size == 0,
duke@435 2015 "Size of linAB should be zero if the ptr is NULL");
duke@435 2016 // Reset the linAB refill and allocation size limit.
duke@435 2017 _smallLinearAllocBlock.set(0, 0, 1024*SmallForLinearAlloc, SmallForLinearAlloc);
duke@435 2018 }
duke@435 2019 refillLinearAllocBlockIfNeeded(&_smallLinearAllocBlock);
duke@435 2020 }
duke@435 2021
duke@435 2022 void
duke@435 2023 CompactibleFreeListSpace::refillLinearAllocBlockIfNeeded(LinearAllocBlock* blk) {
duke@435 2024 assert_locked();
duke@435 2025 assert((blk->_ptr == NULL && blk->_word_size == 0) ||
duke@435 2026 (blk->_ptr != NULL && blk->_word_size >= MinChunkSize),
duke@435 2027 "blk invariant");
duke@435 2028 if (blk->_ptr == NULL) {
duke@435 2029 refillLinearAllocBlock(blk);
duke@435 2030 }
duke@435 2031 if (PrintMiscellaneous && Verbose) {
duke@435 2032 if (blk->_word_size == 0) {
duke@435 2033 warning("CompactibleFreeListSpace(prologue):: Linear allocation failure");
duke@435 2034 }
duke@435 2035 }
duke@435 2036 }
duke@435 2037
duke@435 2038 void
duke@435 2039 CompactibleFreeListSpace::refillLinearAllocBlock(LinearAllocBlock* blk) {
duke@435 2040 assert_locked();
duke@435 2041 assert(blk->_word_size == 0 && blk->_ptr == NULL,
duke@435 2042 "linear allocation block should be empty");
duke@435 2043 FreeChunk* fc;
duke@435 2044 if (blk->_refillSize < SmallForDictionary &&
duke@435 2045 (fc = getChunkFromIndexedFreeList(blk->_refillSize)) != NULL) {
duke@435 2046 // A linAB's strategy might be to use small sizes to reduce
duke@435 2047 // fragmentation but still get the benefits of allocation from a
duke@435 2048 // linAB.
duke@435 2049 } else {
duke@435 2050 fc = getChunkFromDictionary(blk->_refillSize);
duke@435 2051 }
duke@435 2052 if (fc != NULL) {
duke@435 2053 blk->_ptr = (HeapWord*)fc;
duke@435 2054 blk->_word_size = fc->size();
duke@435 2055 fc->dontCoalesce(); // to prevent sweeper from sweeping us up
duke@435 2056 }
duke@435 2057 }
duke@435 2058
ysr@447 2059 // Support for concurrent collection policy decisions.
ysr@447 2060 bool CompactibleFreeListSpace::should_concurrent_collect() const {
ysr@447 2061 // In the future we might want to add in fragmentation stats --
ysr@447 2062 // including erosion of the "mountain" into this decision as well.
ysr@447 2063 return !adaptive_freelists() && linearAllocationWouldFail();
ysr@447 2064 }
ysr@447 2065
duke@435 2066 // Support for compaction
duke@435 2067
duke@435 2068 void CompactibleFreeListSpace::prepare_for_compaction(CompactPoint* cp) {
duke@435 2069 SCAN_AND_FORWARD(cp,end,block_is_obj,block_size);
duke@435 2070 // prepare_for_compaction() uses the space between live objects
duke@435 2071 // so that a later phase can skip dead space quickly. As a result,
duke@435 2072 // verification of the free lists doesn't work after this point.
duke@435 2073 }
duke@435 2074
duke@435 2075 #define obj_size(q) adjustObjectSize(oop(q)->size())
duke@435 2076 #define adjust_obj_size(s) adjustObjectSize(s)
duke@435 2077
duke@435 2078 void CompactibleFreeListSpace::adjust_pointers() {
duke@435 2079 // In other versions of adjust_pointers(), a bail out
duke@435 2080 // based on the amount of live data in the generation
duke@435 2081 // (i.e., if 0, bail out) may be used.
duke@435 2082 // Cannot test used() == 0 here because the free lists have already
duke@435 2083 // been mangled by the compaction.
duke@435 2084
duke@435 2085 SCAN_AND_ADJUST_POINTERS(adjust_obj_size);
duke@435 2086 // See note about verification in prepare_for_compaction().
duke@435 2087 }
duke@435 2088
duke@435 2089 void CompactibleFreeListSpace::compact() {
duke@435 2090 SCAN_AND_COMPACT(obj_size);
duke@435 2091 }
duke@435 2092
duke@435 2093 // fragmentation_metric = 1 - [sum of (fbs**2) / (sum of fbs)**2]
duke@435 2094 // where fbs are the free block sizes
duke@435 2095 double CompactibleFreeListSpace::flsFrag() const {
duke@435 2096 size_t itabFree = totalSizeInIndexedFreeLists();
duke@435 2097 double frag = 0.0;
duke@435 2098 size_t i;
duke@435 2099
duke@435 2100 for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
duke@435 2101 double sz = i;
duke@435 2102 frag += _indexedFreeList[i].count() * (sz * sz);
duke@435 2103 }
duke@435 2104
duke@435 2105 double totFree = itabFree +
duke@435 2106 _dictionary->totalChunkSize(DEBUG_ONLY(freelistLock()));
duke@435 2107 if (totFree > 0) {
duke@435 2108 frag = ((frag + _dictionary->sum_of_squared_block_sizes()) /
duke@435 2109 (totFree * totFree));
duke@435 2110 frag = (double)1.0 - frag;
duke@435 2111 } else {
duke@435 2112 assert(frag == 0.0, "Follows from totFree == 0");
duke@435 2113 }
duke@435 2114 return frag;
duke@435 2115 }
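// A worked example plus a self-contained sketch of the metric (the
// CFLS_ILLUSTRATION_ONLY guard is a hypothetical macro that is never
// defined, so this is not compiled). For 400 free words in a single block,
// frag = 1 - 400^2/400^2 = 0; for four 100-word blocks,
// frag = 1 - (4*100^2)/400^2 = 0.75. The metric is 0 when all free space is
// one block and approaches 1 as it shatters into many small pieces.
#ifdef CFLS_ILLUSTRATION_ONLY
static double fls_frag_sketch(const size_t* block_sizes, size_t n) {
  double sum = 0.0, sum_sq = 0.0;
  for (size_t i = 0; i < n; i++) {
    double sz = (double)block_sizes[i];
    sum    += sz;
    sum_sq += sz * sz;
  }
  return (sum > 0.0) ? 1.0 - sum_sq / (sum * sum) : 0.0;
}
#endif // CFLS_ILLUSTRATION_ONLY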
duke@435 2116
duke@435 2117 void CompactibleFreeListSpace::beginSweepFLCensus(
duke@435 2118 float inter_sweep_current,
ysr@1580 2119 float inter_sweep_estimate,
ysr@1580 2120 float intra_sweep_estimate) {
duke@435 2121 assert_locked();
duke@435 2122 size_t i;
duke@435 2123 for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
duke@435 2124 FreeList* fl = &_indexedFreeList[i];
ysr@1580 2125 if (PrintFLSStatistics > 1) {
ysr@1580 2126 gclog_or_tty->print("size[%d] : ", i);
ysr@1580 2127 }
ysr@1580 2128 fl->compute_desired(inter_sweep_current, inter_sweep_estimate, intra_sweep_estimate);
ysr@1580 2129 fl->set_coalDesired((ssize_t)((double)fl->desired() * CMSSmallCoalSurplusPercent));
duke@435 2130 fl->set_beforeSweep(fl->count());
duke@435 2131 fl->set_bfrSurp(fl->surplus());
duke@435 2132 }
ysr@1580 2133 _dictionary->beginSweepDictCensus(CMSLargeCoalSurplusPercent,
duke@435 2134 inter_sweep_current,
ysr@1580 2135 inter_sweep_estimate,
ysr@1580 2136 intra_sweep_estimate);
duke@435 2137 }
duke@435 2138
duke@435 2139 void CompactibleFreeListSpace::setFLSurplus() {
duke@435 2140 assert_locked();
duke@435 2141 size_t i;
duke@435 2142 for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
duke@435 2143 FreeList *fl = &_indexedFreeList[i];
duke@435 2144 fl->set_surplus(fl->count() -
ysr@1580 2145 (ssize_t)((double)fl->desired() * CMSSmallSplitSurplusPercent));
duke@435 2146 }
duke@435 2147 }
duke@435 2148
duke@435 2149 void CompactibleFreeListSpace::setFLHints() {
duke@435 2150 assert_locked();
duke@435 2151 size_t i;
duke@435 2152 size_t h = IndexSetSize;
duke@435 2153 for (i = IndexSetSize - 1; i != 0; i -= IndexSetStride) {
duke@435 2154 FreeList *fl = &_indexedFreeList[i];
duke@435 2155 fl->set_hint(h);
duke@435 2156 if (fl->surplus() > 0) {
duke@435 2157 h = i;
duke@435 2158 }
duke@435 2159 }
duke@435 2160 }
duke@435 2161
duke@435 2162 void CompactibleFreeListSpace::clearFLCensus() {
duke@435 2163 assert_locked();
duke@435 2164 int i;
duke@435 2165 for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
duke@435 2166 FreeList *fl = &_indexedFreeList[i];
duke@435 2167 fl->set_prevSweep(fl->count());
duke@435 2168 fl->set_coalBirths(0);
duke@435 2169 fl->set_coalDeaths(0);
duke@435 2170 fl->set_splitBirths(0);
duke@435 2171 fl->set_splitDeaths(0);
duke@435 2172 }
duke@435 2173 }
duke@435 2174
ysr@447 2175 void CompactibleFreeListSpace::endSweepFLCensus(size_t sweep_count) {
ysr@1580 2176 if (PrintFLSStatistics > 0) {
ysr@1580 2177 HeapWord* largestAddr = (HeapWord*) dictionary()->findLargestDict();
ysr@1580 2178 gclog_or_tty->print_cr("CMS: Large block " PTR_FORMAT,
ysr@1580 2179 largestAddr);
ysr@1580 2180 }
duke@435 2181 setFLSurplus();
duke@435 2182 setFLHints();
duke@435 2183 if (PrintGC && PrintFLSCensus > 0) {
ysr@447 2184 printFLCensus(sweep_count);
duke@435 2185 }
duke@435 2186 clearFLCensus();
duke@435 2187 assert_locked();
ysr@1580 2188 _dictionary->endSweepDictCensus(CMSLargeSplitSurplusPercent);
duke@435 2189 }
duke@435 2190
duke@435 2191 bool CompactibleFreeListSpace::coalOverPopulated(size_t size) {
duke@435 2192 if (size < SmallForDictionary) {
duke@435 2193 FreeList *fl = &_indexedFreeList[size];
duke@435 2194 return (fl->coalDesired() < 0) ||
duke@435 2195 ((int)fl->count() > fl->coalDesired());
duke@435 2196 } else {
duke@435 2197 return dictionary()->coalDictOverPopulated(size);
duke@435 2198 }
duke@435 2199 }
duke@435 2200
duke@435 2201 void CompactibleFreeListSpace::smallCoalBirth(size_t size) {
duke@435 2202 assert(size < SmallForDictionary, "Size too large for indexed list");
duke@435 2203 FreeList *fl = &_indexedFreeList[size];
duke@435 2204 fl->increment_coalBirths();
duke@435 2205 fl->increment_surplus();
duke@435 2206 }
duke@435 2207
duke@435 2208 void CompactibleFreeListSpace::smallCoalDeath(size_t size) {
duke@435 2209 assert(size < SmallForDictionary, "Size too large for indexed list");
duke@435 2210 FreeList *fl = &_indexedFreeList[size];
duke@435 2211 fl->increment_coalDeaths();
duke@435 2212 fl->decrement_surplus();
duke@435 2213 }
duke@435 2214
duke@435 2215 void CompactibleFreeListSpace::coalBirth(size_t size) {
duke@435 2216 if (size < SmallForDictionary) {
duke@435 2217 smallCoalBirth(size);
duke@435 2218 } else {
duke@435 2219 dictionary()->dictCensusUpdate(size,
duke@435 2220 false /* split */,
duke@435 2221 true /* birth */);
duke@435 2222 }
duke@435 2223 }
duke@435 2224
duke@435 2225 void CompactibleFreeListSpace::coalDeath(size_t size) {
duke@435 2226 if (size < SmallForDictionary) {
duke@435 2227 smallCoalDeath(size);
duke@435 2228 } else {
duke@435 2229 dictionary()->dictCensusUpdate(size,
duke@435 2230 false /* split */,
duke@435 2231 false /* birth */);
duke@435 2232 }
duke@435 2233 }
duke@435 2234
duke@435 2235 void CompactibleFreeListSpace::smallSplitBirth(size_t size) {
duke@435 2236 assert(size < SmallForDictionary, "Size too large for indexed list");
duke@435 2237 FreeList *fl = &_indexedFreeList[size];
duke@435 2238 fl->increment_splitBirths();
duke@435 2239 fl->increment_surplus();
duke@435 2240 }
duke@435 2241
duke@435 2242 void CompactibleFreeListSpace::smallSplitDeath(size_t size) {
duke@435 2243 assert(size < SmallForDictionary, "Size too large for indexed list");
duke@435 2244 FreeList *fl = &_indexedFreeList[size];
duke@435 2245 fl->increment_splitDeaths();
duke@435 2246 fl->decrement_surplus();
duke@435 2247 }
duke@435 2248
duke@435 2249 void CompactibleFreeListSpace::splitBirth(size_t size) {
duke@435 2250 if (size < SmallForDictionary) {
duke@435 2251 smallSplitBirth(size);
duke@435 2252 } else {
duke@435 2253 dictionary()->dictCensusUpdate(size,
duke@435 2254 true /* split */,
duke@435 2255 true /* birth */);
duke@435 2256 }
duke@435 2257 }
duke@435 2258
duke@435 2259 void CompactibleFreeListSpace::splitDeath(size_t size) {
duke@435 2260 if (size < SmallForDictionary) {
duke@435 2261 smallSplitDeath(size);
duke@435 2262 } else {
duke@435 2263 dictionary()->dictCensusUpdate(size,
duke@435 2264 true /* split */,
duke@435 2265 false /* birth */);
duke@435 2266 }
duke@435 2267 }
duke@435 2268
duke@435 2269 void CompactibleFreeListSpace::split(size_t from, size_t to1) {
duke@435 2270 size_t to2 = from - to1;
duke@435 2271 splitDeath(from);
duke@435 2272 splitBirth(to1);
duke@435 2273 splitBirth(to2);
duke@435 2274 }
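// A worked example with hypothetical sizes: splitting a 300-word chunk to
// carve out a 100-word piece records splitDeath(300), splitBirth(100) and
// splitBirth(200): the original size dies in the census and both resulting
// sizes are born, which keeps the per-size population statistics consistent.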
duke@435 2275
duke@435 2276 void CompactibleFreeListSpace::print() const {
duke@435 2277 tty->print(" CompactibleFreeListSpace");
duke@435 2278 Space::print();
duke@435 2279 }
duke@435 2280
duke@435 2281 void CompactibleFreeListSpace::prepare_for_verify() {
duke@435 2282 assert_locked();
duke@435 2283 repairLinearAllocationBlocks();
duke@435 2284 // Verify that the SpoolBlocks look like free blocks of
duke@435 2285 // appropriate sizes... To be done ...
duke@435 2286 }
duke@435 2287
duke@435 2288 class VerifyAllBlksClosure: public BlkClosure {
coleenp@548 2289 private:
duke@435 2290 const CompactibleFreeListSpace* _sp;
duke@435 2291 const MemRegion _span;
duke@435 2292
duke@435 2293 public:
duke@435 2294 VerifyAllBlksClosure(const CompactibleFreeListSpace* sp,
duke@435 2295 MemRegion span) : _sp(sp), _span(span) { }
duke@435 2296
coleenp@548 2297 virtual size_t do_blk(HeapWord* addr) {
duke@435 2298 size_t res;
duke@435 2299 if (_sp->block_is_obj(addr)) {
duke@435 2300 oop p = oop(addr);
duke@435 2301 guarantee(p->is_oop(), "Should be an oop");
duke@435 2302 res = _sp->adjustObjectSize(p->size());
duke@435 2303 if (_sp->obj_is_alive(addr)) {
duke@435 2304 p->verify();
duke@435 2305 }
duke@435 2306 } else {
duke@435 2307 FreeChunk* fc = (FreeChunk*)addr;
duke@435 2308 res = fc->size();
duke@435 2309 if (FLSVerifyLists && !fc->cantCoalesce()) {
duke@435 2310 guarantee(_sp->verifyChunkInFreeLists(fc),
duke@435 2311 "Chunk should be on a free list");
duke@435 2312 }
duke@435 2313 }
duke@435 2314 guarantee(res != 0, "Livelock: no rank reduction!");
duke@435 2315 return res;
duke@435 2316 }
duke@435 2317 };
duke@435 2318
duke@435 2319 class VerifyAllOopsClosure: public OopClosure {
coleenp@548 2320 private:
duke@435 2321 const CMSCollector* _collector;
duke@435 2322 const CompactibleFreeListSpace* _sp;
duke@435 2323 const MemRegion _span;
duke@435 2324 const bool _past_remark;
duke@435 2325 const CMSBitMap* _bit_map;
duke@435 2326
coleenp@548 2327 protected:
coleenp@548 2328 void do_oop(void* p, oop obj) {
coleenp@548 2329 if (_span.contains(obj)) { // the interior oop points into CMS heap
coleenp@548 2330 if (!_span.contains(p)) { // reference from outside CMS heap
coleenp@548 2331 // Should be a valid object; the first disjunct below allows
coleenp@548 2332 // us to sidestep an assertion in block_is_obj() that insists
coleenp@548 2333 // that p be in _sp. Note that several generations (and spaces)
coleenp@548 2334 // are spanned by _span (CMS heap) above.
coleenp@548 2335 guarantee(!_sp->is_in_reserved(obj) ||
coleenp@548 2336 _sp->block_is_obj((HeapWord*)obj),
coleenp@548 2337 "Should be an object");
coleenp@548 2338 guarantee(obj->is_oop(), "Should be an oop");
coleenp@548 2339 obj->verify();
coleenp@548 2340 if (_past_remark) {
coleenp@548 2341 // Remark has been completed, the object should be marked
coleenp@548 2342 _bit_map->isMarked((HeapWord*)obj);
coleenp@548 2343 }
coleenp@548 2344 } else { // reference within CMS heap
coleenp@548 2345 if (_past_remark) {
coleenp@548 2346 // Remark has been completed -- so the referent should have
coleenp@548 2347 // been marked, if referring object is.
coleenp@548 2348 if (_bit_map->isMarked(_collector->block_start(p))) {
coleenp@548 2349 guarantee(_bit_map->isMarked((HeapWord*)obj), "Marking error?");
coleenp@548 2350 }
coleenp@548 2351 }
coleenp@548 2352 }
coleenp@548 2353 } else if (_sp->is_in_reserved(p)) {
coleenp@548 2354 // the reference is from FLS, and points out of FLS
coleenp@548 2355 guarantee(obj->is_oop(), "Should be an oop");
coleenp@548 2356 obj->verify();
coleenp@548 2357 }
coleenp@548 2358 }
coleenp@548 2359
coleenp@548 2360 template <class T> void do_oop_work(T* p) {
coleenp@548 2361 T heap_oop = oopDesc::load_heap_oop(p);
coleenp@548 2362 if (!oopDesc::is_null(heap_oop)) {
coleenp@548 2363 oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
coleenp@548 2364 do_oop(p, obj);
coleenp@548 2365 }
coleenp@548 2366 }
coleenp@548 2367
duke@435 2368 public:
duke@435 2369 VerifyAllOopsClosure(const CMSCollector* collector,
duke@435 2370 const CompactibleFreeListSpace* sp, MemRegion span,
duke@435 2371 bool past_remark, CMSBitMap* bit_map) :
duke@435 2372 OopClosure(), _collector(collector), _sp(sp), _span(span),
duke@435 2373 _past_remark(past_remark), _bit_map(bit_map) { }
duke@435 2374
coleenp@548 2375 virtual void do_oop(oop* p) { VerifyAllOopsClosure::do_oop_work(p); }
coleenp@548 2376 virtual void do_oop(narrowOop* p) { VerifyAllOopsClosure::do_oop_work(p); }
duke@435 2377 };
duke@435 2378
duke@435 2379 void CompactibleFreeListSpace::verify(bool ignored) const {
duke@435 2380 assert_lock_strong(&_freelistLock);
duke@435 2381 verify_objects_initialized();
duke@435 2382 MemRegion span = _collector->_span;
duke@435 2383 bool past_remark = (_collector->abstract_state() ==
duke@435 2384 CMSCollector::Sweeping);
duke@435 2385
duke@435 2386 ResourceMark rm;
duke@435 2387 HandleMark hm;
duke@435 2388
duke@435 2389 // Check integrity of CFL data structures
duke@435 2390 _promoInfo.verify();
duke@435 2391 _dictionary->verify();
duke@435 2392 if (FLSVerifyIndexTable) {
duke@435 2393 verifyIndexedFreeLists();
duke@435 2394 }
duke@435 2395 // Check integrity of all objects and free blocks in space
duke@435 2396 {
duke@435 2397 VerifyAllBlksClosure cl(this, span);
duke@435 2398 ((CompactibleFreeListSpace*)this)->blk_iterate(&cl); // cast off const
duke@435 2399 }
duke@435 2400 // Check that all references in the heap to FLS
duke@435 2401 // are to valid objects in FLS or that references in
duke@435 2402 // FLS are to valid objects elsewhere in the heap
duke@435 2403 if (FLSVerifyAllHeapReferences)
duke@435 2404 {
duke@435 2405 VerifyAllOopsClosure cl(_collector, this, span, past_remark,
duke@435 2406 _collector->markBitMap());
duke@435 2407 CollectedHeap* ch = Universe::heap();
duke@435 2408 ch->oop_iterate(&cl); // all oops in generations
duke@435 2409 ch->permanent_oop_iterate(&cl); // all oops in perm gen
duke@435 2410 }
duke@435 2411
duke@435 2412 if (VerifyObjectStartArray) {
duke@435 2413 // Verify the block offset table
duke@435 2414 _bt.verify();
duke@435 2415 }
duke@435 2416 }
duke@435 2417
duke@435 2418 #ifndef PRODUCT
duke@435 2419 void CompactibleFreeListSpace::verifyFreeLists() const {
duke@435 2420 if (FLSVerifyLists) {
duke@435 2421 _dictionary->verify();
duke@435 2422 verifyIndexedFreeLists();
duke@435 2423 } else {
duke@435 2424 if (FLSVerifyDictionary) {
duke@435 2425 _dictionary->verify();
duke@435 2426 }
duke@435 2427 if (FLSVerifyIndexTable) {
duke@435 2428 verifyIndexedFreeLists();
duke@435 2429 }
duke@435 2430 }
duke@435 2431 }
duke@435 2432 #endif
duke@435 2433
duke@435 2434 void CompactibleFreeListSpace::verifyIndexedFreeLists() const {
duke@435 2435 size_t i = 0;
duke@435 2436 for (; i < MinChunkSize; i++) {
duke@435 2437 guarantee(_indexedFreeList[i].head() == NULL, "should be NULL");
duke@435 2438 }
duke@435 2439 for (; i < IndexSetSize; i++) {
duke@435 2440 verifyIndexedFreeList(i);
duke@435 2441 }
duke@435 2442 }
duke@435 2443
duke@435 2444 void CompactibleFreeListSpace::verifyIndexedFreeList(size_t size) const {
ysr@1580 2445 FreeChunk* fc = _indexedFreeList[size].head();
ysr@1580 2446 FreeChunk* tail = _indexedFreeList[size].tail();
ysr@1580 2447 size_t num = _indexedFreeList[size].count();
ysr@1580 2448 size_t n = 0;
ysr@777 2449 guarantee((size % 2 == 0) || fc == NULL, "Odd slots should be empty");
ysr@1580 2450 for (; fc != NULL; fc = fc->next(), n++) {
duke@435 2451 guarantee(fc->size() == size, "Size inconsistency");
duke@435 2452 guarantee(fc->isFree(), "!free?");
duke@435 2453 guarantee(fc->next() == NULL || fc->next()->prev() == fc, "Broken list");
ysr@1580 2454 guarantee((fc->next() == NULL) == (fc == tail), "Incorrect tail");
duke@435 2455 }
ysr@1580 2456 guarantee(n == num, "Incorrect count");
duke@435 2457 }
duke@435 2458
duke@435 2459 #ifndef PRODUCT
duke@435 2460 void CompactibleFreeListSpace::checkFreeListConsistency() const {
duke@435 2461 assert(_dictionary->minSize() <= IndexSetSize,
duke@435 2462 "Some sizes can't be allocated without recourse to"
duke@435 2463 " linear allocation buffers");
duke@435 2464 assert(MIN_TREE_CHUNK_SIZE*HeapWordSize == sizeof(TreeChunk),
duke@435 2465 "else MIN_TREE_CHUNK_SIZE is wrong");
duke@435 2466 assert((IndexSetStride == 2 && IndexSetStart == 2) ||
duke@435 2467 (IndexSetStride == 1 && IndexSetStart == 1), "just checking");
duke@435 2468 assert((IndexSetStride != 2) || (MinChunkSize % 2 == 0),
duke@435 2469 "Some for-loops may be incorrectly initialized");
duke@435 2470 assert((IndexSetStride != 2) || (IndexSetSize % 2 == 1),
duke@435 2471 "For-loops that iterate over IndexSet with stride 2 may be wrong");
duke@435 2472 }
duke@435 2473 #endif
duke@435 2474
ysr@447 2475 void CompactibleFreeListSpace::printFLCensus(size_t sweep_count) const {
duke@435 2476 assert_lock_strong(&_freelistLock);
ysr@447 2477 FreeList total;
ysr@447 2478 gclog_or_tty->print("end sweep# " SIZE_FORMAT "\n", sweep_count);
ysr@447 2479 FreeList::print_labels_on(gclog_or_tty, "size");
duke@435 2480 size_t totalFree = 0;
duke@435 2481 for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
duke@435 2482 const FreeList *fl = &_indexedFreeList[i];
ysr@447 2483 totalFree += fl->count() * fl->size();
ysr@447 2484 if (i % (40*IndexSetStride) == 0) {
ysr@447 2485 FreeList::print_labels_on(gclog_or_tty, "size");
ysr@447 2486 }
ysr@447 2487 fl->print_on(gclog_or_tty);
ysr@447 2488 total.set_bfrSurp( total.bfrSurp() + fl->bfrSurp() );
ysr@447 2489 total.set_surplus( total.surplus() + fl->surplus() );
ysr@447 2490 total.set_desired( total.desired() + fl->desired() );
ysr@447 2491 total.set_prevSweep( total.prevSweep() + fl->prevSweep() );
ysr@447 2492 total.set_beforeSweep(total.beforeSweep() + fl->beforeSweep());
ysr@447 2493 total.set_count( total.count() + fl->count() );
ysr@447 2494 total.set_coalBirths( total.coalBirths() + fl->coalBirths() );
ysr@447 2495 total.set_coalDeaths( total.coalDeaths() + fl->coalDeaths() );
ysr@447 2496 total.set_splitBirths(total.splitBirths() + fl->splitBirths());
ysr@447 2497 total.set_splitDeaths(total.splitDeaths() + fl->splitDeaths());
duke@435 2498 }
ysr@447 2499 total.print_on(gclog_or_tty, "TOTAL");
ysr@447 2500 gclog_or_tty->print_cr("Total free in indexed lists "
ysr@447 2501 SIZE_FORMAT " words", totalFree);
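// The two ratios printed below are, in terms of the census fields summed
// into "total" above:
//   growth  = (splitBirths + coalBirths - splitDeaths - coalDeaths) / prevSweep
//   deficit = (desired - count) / desired
// with 1.0 substituted when the true denominator is zero. For illustration
// (numbers assumed, not from a real run): 40 births, 10 deaths and a
// previous-sweep population of 200 give growth = 0.15, while desired = 120
// against count = 90 gives deficit = 0.25.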
duke@435 2502 gclog_or_tty->print("growth: %8.5f deficit: %8.5f\n",
ysr@447 2503 (double)(total.splitBirths()+total.coalBirths()-total.splitDeaths()-total.coalDeaths())/
ysr@447 2504 (total.prevSweep() != 0 ? (double)total.prevSweep() : 1.0),
ysr@447 2505 (double)(total.desired() - total.count())/(total.desired() != 0 ? (double)total.desired() : 1.0));
duke@435 2506 _dictionary->printDictCensus();
duke@435 2507 }
duke@435 2508
duke@435 2509 // Return the next displaced header, incrementing the pointer and
duke@435 2510 // recycling spool area as necessary.
duke@435 2511 markOop PromotionInfo::nextDisplacedHeader() {
duke@435 2512 assert(_spoolHead != NULL, "promotionInfo inconsistency");
duke@435 2513 assert(_spoolHead != _spoolTail || _firstIndex < _nextIndex,
duke@435 2514 "Empty spool space: no displaced header can be fetched");
duke@435 2515 assert(_spoolHead->bufferSize > _firstIndex, "Off by one error at head?");
duke@435 2516 markOop hdr = _spoolHead->displacedHdr[_firstIndex];
duke@435 2517 // Spool forward
duke@435 2518 if (++_firstIndex == _spoolHead->bufferSize) { // last location in this block
duke@435 2519 // forward to next block, recycling this block into spare spool buffer
duke@435 2520 SpoolBlock* tmp = _spoolHead->nextSpoolBlock;
duke@435 2521 assert(_spoolHead != _spoolTail, "Spooling storage mix-up");
duke@435 2522 _spoolHead->nextSpoolBlock = _spareSpool;
duke@435 2523 _spareSpool = _spoolHead;
duke@435 2524 _spoolHead = tmp;
duke@435 2525 _firstIndex = 1;
duke@435 2526 NOT_PRODUCT(
duke@435 2527 if (_spoolHead == NULL) { // all buffers fully consumed
duke@435 2528 assert(_spoolTail == NULL && _nextIndex == 1,
duke@435 2529 "spool buffers processing inconsistency");
duke@435 2530 }
duke@435 2531 )
duke@435 2532 }
duke@435 2533 return hdr;
duke@435 2534 }
duke@435 2535
duke@435 2536 void PromotionInfo::track(PromotedObject* trackOop) {
duke@435 2537 track(trackOop, oop(trackOop)->klass());
duke@435 2538 }
duke@435 2539
duke@435 2540 void PromotionInfo::track(PromotedObject* trackOop, klassOop klassOfOop) {
duke@435 2541 // make a copy of header as it may need to be spooled
duke@435 2542 markOop mark = oop(trackOop)->mark();
duke@435 2543 trackOop->clearNext();
duke@435 2544 if (mark->must_be_preserved_for_cms_scavenge(klassOfOop)) {
duke@435 2545 // save non-prototypical header, and mark oop
duke@435 2546 saveDisplacedHeader(mark);
duke@435 2547 trackOop->setDisplacedMark();
duke@435 2548 } else {
duke@435 2549 // we'd like to assert something like the following:
duke@435 2550 // assert(mark == markOopDesc::prototype(), "consistency check");
duke@435 2551 // ... but the above won't work because the age bits have not (yet) been
duke@435 2552 // cleared. The remainder of the check would be identical to the
duke@435 2553 // condition checked in must_be_preserved() above, so we don't really
duke@435 2554 // have anything useful to check here!
duke@435 2555 }
duke@435 2556 if (_promoTail != NULL) {
duke@435 2557 assert(_promoHead != NULL, "List consistency");
duke@435 2558 _promoTail->setNext(trackOop);
duke@435 2559 _promoTail = trackOop;
duke@435 2560 } else {
duke@435 2561 assert(_promoHead == NULL, "List consistency");
duke@435 2562 _promoHead = _promoTail = trackOop;
duke@435 2563 }
duke@435 2564 // Mark as newly promoted, so we can skip over such objects
duke@435 2565 // when scanning dirty cards
duke@435 2566 assert(!trackOop->hasPromotedMark(), "Should not have been marked");
duke@435 2567 trackOop->setPromotedMark();
duke@435 2568 }
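// For example, in track() above: an object promoted while its header carries
// an installed identity hash does not have the prototypical mark word, so
// must_be_preserved_for_cms_scavenge() answers true; the header is copied
// into the spool area by saveDisplacedHeader() and the promoted copy is
// tagged via setDisplacedMark(), so that code walking the promoted list can
// later recover the real header with nextDisplacedHeader().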
duke@435 2569
duke@435 2570 // Save the given displaced header, incrementing the pointer and
duke@435 2571 // obtaining more spool area as necessary.
duke@435 2572 void PromotionInfo::saveDisplacedHeader(markOop hdr) {
duke@435 2573 assert(_spoolHead != NULL && _spoolTail != NULL,
duke@435 2574 "promotionInfo inconsistency");
duke@435 2575 assert(_spoolTail->bufferSize > _nextIndex, "Off by one error at tail?");
duke@435 2576 _spoolTail->displacedHdr[_nextIndex] = hdr;
duke@435 2577 // Spool forward
duke@435 2578 if (++_nextIndex == _spoolTail->bufferSize) { // last location in this block
duke@435 2579 // get a new spooling block
duke@435 2580 assert(_spoolTail->nextSpoolBlock == NULL, "tail should terminate spool list");
duke@435 2581 _splice_point = _spoolTail; // save for splicing
duke@435 2582 _spoolTail->nextSpoolBlock = getSpoolBlock(); // might fail
duke@435 2583 _spoolTail = _spoolTail->nextSpoolBlock; // might become NULL ...
duke@435 2584 // ... but will attempt filling before next promotion attempt
duke@435 2585 _nextIndex = 1;
duke@435 2586 }
duke@435 2587 }
duke@435 2588
duke@435 2589 // Ensure that spooling space exists. Return false if spooling space
duke@435 2590 // could not be obtained.
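// The block obtained below is linked into the spool list in one of three
// ways: if the list is entirely empty, it becomes both head and tail; if
// only the tail has been exhausted (head non-NULL, tail NULL), it is spliced
// in after the block remembered in _splice_point; otherwise it is simply
// appended after the current tail.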
duke@435 2591 bool PromotionInfo::ensure_spooling_space_work() {
duke@435 2592 assert(!has_spooling_space(), "Only call when there is no spooling space");
duke@435 2593 // Try and obtain more spooling space
duke@435 2594 SpoolBlock* newSpool = getSpoolBlock();
duke@435 2595 assert(newSpool == NULL ||
duke@435 2596 (newSpool->bufferSize != 0 && newSpool->nextSpoolBlock == NULL),
duke@435 2597 "getSpoolBlock() sanity check");
duke@435 2598 if (newSpool == NULL) {
duke@435 2599 return false;
duke@435 2600 }
duke@435 2601 _nextIndex = 1;
duke@435 2602 if (_spoolTail == NULL) {
duke@435 2603 _spoolTail = newSpool;
duke@435 2604 if (_spoolHead == NULL) {
duke@435 2605 _spoolHead = newSpool;
duke@435 2606 _firstIndex = 1;
duke@435 2607 } else {
duke@435 2608 assert(_splice_point != NULL && _splice_point->nextSpoolBlock == NULL,
duke@435 2609 "Splice point invariant");
duke@435 2610 // Extra check that _splice_point is connected to list
duke@435 2611 #ifdef ASSERT
duke@435 2612 {
duke@435 2613 SpoolBlock* blk = _spoolHead;
duke@435 2614 for (; blk->nextSpoolBlock != NULL;
duke@435 2615 blk = blk->nextSpoolBlock);
duke@435 2616 assert(blk != NULL && blk == _splice_point,
duke@435 2617 "Splice point incorrect");
duke@435 2618 }
duke@435 2619 #endif // ASSERT
duke@435 2620 _splice_point->nextSpoolBlock = newSpool;
duke@435 2621 }
duke@435 2622 } else {
duke@435 2623 assert(_spoolHead != NULL, "spool list consistency");
duke@435 2624 _spoolTail->nextSpoolBlock = newSpool;
duke@435 2625 _spoolTail = newSpool;
duke@435 2626 }
duke@435 2627 return true;
duke@435 2628 }
duke@435 2629
duke@435 2630 // Get a free spool buffer from the free pool, getting a new block
duke@435 2631 // from the heap if necessary.
duke@435 2632 SpoolBlock* PromotionInfo::getSpoolBlock() {
duke@435 2633 SpoolBlock* res;
duke@435 2634 if ((res = _spareSpool) != NULL) {
duke@435 2635 _spareSpool = _spareSpool->nextSpoolBlock;
duke@435 2636 res->nextSpoolBlock = NULL;
duke@435 2637 } else { // spare spool exhausted, get some from heap
duke@435 2638 res = (SpoolBlock*)(space()->allocateScratch(refillSize()));
duke@435 2639 if (res != NULL) {
duke@435 2640 res->init();
duke@435 2641 }
duke@435 2642 }
duke@435 2643 assert(res == NULL || res->nextSpoolBlock == NULL, "postcondition");
duke@435 2644 return res;
duke@435 2645 }
duke@435 2646
duke@435 2647 void PromotionInfo::startTrackingPromotions() {
duke@435 2648 assert(_spoolHead == _spoolTail && _firstIndex == _nextIndex,
duke@435 2649 "spooling inconsistency?");
duke@435 2650 _firstIndex = _nextIndex = 1;
duke@435 2651 _tracking = true;
duke@435 2652 }
duke@435 2653
ysr@1580 2654 #define CMSPrintPromoBlockInfo 1
ysr@1580 2655
ysr@1580 2656 void PromotionInfo::stopTrackingPromotions(uint worker_id) {
duke@435 2657 assert(_spoolHead == _spoolTail && _firstIndex == _nextIndex,
duke@435 2658 "spooling inconsistency?");
duke@435 2659 _firstIndex = _nextIndex = 1;
duke@435 2660 _tracking = false;
ysr@1580 2661 if (CMSPrintPromoBlockInfo > 1) {
ysr@1580 2662 print_statistics(worker_id);
ysr@1580 2663 }
ysr@1580 2664 }
ysr@1580 2665
ysr@1580 2666 void PromotionInfo::print_statistics(uint worker_id) const {
ysr@1580 2667 assert(_spoolHead == _spoolTail && _firstIndex == _nextIndex,
ysr@1580 2668 "Else will undercount");
ysr@1580 2669 assert(CMSPrintPromoBlockInfo > 0, "Else unnecessary call");
ysr@1580 2670 // Count the number of blocks and slots in the free pool
ysr@1580 2671 size_t slots = 0;
ysr@1580 2672 size_t blocks = 0;
ysr@1580 2673 for (SpoolBlock* cur_spool = _spareSpool;
ysr@1580 2674 cur_spool != NULL;
ysr@1580 2675 cur_spool = cur_spool->nextSpoolBlock) {
ysr@1580 2676 // the first entry is just a self-pointer; indices 1 through
ysr@1580 2677 // bufferSize - 1 are occupied (thus, bufferSize - 1 slots).
ysr@1580 2678 guarantee((void*)cur_spool->displacedHdr == (void*)&cur_spool->displacedHdr,
ysr@1580 2679 "first entry of displacedHdr should be self-referential");
ysr@1580 2680 slots += cur_spool->bufferSize - 1;
ysr@1580 2681 blocks++;
ysr@1580 2682 }
ysr@1580 2683 if (_spoolHead != NULL) {
ysr@1580 2684 slots += _spoolHead->bufferSize - 1;
ysr@1580 2685 blocks++;
ysr@1580 2686 }
ysr@1580 2687 gclog_or_tty->print_cr(" [worker %d] promo_blocks = %d, promo_slots = %d ",
ysr@1580 2688 worker_id, blocks, slots);
duke@435 2689 }
duke@435 2690
duke@435 2691 // When _spoolTail is not NULL, then the slot <_spoolTail, _nextIndex>
duke@435 2692 // points to the next slot available for filling.
duke@435 2693 // The set of slots holding displaced headers are then all those in the
duke@435 2694 // right-open interval denoted by:
duke@435 2695 //
duke@435 2696 // [ <_spoolHead, _firstIndex>, <_spoolTail, _nextIndex> )
duke@435 2697 //
duke@435 2698 // When _spoolTail is NULL, then the set of slots with displaced headers
duke@435 2699 // is all those starting at the slot <_spoolHead, _firstIndex> and
duke@435 2700 // going up to the last slot of last block in the linked list.
duke@435 2701 // In this latter case, _splice_point points to the tail block of
duke@435 2702 // this linked list of blocks holding displaced headers.
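// A small worked example, with a bufferSize of 5 assumed purely for
// illustration: suppose there are two blocks, _spoolHead == A and
// _spoolTail == B, with _firstIndex == 3 and _nextIndex == 2. The occupied
// slots are then <A,3>, <A,4>, <B,1>, i.e. three displaced headers. The
// block walk in verify() below counts only A (it stops at _spoolTail),
// contributing bufferSize - 1 == 4 slots, and the adjustment
// -(_firstIndex - 1) + (_nextIndex - 1) == -2 + 1 == -1 brings the total to
// the expected 3.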
duke@435 2703 void PromotionInfo::verify() const {
duke@435 2704 // Verify the following:
duke@435 2705 // 1. the number of displaced headers matches the number of promoted
duke@435 2706 // objects that have displaced headers
duke@435 2707 // 2. each promoted object lies in this space
duke@435 2708 debug_only(
duke@435 2709 PromotedObject* junk = NULL;
duke@435 2710 assert(junk->next_addr() == (void*)(oop(junk)->mark_addr()),
duke@435 2711 "Offset of PromotedObject::_next is expected to align with "
duke@435 2712 " the OopDesc::_mark within OopDesc");
duke@435 2713 )
duke@435 2714 // FIXME: guarantee????
duke@435 2715 guarantee(_spoolHead == NULL || _spoolTail != NULL ||
duke@435 2716 _splice_point != NULL, "list consistency");
duke@435 2717 guarantee(_promoHead == NULL || _promoTail != NULL, "list consistency");
duke@435 2718 // count the number of objects with displaced headers
duke@435 2719 size_t numObjsWithDisplacedHdrs = 0;
duke@435 2720 for (PromotedObject* curObj = _promoHead; curObj != NULL; curObj = curObj->next()) {
duke@435 2721 guarantee(space()->is_in_reserved((HeapWord*)curObj), "Containment");
duke@435 2722 // the last promoted object may fail the mark() != NULL test of is_oop().
duke@435 2723 guarantee(curObj->next() == NULL || oop(curObj)->is_oop(), "must be an oop");
duke@435 2724 if (curObj->hasDisplacedMark()) {
duke@435 2725 numObjsWithDisplacedHdrs++;
duke@435 2726 }
duke@435 2727 }
duke@435 2728 // Count the number of displaced headers
duke@435 2729 size_t numDisplacedHdrs = 0;
duke@435 2730 for (SpoolBlock* curSpool = _spoolHead;
duke@435 2731 curSpool != _spoolTail && curSpool != NULL;
duke@435 2732 curSpool = curSpool->nextSpoolBlock) {
duke@435 2733 // the first entry is just a self-pointer; indices 1 through
duke@435 2734 // bufferSize - 1 are occupied (thus, bufferSize - 1 slots).
duke@435 2735 guarantee((void*)curSpool->displacedHdr == (void*)&curSpool->displacedHdr,
duke@435 2736 "first entry of displacedHdr should be self-referential");
duke@435 2737 numDisplacedHdrs += curSpool->bufferSize - 1;
duke@435 2738 }
duke@435 2739 guarantee((_spoolHead == _spoolTail) == (numDisplacedHdrs == 0),
duke@435 2740 "internal consistency");
duke@435 2741 guarantee(_spoolTail != NULL || _nextIndex == 1,
duke@435 2742 "Inconsistency between _spoolTail and _nextIndex");
duke@435 2743 // We overcounted (_firstIndex-1) worth of slots in block
duke@435 2744 // _spoolHead and we undercounted (_nextIndex-1) worth of
duke@435 2745 // slots in block _spoolTail. We make an appropriate
duke@435 2746 // adjustment by subtracting the first and adding the
duke@435 2747 // second: - (_firstIndex - 1) + (_nextIndex - 1)
duke@435 2748 numDisplacedHdrs += (_nextIndex - _firstIndex);
duke@435 2749 guarantee(numDisplacedHdrs == numObjsWithDisplacedHdrs, "Displaced hdr count");
duke@435 2750 }
duke@435 2751
ysr@1580 2752 void PromotionInfo::print_on(outputStream* st) const {
ysr@1580 2753 SpoolBlock* curSpool = NULL;
ysr@1580 2754 size_t i = 0;
ysr@1580 2755 st->print_cr("start & end indices: [" SIZE_FORMAT ", " SIZE_FORMAT ")",
ysr@1580 2756 _firstIndex, _nextIndex);
ysr@1580 2757 for (curSpool = _spoolHead; curSpool != _spoolTail && curSpool != NULL;
ysr@1580 2758 curSpool = curSpool->nextSpoolBlock) {
ysr@1580 2759 curSpool->print_on(st);
ysr@1580 2760 st->print_cr(" active ");
ysr@1580 2761 i++;
ysr@1580 2762 }
ysr@1580 2763 for (curSpool = _spoolTail; curSpool != NULL;
ysr@1580 2764 curSpool = curSpool->nextSpoolBlock) {
ysr@1580 2765 curSpool->print_on(st);
ysr@1580 2766 st->print_cr(" inactive ");
ysr@1580 2767 i++;
ysr@1580 2768 }
ysr@1580 2769 for (curSpool = _spareSpool; curSpool != NULL;
ysr@1580 2770 curSpool = curSpool->nextSpoolBlock) {
ysr@1580 2771 curSpool->print_on(st);
ysr@1580 2772 st->print_cr(" free ");
ysr@1580 2773 i++;
ysr@1580 2774 }
ysr@1580 2775 st->print_cr(SIZE_FORMAT " header spooling blocks", i);
ysr@1580 2776 }
ysr@1580 2777
ysr@1580 2778 void SpoolBlock::print_on(outputStream* st) const {
ysr@1580 2779 st->print("[" PTR_FORMAT "," PTR_FORMAT "), " SIZE_FORMAT " HeapWords -> " PTR_FORMAT,
ysr@1580 2780 this, (HeapWord*)displacedHdr + bufferSize,
ysr@1580 2781 bufferSize, nextSpoolBlock);
ysr@1580 2782 }
ysr@1580 2783
ysr@1580 2784 ///////////////////////////////////////////////////////////////////////////
ysr@1580 2785 // CFLS_LAB
ysr@1580 2786 ///////////////////////////////////////////////////////////////////////////
ysr@1580 2787
ysr@1580 2788 #define VECTOR_257(x) \
ysr@1580 2789 /* 1 2 3 4 5 6 7 8 9 1x 11 12 13 14 15 16 17 18 19 2x 21 22 23 24 25 26 27 28 29 3x 31 32 */ \
ysr@1580 2790 { x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, \
ysr@1580 2791 x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, \
ysr@1580 2792 x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, \
ysr@1580 2793 x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, \
ysr@1580 2794 x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, \
ysr@1580 2795 x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, \
ysr@1580 2796 x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, \
ysr@1580 2797 x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, \
ysr@1580 2798 x }
ysr@1580 2799
ysr@1580 2800 // Initialize with default setting of CMSParPromoteBlocksToClaim, _not_
ysr@1580 2801 // OldPLABSize, whose static default is different; if overridden at the
ysr@1580 2802 // command-line, this will get reinitialized via a call to
ysr@1580 2803 // modify_initialization() below.
ysr@1580 2804 AdaptiveWeightedAverage CFLS_LAB::_blocks_to_claim[] =
ysr@1580 2805 VECTOR_257(AdaptiveWeightedAverage(OldPLABWeight, (float)CMSParPromoteBlocksToClaim));
ysr@1580 2806 size_t CFLS_LAB::_global_num_blocks[] = VECTOR_257(0);
ysr@1580 2807 int CFLS_LAB::_global_num_workers[] = VECTOR_257(0);
duke@435 2808
duke@435 2809 CFLS_LAB::CFLS_LAB(CompactibleFreeListSpace* cfls) :
duke@435 2810 _cfls(cfls)
duke@435 2811 {
ysr@1580 2812 assert(CompactibleFreeListSpace::IndexSetSize == 257, "Modify VECTOR_257() macro above");
duke@435 2813 for (size_t i = CompactibleFreeListSpace::IndexSetStart;
duke@435 2814 i < CompactibleFreeListSpace::IndexSetSize;
duke@435 2815 i += CompactibleFreeListSpace::IndexSetStride) {
duke@435 2816 _indexedFreeList[i].set_size(i);
ysr@1580 2817 _num_blocks[i] = 0;
ysr@1580 2818 }
ysr@1580 2819 }
ysr@1580 2820
ysr@1580 2821 static bool _CFLS_LAB_modified = false;
ysr@1580 2822
ysr@1580 2823 void CFLS_LAB::modify_initialization(size_t n, unsigned wt) {
ysr@1580 2824 assert(!_CFLS_LAB_modified, "Call only once");
ysr@1580 2825 _CFLS_LAB_modified = true;
ysr@1580 2826 for (size_t i = CompactibleFreeListSpace::IndexSetStart;
ysr@1580 2827 i < CompactibleFreeListSpace::IndexSetSize;
ysr@1580 2828 i += CompactibleFreeListSpace::IndexSetStride) {
ysr@1580 2829 _blocks_to_claim[i].modify(n, wt, true /* force */);
duke@435 2830 }
duke@435 2831 }
duke@435 2832
duke@435 2833 HeapWord* CFLS_LAB::alloc(size_t word_sz) {
duke@435 2834 FreeChunk* res;
duke@435 2835 word_sz = _cfls->adjustObjectSize(word_sz);
duke@435 2836 if (word_sz >= CompactibleFreeListSpace::IndexSetSize) {
duke@435 2837 // This locking manages sync with other large object allocations.
duke@435 2838 MutexLockerEx x(_cfls->parDictionaryAllocLock(),
duke@435 2839 Mutex::_no_safepoint_check_flag);
duke@435 2840 res = _cfls->getChunkFromDictionaryExact(word_sz);
duke@435 2841 if (res == NULL) return NULL;
duke@435 2842 } else {
duke@435 2843 FreeList* fl = &_indexedFreeList[word_sz];
duke@435 2844 if (fl->count() == 0) {
duke@435 2845 // Attempt to refill this local free list.
ysr@1580 2846 get_from_global_pool(word_sz, fl);
duke@435 2847 // If it didn't work, give up.
duke@435 2848 if (fl->count() == 0) return NULL;
duke@435 2849 }
duke@435 2850 res = fl->getChunkAtHead();
duke@435 2851 assert(res != NULL, "Why was count non-zero?");
duke@435 2852 }
duke@435 2853 res->markNotFree();
duke@435 2854 assert(!res->isFree(), "shouldn't be marked free");
coleenp@622 2855 assert(oop(res)->klass_or_null() == NULL, "should look uninitialized");
duke@435 2856 // mangle a just allocated object with a distinct pattern.
duke@435 2857 debug_only(res->mangleAllocated(word_sz));
duke@435 2858 return (HeapWord*)res;
duke@435 2859 }
duke@435 2860
ysr@1580 2861 // Get a chunk of blocks of the right size and update related
ysr@1580 2862 // book-keeping stats
ysr@1580 2863 void CFLS_LAB::get_from_global_pool(size_t word_sz, FreeList* fl) {
ysr@1580 2864 // Get the #blocks we want to claim
ysr@1580 2865 size_t n_blks = (size_t)_blocks_to_claim[word_sz].average();
ysr@1580 2866 assert(n_blks > 0, "Error");
ysr@1580 2867 assert(ResizePLAB || n_blks == OldPLABSize, "Error");
ysr@1580 2868 // In some cases, when the application has a phase change,
ysr@1580 2869 // there may be a sudden and sharp shift in the object survival
ysr@1580 2870 // profile, and updating the counts at the end of a scavenge
ysr@1580 2871 // may not be quick enough, giving rise to large scavenge pauses
ysr@1580 2872 // during these phase changes. It is beneficial to detect such
ysr@1580 2873 // changes on-the-fly during a scavenge and avoid such a phase-change
ysr@1580 2874 // pothole. The following code is a heuristic attempt to do that.
ysr@1580 2875 // It is protected by a product flag until we have gained
ysr@1580 2876 // enough experience with this heuristic and fine-tuned its behaviour.
ysr@1580 2877 // WARNING: This might increase fragmentation if we overreact to
ysr@1580 2878 // small spikes, so some kind of historical smoothing of this reactive
ysr@1580 2879 // adjustment, informed by further experience with it, might be useful.
ysr@1580 2880 // Lacking sufficient experience, CMSOldPLABResizeQuicker is disabled by
ysr@1580 2881 // default.
ysr@1580 2882 if (ResizeOldPLAB && CMSOldPLABResizeQuicker) {
ysr@1580 2883 size_t multiple = _num_blocks[word_sz]/(CMSOldPLABToleranceFactor*CMSOldPLABNumRefills*n_blks);
ysr@1580 2884 n_blks += CMSOldPLABReactivityFactor*multiple*n_blks;
ysr@1580 2885 n_blks = MIN2(n_blks, CMSOldPLABMax);
ysr@1580 2886 }
ysr@1580 2887 assert(n_blks > 0, "Error");
ysr@1580 2888 _cfls->par_get_chunk_of_blocks(word_sz, n_blks, fl);
ysr@1580 2889 // Update stats table entry for this block size
ysr@1580 2890 _num_blocks[word_sz] += fl->count();
ysr@1580 2891 }
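// A sketch of the reactive adjustment above, with illustrative (not
// default-checked) flag values: suppose n_blks == 10,
// CMSOldPLABToleranceFactor == 4, CMSOldPLABNumRefills == 4 and
// CMSOldPLABReactivityFactor == 2. If this worker has already consumed
// _num_blocks[word_sz] == 400 blocks of this size during the current
// scavenge, then multiple == 400/(4*4*10) == 2 and the claim grows by
// 2*2*10 == 40 to n_blks == 50, subject to the CMSOldPLABMax clamp.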
ysr@1580 2892
ysr@1580 2893 void CFLS_LAB::compute_desired_plab_size() {
ysr@1580 2894 for (size_t i = CompactibleFreeListSpace::IndexSetStart;
duke@435 2895 i < CompactibleFreeListSpace::IndexSetSize;
duke@435 2896 i += CompactibleFreeListSpace::IndexSetStride) {
ysr@1580 2897 assert((_global_num_workers[i] == 0) == (_global_num_blocks[i] == 0),
ysr@1580 2898 "Counter inconsistency");
ysr@1580 2899 if (_global_num_workers[i] > 0) {
ysr@1580 2900 // Need to smooth wrt historical average
ysr@1580 2901 if (ResizeOldPLAB) {
ysr@1580 2902 _blocks_to_claim[i].sample(
ysr@1580 2903 MAX2((size_t)CMSOldPLABMin,
ysr@1580 2904 MIN2((size_t)CMSOldPLABMax,
ysr@1580 2905 _global_num_blocks[i]/(_global_num_workers[i]*CMSOldPLABNumRefills))));
ysr@1580 2906 }
ysr@1580 2907 // Reset counters for next round
ysr@1580 2908 _global_num_workers[i] = 0;
ysr@1580 2909 _global_num_blocks[i] = 0;
ysr@1580 2910 if (PrintOldPLAB) {
ysr@1580 2911 gclog_or_tty->print_cr("[%d]: %d", i, (size_t)_blocks_to_claim[i].average());
ysr@1580 2912 }
duke@435 2913 }
duke@435 2914 }
duke@435 2915 }
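// For illustration of the sampling above (CMSOldPLABNumRefills == 4
// assumed): if 4 workers together used _global_num_blocks[i] == 3200 blocks
// of size i during the last scavenge, the new sample is 3200/(4*4) == 200
// blocks per refill, clamped to [CMSOldPLABMin, CMSOldPLABMax] and folded
// into the weighted average _blocks_to_claim[i] that get_from_global_pool()
// consults.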
duke@435 2916
ysr@1580 2917 void CFLS_LAB::retire(int tid) {
ysr@1580 2918 // We run this single threaded with the world stopped;
ysr@1580 2919 // so no need for locks and such.
ysr@1580 2920 #define CFLS_LAB_PARALLEL_ACCESS 0
ysr@1580 2921 NOT_PRODUCT(Thread* t = Thread::current();)
ysr@1580 2922 assert(Thread::current()->is_VM_thread(), "Error");
ysr@1580 2923 assert(CompactibleFreeListSpace::IndexSetStart == CompactibleFreeListSpace::IndexSetStride,
ysr@1580 2924 "Will access to uninitialized slot below");
ysr@1580 2925 #if CFLS_LAB_PARALLEL_ACCESS
ysr@1580 2926 for (size_t i = CompactibleFreeListSpace::IndexSetSize - 1;
ysr@1580 2927 i > 0;
ysr@1580 2928 i -= CompactibleFreeListSpace::IndexSetStride) {
ysr@1580 2929 #else // CFLS_LAB_PARALLEL_ACCESS
ysr@1580 2930 for (size_t i = CompactibleFreeListSpace::IndexSetStart;
ysr@1580 2931 i < CompactibleFreeListSpace::IndexSetSize;
ysr@1580 2932 i += CompactibleFreeListSpace::IndexSetStride) {
ysr@1580 2933 #endif // !CFLS_LAB_PARALLEL_ACCESS
ysr@1580 2934 assert(_num_blocks[i] >= (size_t)_indexedFreeList[i].count(),
ysr@1580 2935 "Can't retire more than what we obtained");
ysr@1580 2936 if (_num_blocks[i] > 0) {
ysr@1580 2937 size_t num_retire = _indexedFreeList[i].count();
ysr@1580 2938 assert(_num_blocks[i] > num_retire, "Should have used at least one");
ysr@1580 2939 {
ysr@1580 2940 #if CFLS_LAB_PARALLEL_ACCESS
ysr@1580 2941 MutexLockerEx x(_cfls->_indexedFreeListParLocks[i],
ysr@1580 2942 Mutex::_no_safepoint_check_flag);
ysr@1580 2943 #endif // CFLS_LAB_PARALLEL_ACCESS
ysr@1580 2944 // Update globals stats for num_blocks used
ysr@1580 2945 _global_num_blocks[i] += (_num_blocks[i] - num_retire);
ysr@1580 2946 _global_num_workers[i]++;
ysr@1580 2947 assert(_global_num_workers[i] <= (ssize_t)ParallelGCThreads, "Too big");
ysr@1580 2948 if (num_retire > 0) {
ysr@1580 2949 _cfls->_indexedFreeList[i].prepend(&_indexedFreeList[i]);
ysr@1580 2950 // Reset this list.
ysr@1580 2951 _indexedFreeList[i] = FreeList();
ysr@1580 2952 _indexedFreeList[i].set_size(i);
ysr@1580 2953 }
ysr@1580 2954 }
ysr@1580 2955 if (PrintOldPLAB) {
ysr@1580 2956 gclog_or_tty->print_cr("%d[%d]: %d/%d/%d",
ysr@1580 2957 tid, i, num_retire, _num_blocks[i], (size_t)_blocks_to_claim[i].average());
ysr@1580 2958 }
ysr@1580 2959 // Reset stats for next round
ysr@1580 2960 _num_blocks[i] = 0;
ysr@1580 2961 }
ysr@1580 2962 }
ysr@1580 2963 }
ysr@1580 2964
ysr@1580 2965 void CompactibleFreeListSpace:: par_get_chunk_of_blocks(size_t word_sz, size_t n, FreeList* fl) {
duke@435 2966 assert(fl->count() == 0, "Precondition.");
duke@435 2967 assert(word_sz < CompactibleFreeListSpace::IndexSetSize,
duke@435 2968 "Precondition");
duke@435 2969
ysr@1580 2970 // We'll try all multiples of word_sz in the indexed set, starting with
ysr@1580 2971 // word_sz itself and, if CMSSplitIndexedFreeListBlocks, try larger multiples,
ysr@1580 2972 // then try getting a big chunk and splitting it.
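// For example, with word_sz == 8 and CMSSplitIndexedFreeListBlocks enabled,
// the loop below inspects the 8-word list first, then the 16-word list
// (k == 2), the 24-word list (k == 3), and so on while k*word_sz stays below
// IndexSetSize; a chunk taken from, say, the 24-word list is carved into
// three 8-word blocks before being handed to "fl".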
ysr@1580 2973 {
ysr@1580 2974 bool found;
ysr@1580 2975 int k;
ysr@1580 2976 size_t cur_sz;
ysr@1580 2977 for (k = 1, cur_sz = k * word_sz, found = false;
ysr@1580 2978 (cur_sz < CompactibleFreeListSpace::IndexSetSize) &&
ysr@1580 2979 (CMSSplitIndexedFreeListBlocks || k <= 1);
ysr@1580 2980 k++, cur_sz = k * word_sz) {
ysr@1580 2981 FreeList* gfl = &_indexedFreeList[cur_sz];
ysr@1580 2982 FreeList fl_for_cur_sz; // Empty.
ysr@1580 2983 fl_for_cur_sz.set_size(cur_sz);
ysr@1580 2984 {
ysr@1580 2985 MutexLockerEx x(_indexedFreeListParLocks[cur_sz],
ysr@1580 2986 Mutex::_no_safepoint_check_flag);
ysr@1580 2987 if (gfl->count() != 0) {
ysr@1580 2988 // nn is the number of chunks of size cur_sz that
ysr@1580 2989 // we'd need to split k-ways each, in order to create
ysr@1580 2990 // "n" chunks of size word_sz each.
ysr@1580 2991 const size_t nn = MAX2(n/k, (size_t)1);
ysr@1580 2992 gfl->getFirstNChunksFromList(nn, &fl_for_cur_sz);
ysr@1580 2993 found = true;
ysr@1580 2994 if (k > 1) {
ysr@1580 2995 // Update split death stats for the cur_sz-size blocks list:
ysr@1580 2996 // we increment the split death count by the number of blocks
ysr@1580 2997 // we just took from the cur_sz-size blocks list and which
ysr@1580 2998 // we will be splitting below.
ysr@1580 2999 ssize_t deaths = _indexedFreeList[cur_sz].splitDeaths() +
ysr@1580 3000 fl_for_cur_sz.count();
ysr@1580 3001 _indexedFreeList[cur_sz].set_splitDeaths(deaths);
ysr@1580 3002 }
ysr@1580 3003 }
ysr@1580 3004 }
ysr@1580 3005 // Now transfer fl_for_cur_sz to fl. Common case, we hope, is k = 1.
ysr@1580 3006 if (found) {
ysr@1580 3007 if (k == 1) {
ysr@1580 3008 fl->prepend(&fl_for_cur_sz);
ysr@1580 3009 } else {
ysr@1580 3010 // Divide each block on fl_for_cur_sz up k ways.
ysr@1580 3011 FreeChunk* fc;
ysr@1580 3012 while ((fc = fl_for_cur_sz.getChunkAtHead()) != NULL) {
ysr@1580 3013 // Must do this in reverse order, so that anybody attempting to
ysr@1580 3014 // access the main chunk sees it as a single free block until we
ysr@1580 3015 // change it.
ysr@1580 3016 size_t fc_size = fc->size();
ysr@1580 3017 for (int i = k-1; i >= 0; i--) {
ysr@1580 3018 FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz);
ysr@1580 3019 ffc->setSize(word_sz);
ysr@1580 3020 ffc->linkNext(NULL);
ysr@1580 3021 ffc->linkPrev(NULL); // Mark as a free block for other (parallel) GC threads.
ysr@1580 3022 // Above must occur before BOT is updated below.
ysr@1580 3023 // splitting from the right; at this point fc_size == (i + 1) * word_sz
ysr@1580 3024 _bt.mark_block((HeapWord*)ffc, word_sz);
ysr@1580 3025 fc_size -= word_sz;
ysr@1580 3026 _bt.verify_not_unallocated((HeapWord*)ffc, ffc->size());
ysr@1580 3027 _bt.verify_single_block((HeapWord*)fc, fc_size);
ysr@1580 3028 _bt.verify_single_block((HeapWord*)ffc, ffc->size());
ysr@1580 3029 // Push this on "fl".
ysr@1580 3030 fl->returnChunkAtHead(ffc);
ysr@1580 3031 }
ysr@1580 3032 // TRAP
ysr@1580 3033 assert(fl->tail()->next() == NULL, "List invariant.");
ysr@1580 3034 }
ysr@1580 3035 }
ysr@1580 3036 // Update birth stats for this block size.
ysr@1580 3037 size_t num = fl->count();
ysr@1580 3038 MutexLockerEx x(_indexedFreeListParLocks[word_sz],
ysr@1580 3039 Mutex::_no_safepoint_check_flag);
ysr@1580 3040 ssize_t births = _indexedFreeList[word_sz].splitBirths() + num;
ysr@1580 3041 _indexedFreeList[word_sz].set_splitBirths(births);
ysr@1580 3042 return;
duke@435 3043 }
duke@435 3044 }
duke@435 3045 }
duke@435 3046 // Otherwise, we'll split a block from the dictionary.
duke@435 3047 FreeChunk* fc = NULL;
duke@435 3048 FreeChunk* rem_fc = NULL;
duke@435 3049 size_t rem;
duke@435 3050 {
duke@435 3051 MutexLockerEx x(parDictionaryAllocLock(),
duke@435 3052 Mutex::_no_safepoint_check_flag);
duke@435 3053 while (n > 0) {
duke@435 3054 fc = dictionary()->getChunk(MAX2(n * word_sz,
duke@435 3055 _dictionary->minSize()),
duke@435 3056 FreeBlockDictionary::atLeast);
duke@435 3057 if (fc != NULL) {
duke@435 3058 _bt.allocated((HeapWord*)fc, fc->size()); // update _unallocated_blk
duke@435 3059 dictionary()->dictCensusUpdate(fc->size(),
duke@435 3060 true /*split*/,
duke@435 3061 false /*birth*/);
duke@435 3062 break;
duke@435 3063 } else {
duke@435 3064 n--;
duke@435 3065 }
duke@435 3066 }
duke@435 3067 if (fc == NULL) return;
ysr@1580 3068 assert((ssize_t)n >= 1, "Control point invariant");
duke@435 3069 // Otherwise, split up that block.
ysr@1580 3070 const size_t nn = fc->size() / word_sz;
duke@435 3071 n = MIN2(nn, n);
ysr@1580 3072 assert((ssize_t)n >= 1, "Control point invariant");
duke@435 3073 rem = fc->size() - n * word_sz;
duke@435 3074 // If there is a remainder, and it's too small, allocate one fewer.
duke@435 3075 if (rem > 0 && rem < MinChunkSize) {
duke@435 3076 n--; rem += word_sz;
duke@435 3077 }
jmasa@1583 3078 // Note that at this point we may have n == 0.
jmasa@1583 3079 assert((ssize_t)n >= 0, "Control point invariant");
jmasa@1583 3080
jmasa@1583 3081 // If n is 0, the chunk fc that was found is not large
jmasa@1583 3082 // enough to leave a viable remainder. We are unable to
jmasa@1583 3083 // allocate even one block. Return fc to the
jmasa@1583 3084 // dictionary and return, leaving "fl" empty.
jmasa@1583 3085 if (n == 0) {
jmasa@1583 3086 returnChunkToDictionary(fc);
jmasa@1583 3087 return;
jmasa@1583 3088 }
jmasa@1583 3089
duke@435 3090 // First return the remainder, if any.
duke@435 3091 // Note that we hold the lock until we decide if we're going to give
ysr@1580 3092 // back the remainder to the dictionary, since a concurrent allocation
duke@435 3093 // may otherwise see the heap as empty. (We're willing to take that
duke@435 3094 // hit if the block is a small block.)
duke@435 3095 if (rem > 0) {
duke@435 3096 size_t prefix_size = n * word_sz;
duke@435 3097 rem_fc = (FreeChunk*)((HeapWord*)fc + prefix_size);
duke@435 3098 rem_fc->setSize(rem);
duke@435 3099 rem_fc->linkNext(NULL);
duke@435 3100 rem_fc->linkPrev(NULL); // Mark as a free block for other (parallel) GC threads.
duke@435 3101 // Above must occur before BOT is updated below.
ysr@1580 3102 assert((ssize_t)n > 0 && prefix_size > 0 && rem_fc > fc, "Error");
duke@435 3103 _bt.split_block((HeapWord*)fc, fc->size(), prefix_size);
duke@435 3104 if (rem >= IndexSetSize) {
duke@435 3105 returnChunkToDictionary(rem_fc);
ysr@1580 3106 dictionary()->dictCensusUpdate(rem, true /*split*/, true /*birth*/);
duke@435 3107 rem_fc = NULL;
duke@435 3108 }
duke@435 3109 // Otherwise, return it to the small list below.
duke@435 3110 }
duke@435 3111 }
duke@435 3112 if (rem_fc != NULL) {
duke@435 3113 MutexLockerEx x(_indexedFreeListParLocks[rem],
duke@435 3114 Mutex::_no_safepoint_check_flag);
duke@435 3115 _bt.verify_not_unallocated((HeapWord*)rem_fc, rem_fc->size());
duke@435 3116 _indexedFreeList[rem].returnChunkAtHead(rem_fc);
duke@435 3117 smallSplitBirth(rem);
duke@435 3118 }
ysr@1580 3119 assert((ssize_t)n > 0 && fc != NULL, "Consistency");
duke@435 3120 // Now do the splitting up.
duke@435 3121 // Must do this in reverse order, so that anybody attempting to
duke@435 3122 // access the main chunk sees it as a single free block until we
duke@435 3123 // change it.
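// For instance, with n == 5 the loop below carves off the blocks at offsets
// 4*word_sz, 3*word_sz, 2*word_sz and 1*word_sz, pushing each onto "fl" as
// it is initialized, so a concurrent observer sees either the original large
// block or a fully formed word_sz block; only then is the leading chunk at
// offset 0 re-sized to word_sz.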
duke@435 3124 size_t fc_size = n * word_sz;
duke@435 3125 // All but first chunk in this loop
duke@435 3126 for (ssize_t i = n-1; i > 0; i--) {
duke@435 3127 FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz);
duke@435 3128 ffc->setSize(word_sz);
duke@435 3129 ffc->linkNext(NULL);
duke@435 3130 ffc->linkPrev(NULL); // Mark as a free block for other (parallel) GC threads.
duke@435 3131 // Above must occur before BOT is updated below.
duke@435 3132 // splitting from the right; at this point fc_size == (i + 1) * word_sz
duke@435 3133 _bt.mark_block((HeapWord*)ffc, word_sz);
duke@435 3134 fc_size -= word_sz;
duke@435 3135 _bt.verify_not_unallocated((HeapWord*)ffc, ffc->size());
duke@435 3136 _bt.verify_single_block((HeapWord*)ffc, ffc->size());
duke@435 3137 _bt.verify_single_block((HeapWord*)fc, fc_size);
duke@435 3138 // Push this on "fl".
duke@435 3139 fl->returnChunkAtHead(ffc);
duke@435 3140 }
duke@435 3141 // First chunk
duke@435 3142 fc->setSize(word_sz);
duke@435 3143 fc->linkNext(NULL);
duke@435 3144 fc->linkPrev(NULL);
duke@435 3145 _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
duke@435 3146 _bt.verify_single_block((HeapWord*)fc, fc->size());
duke@435 3147 fl->returnChunkAtHead(fc);
duke@435 3148
ysr@1580 3149 assert((ssize_t)n > 0 && (ssize_t)n == fl->count(), "Incorrect number of blocks");
duke@435 3150 {
ysr@1580 3151 // Update the stats for this block size.
duke@435 3152 MutexLockerEx x(_indexedFreeListParLocks[word_sz],
duke@435 3153 Mutex::_no_safepoint_check_flag);
ysr@1580 3154 const ssize_t births = _indexedFreeList[word_sz].splitBirths() + n;
ysr@1580 3155 _indexedFreeList[word_sz].set_splitBirths(births);
ysr@1580 3156 // ssize_t new_surplus = _indexedFreeList[word_sz].surplus() + n;
ysr@1580 3157 // _indexedFreeList[word_sz].set_surplus(new_surplus);
duke@435 3158 }
duke@435 3159
duke@435 3160 // TRAP
duke@435 3161 assert(fl->tail()->next() == NULL, "List invariant.");
duke@435 3162 }
duke@435 3163
duke@435 3164 // Set up the space's par_seq_tasks structure for work claiming
duke@435 3165 // for parallel rescan. See CMSParRemarkTask where this is currently used.
duke@435 3166 // XXX Need to suitably abstract and generalize this and the next
duke@435 3167 // method into one.
duke@435 3168 void
duke@435 3169 CompactibleFreeListSpace::
duke@435 3170 initialize_sequential_subtasks_for_rescan(int n_threads) {
duke@435 3171 // The "size" of each task is fixed according to rescan_task_size.
duke@435 3172 assert(n_threads > 0, "Unexpected n_threads argument");
duke@435 3173 const size_t task_size = rescan_task_size();
duke@435 3174 size_t n_tasks = (used_region().word_size() + task_size - 1)/task_size;
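// The division above rounds up: for illustration, a used region of 10000
// words with a task_size of 4096 yields n_tasks == 3, the first two tasks
// covering 4096 words each and the third the remaining 1808.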
ysr@775 3175 assert((n_tasks == 0) == used_region().is_empty(), "n_tasks incorrect");
ysr@775 3176 assert(n_tasks == 0 ||
ysr@775 3177 ((used_region().start() + (n_tasks - 1)*task_size < used_region().end()) &&
ysr@775 3178 (used_region().start() + n_tasks*task_size >= used_region().end())),
ysr@775 3179 "n_tasks calculation incorrect");
duke@435 3180 SequentialSubTasksDone* pst = conc_par_seq_tasks();
duke@435 3181 assert(!pst->valid(), "Clobbering existing data?");
duke@435 3182 pst->set_par_threads(n_threads);
duke@435 3183 pst->set_n_tasks((int)n_tasks);
duke@435 3184 }
duke@435 3185
duke@435 3186 // Set up the space's par_seq_tasks structure for work claiming
duke@435 3187 // for parallel concurrent marking. See CMSConcMarkTask where this is currently used.
duke@435 3188 void
duke@435 3189 CompactibleFreeListSpace::
duke@435 3190 initialize_sequential_subtasks_for_marking(int n_threads,
duke@435 3191 HeapWord* low) {
duke@435 3192 // The "size" of each task is fixed according to rescan_task_size.
duke@435 3193 assert(n_threads > 0, "Unexpected n_threads argument");
duke@435 3194 const size_t task_size = marking_task_size();
duke@435 3195 assert(task_size > CardTableModRefBS::card_size_in_words &&
duke@435 3196 (task_size % CardTableModRefBS::card_size_in_words == 0),
duke@435 3197 "Otherwise arithmetic below would be incorrect");
duke@435 3198 MemRegion span = _gen->reserved();
duke@435 3199 if (low != NULL) {
duke@435 3200 if (span.contains(low)) {
duke@435 3201 // Align low down to a card boundary so that
duke@435 3202 // we can use block_offset_careful() on span boundaries.
duke@435 3203 HeapWord* aligned_low = (HeapWord*)align_size_down((uintptr_t)low,
duke@435 3204 CardTableModRefBS::card_size);
duke@435 3205 // Clip span prefix at aligned_low
duke@435 3206 span = span.intersection(MemRegion(aligned_low, span.end()));
duke@435 3207 } else if (low > span.end()) {
duke@435 3208 span = MemRegion(low, low); // Null region
duke@435 3209 } // else use entire span
duke@435 3210 }
duke@435 3211 assert(span.is_empty() ||
duke@435 3212 ((uintptr_t)span.start() % CardTableModRefBS::card_size == 0),
duke@435 3213 "span should start at a card boundary");
duke@435 3214 size_t n_tasks = (span.word_size() + task_size - 1)/task_size;
duke@435 3215 assert((n_tasks == 0) == span.is_empty(), "Inconsistency");
duke@435 3216 assert(n_tasks == 0 ||
duke@435 3217 ((span.start() + (n_tasks - 1)*task_size < span.end()) &&
duke@435 3218 (span.start() + n_tasks*task_size >= span.end())),
ysr@775 3219 "n_tasks calculation incorrect");
duke@435 3220 SequentialSubTasksDone* pst = conc_par_seq_tasks();
duke@435 3221 assert(!pst->valid(), "Clobbering existing data?");
duke@435 3222 pst->set_par_threads(n_threads);
duke@435 3223 pst->set_n_tasks((int)n_tasks);
duke@435 3224 }
