src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp

author       jrose
date         Wed, 02 Jun 2010 22:45:42 -0700
changeset    1934:e9ff18c4ace7
parents      1907:c18cbe5936b8, 1926:2d127394260e
child        2071:be3f9c242c9d
permissions  -rw-r--r--

Merge

duke@435 1 /*
trims@1907 2 * Copyright (c) 2001, 2009, Oracle and/or its affiliates. All rights reserved.
duke@435 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
duke@435 4 *
duke@435 5 * This code is free software; you can redistribute it and/or modify it
duke@435 6 * under the terms of the GNU General Public License version 2 only, as
duke@435 7 * published by the Free Software Foundation.
duke@435 8 *
duke@435 9 * This code is distributed in the hope that it will be useful, but WITHOUT
duke@435 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
duke@435 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
duke@435 12 * version 2 for more details (a copy is included in the LICENSE file that
duke@435 13 * accompanied this code).
duke@435 14 *
duke@435 15 * You should have received a copy of the GNU General Public License version
duke@435 16 * 2 along with this work; if not, write to the Free Software Foundation,
duke@435 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
duke@435 18 *
trims@1907 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
trims@1907 20 * or visit www.oracle.com if you need additional information or have any
trims@1907 21 * questions.
duke@435 22 *
duke@435 23 */
duke@435 24
duke@435 25 # include "incls/_precompiled.incl"
duke@435 26 # include "incls/_compactibleFreeListSpace.cpp.incl"
duke@435 27
duke@435 28 /////////////////////////////////////////////////////////////////////////
duke@435 29 //// CompactibleFreeListSpace
duke@435 30 /////////////////////////////////////////////////////////////////////////
duke@435 31
duke@435 32 // highest ranked free list lock rank
duke@435 33 int CompactibleFreeListSpace::_lockRank = Mutex::leaf + 3;
duke@435 34
kvn@1926 35 // Defaults are 0 so things will break badly if incorrectly initialized.
kvn@1926 36 int CompactibleFreeListSpace::IndexSetStart = 0;
kvn@1926 37 int CompactibleFreeListSpace::IndexSetStride = 0;
kvn@1926 38
kvn@1926 39 size_t MinChunkSize = 0;
kvn@1926 40
kvn@1926 41 void CompactibleFreeListSpace::set_cms_values() {
kvn@1926 42 // Set CMS global values
kvn@1926 43 assert(MinChunkSize == 0, "already set");
kvn@1926 44 #define numQuanta(x,y) ((x+y-1)/y)
kvn@1926 45 MinChunkSize = numQuanta(sizeof(FreeChunk), MinObjAlignmentInBytes) * MinObjAlignment;
kvn@1926 46
kvn@1926 47 assert(IndexSetStart == 0 && IndexSetStride == 0, "already set");
kvn@1926 48 IndexSetStart = MinObjAlignment;
kvn@1926 49 IndexSetStride = MinObjAlignment;
kvn@1926 50 }
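// Worked example (hypothetical values; the real ones are platform-dependent):
// if sizeof(FreeChunk) were 24 bytes, MinObjAlignmentInBytes 8 and
// MinObjAlignment 1 heap word, then numQuanta(24, 8) = (24 + 8 - 1) / 8 = 3,
// so MinChunkSize would be 3 * 1 = 3 heap words: the smallest chunk is the
// FreeChunk header rounded up to the object alignment. IndexSetStart and
// IndexSetStride are then both one alignment unit, so the indexed free lists
// are populated only at properly aligned word sizes.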
kvn@1926 51
duke@435 52 // Constructor
duke@435 53 CompactibleFreeListSpace::CompactibleFreeListSpace(BlockOffsetSharedArray* bs,
duke@435 54 MemRegion mr, bool use_adaptive_freelists,
duke@435 55 FreeBlockDictionary::DictionaryChoice dictionaryChoice) :
duke@435 56 _dictionaryChoice(dictionaryChoice),
duke@435 57 _adaptive_freelists(use_adaptive_freelists),
duke@435 58 _bt(bs, mr),
duke@435 59 // free list locks are in the range of values taken by _lockRank
duke@435 60 // This range currently is [_leaf+2, _leaf+3]
duke@435 61 // Note: this requires that CFLspace c'tors
duke@435 62 // are called serially in the order in which the locks
duke@435 63 // are acquired in the program text. This is true today.
duke@435 64 _freelistLock(_lockRank--, "CompactibleFreeListSpace._lock", true),
duke@435 65 _parDictionaryAllocLock(Mutex::leaf - 1, // == rank(ExpandHeap_lock) - 1
duke@435 66 "CompactibleFreeListSpace._dict_par_lock", true),
duke@435 67 _rescan_task_size(CardTableModRefBS::card_size_in_words * BitsPerWord *
duke@435 68 CMSRescanMultiple),
duke@435 69 _marking_task_size(CardTableModRefBS::card_size_in_words * BitsPerWord *
duke@435 70 CMSConcMarkMultiple),
duke@435 71 _collector(NULL)
duke@435 72 {
duke@435 73 _bt.set_space(this);
jmasa@698 74 initialize(mr, SpaceDecorator::Clear, SpaceDecorator::Mangle);
duke@435 75 // We have all of "mr", all of which we place in the dictionary
duke@435 76 // as one big chunk. We'll need to decide here which of several
duke@435 77 // possible alternative dictionary implementations to use. For
duke@435 78 // now the choice is easy, since we have only one working
duke@435 79 // implementation, namely, the simple binary tree (splaying
duke@435 80 // temporarily disabled).
duke@435 81 switch (dictionaryChoice) {
duke@435 82 case FreeBlockDictionary::dictionarySplayTree:
duke@435 83 case FreeBlockDictionary::dictionarySkipList:
duke@435 84 default:
duke@435 85 warning("dictionaryChoice: selected option not understood; using"
duke@435 86 " default BinaryTreeDictionary implementation instead.");
ysr@1580 87 case FreeBlockDictionary::dictionaryBinaryTree:
duke@435 88 _dictionary = new BinaryTreeDictionary(mr);
duke@435 89 break;
duke@435 90 }
duke@435 91 assert(_dictionary != NULL, "CMS dictionary initialization");
duke@435 92 // The indexed free lists are initially all empty and are lazily
duke@435 93 // filled in on demand. Initialize the array elements to NULL.
duke@435 94 initializeIndexedFreeListArray();
duke@435 95
duke@435 96 // Not using adaptive free lists assumes that allocation is first
duke@435 97 // from the linAB's. Also a cms perm gen which can be compacted
duke@435 98 // has to have the klass's klassKlass allocated at a lower
duke@435 99 // address in the heap than the klass so that the klassKlass is
duke@435 100 // moved to its new location before the klass is moved.
duke@435 101 // Set the _refillSize for the linear allocation blocks
duke@435 102 if (!use_adaptive_freelists) {
duke@435 103 FreeChunk* fc = _dictionary->getChunk(mr.word_size());
duke@435 104 // The small linAB initially has all the space and will allocate
duke@435 105 // a chunk of any size.
duke@435 106 HeapWord* addr = (HeapWord*) fc;
duke@435 107 _smallLinearAllocBlock.set(addr, fc->size() ,
duke@435 108 1024*SmallForLinearAlloc, fc->size());
duke@435 109 // Note that _unallocated_block is not updated here.
duke@435 110 // Allocations from the linear allocation block should
duke@435 111 // update it.
duke@435 112 } else {
duke@435 113 _smallLinearAllocBlock.set(0, 0, 1024*SmallForLinearAlloc,
duke@435 114 SmallForLinearAlloc);
duke@435 115 }
duke@435 116 // CMSIndexedFreeListReplenish should be at least 1
duke@435 117 CMSIndexedFreeListReplenish = MAX2((uintx)1, CMSIndexedFreeListReplenish);
duke@435 118 _promoInfo.setSpace(this);
duke@435 119 if (UseCMSBestFit) {
duke@435 120 _fitStrategy = FreeBlockBestFitFirst;
duke@435 121 } else {
duke@435 122 _fitStrategy = FreeBlockStrategyNone;
duke@435 123 }
duke@435 124 checkFreeListConsistency();
duke@435 125
duke@435 126 // Initialize locks for parallel case.
duke@435 127 if (ParallelGCThreads > 0) {
duke@435 128 for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
duke@435 129 _indexedFreeListParLocks[i] = new Mutex(Mutex::leaf - 1, // == ExpandHeap_lock - 1
duke@435 130 "a freelist par lock",
duke@435 131 true);
duke@435 132 if (_indexedFreeListParLocks[i] == NULL)
duke@435 133 vm_exit_during_initialization("Could not allocate a par lock");
duke@435 134 DEBUG_ONLY(
duke@435 135 _indexedFreeList[i].set_protecting_lock(_indexedFreeListParLocks[i]);
duke@435 136 )
duke@435 137 }
duke@435 138 _dictionary->set_par_lock(&_parDictionaryAllocLock);
duke@435 139 }
duke@435 140 }
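// Note on the free list lock rank used above: _lockRank is a static
// initialized to Mutex::leaf + 3 and post-decremented by each constructor, so
// the first CompactibleFreeListSpace built gets a _freelistLock of rank
// leaf + 3 and the next gets leaf + 2, matching the [_leaf+2, _leaf+3] range
// mentioned in the initializer list. This is why the constructors must run
// serially in the order in which the locks are acquired.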
duke@435 141
duke@435 142 // Like CompactibleSpace forward() but always calls cross_threshold() to
duke@435 143 // update the block offset table. Removed initialize_threshold call because
duke@435 144 // CFLS does not use a block offset array for contiguous spaces.
duke@435 145 HeapWord* CompactibleFreeListSpace::forward(oop q, size_t size,
duke@435 146 CompactPoint* cp, HeapWord* compact_top) {
duke@435 147 // q is alive
duke@435 148 // First check if we should switch compaction space
duke@435 149 assert(this == cp->space, "'this' should be current compaction space.");
duke@435 150 size_t compaction_max_size = pointer_delta(end(), compact_top);
duke@435 151 assert(adjustObjectSize(size) == cp->space->adjust_object_size_v(size),
duke@435 152 "virtual adjustObjectSize_v() method is not correct");
duke@435 153 size_t adjusted_size = adjustObjectSize(size);
duke@435 154 assert(compaction_max_size >= MinChunkSize || compaction_max_size == 0,
duke@435 155 "no small fragments allowed");
duke@435 156 assert(minimum_free_block_size() == MinChunkSize,
duke@435 157 "for de-virtualized reference below");
duke@435 158 // Can't leave a nonzero-size residual fragment smaller than MinChunkSize
duke@435 159 if (adjusted_size + MinChunkSize > compaction_max_size &&
duke@435 160 adjusted_size != compaction_max_size) {
duke@435 161 do {
duke@435 162 // switch to next compaction space
duke@435 163 cp->space->set_compaction_top(compact_top);
duke@435 164 cp->space = cp->space->next_compaction_space();
duke@435 165 if (cp->space == NULL) {
duke@435 166 cp->gen = GenCollectedHeap::heap()->prev_gen(cp->gen);
duke@435 167 assert(cp->gen != NULL, "compaction must succeed");
duke@435 168 cp->space = cp->gen->first_compaction_space();
duke@435 169 assert(cp->space != NULL, "generation must have a first compaction space");
duke@435 170 }
duke@435 171 compact_top = cp->space->bottom();
duke@435 172 cp->space->set_compaction_top(compact_top);
duke@435 173 // The correct adjusted_size may not be the same as that for this method
duke@435 174 // (i.e., cp->space may no longer be "this"), so adjust the size again.
duke@435 175 // Use the virtual method here; the non-virtual version was used above
duke@435 176 // to save the virtual dispatch.
duke@435 177 adjusted_size = cp->space->adjust_object_size_v(size);
duke@435 178 compaction_max_size = pointer_delta(cp->space->end(), compact_top);
duke@435 179 assert(cp->space->minimum_free_block_size() == 0, "just checking");
duke@435 180 } while (adjusted_size > compaction_max_size);
duke@435 181 }
duke@435 182
duke@435 183 // store the forwarding pointer into the mark word
duke@435 184 if ((HeapWord*)q != compact_top) {
duke@435 185 q->forward_to(oop(compact_top));
duke@435 186 assert(q->is_gc_marked(), "encoding the pointer should preserve the mark");
duke@435 187 } else {
duke@435 188 // if the object isn't moving we can just set the mark to the default
duke@435 189 // mark and handle it specially later on.
duke@435 190 q->init_mark();
duke@435 191 assert(q->forwardee() == NULL, "should be forwarded to NULL");
duke@435 192 }
duke@435 193
coleenp@548 194 VALIDATE_MARK_SWEEP_ONLY(MarkSweep::register_live_oop(q, adjusted_size));
duke@435 195 compact_top += adjusted_size;
duke@435 196
duke@435 197 // we need to update the offset table so that the beginnings of objects can be
duke@435 198 // found during scavenge. Note that we are updating the offset table based on
duke@435 199 // where the object will be once the compaction phase finishes.
duke@435 200
duke@435 201 // Always call cross_threshold(). A contiguous space can only call it when
duke@435 202 // the compaction_top exceeds the current threshold, but that is not the
duke@435 203 // case for a non-contiguous space.
duke@435 204 cp->threshold =
duke@435 205 cp->space->cross_threshold(compact_top - adjusted_size, compact_top);
duke@435 206 return compact_top;
duke@435 207 }
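// In short, forward() both picks the destination address for q (switching to
// the next compaction space when the current one cannot hold it without
// leaving a residual fragment smaller than MinChunkSize) and, unlike the
// contiguous space version, updates the block offset table for every
// forwarded object via cross_threshold().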
duke@435 208
duke@435 209 // A modified copy of OffsetTableContigSpace::cross_threshold() with _offsets -> _bt
duke@435 210 // and use of single_block instead of alloc_block. The name here is not really
duke@435 211 // appropriate - maybe a more general name could be invented for both the
duke@435 212 // contiguous and noncontiguous spaces.
duke@435 213
duke@435 214 HeapWord* CompactibleFreeListSpace::cross_threshold(HeapWord* start, HeapWord* the_end) {
duke@435 215 _bt.single_block(start, the_end);
duke@435 216 return end();
duke@435 217 }
duke@435 218
duke@435 219 // Initialize them to NULL.
duke@435 220 void CompactibleFreeListSpace::initializeIndexedFreeListArray() {
duke@435 221 for (size_t i = 0; i < IndexSetSize; i++) {
duke@435 222 // Note that on platforms where objects are double word aligned,
duke@435 223 // the odd array elements are not used. It is convenient, however,
duke@435 224 // to map directly from the object size to the array element.
duke@435 225 _indexedFreeList[i].reset(IndexSetSize);
duke@435 226 _indexedFreeList[i].set_size(i);
duke@435 227 assert(_indexedFreeList[i].count() == 0, "reset check failed");
duke@435 228 assert(_indexedFreeList[i].head() == NULL, "reset check failed");
duke@435 229 assert(_indexedFreeList[i].tail() == NULL, "reset check failed");
duke@435 230 assert(_indexedFreeList[i].hint() == IndexSetSize, "reset check failed");
duke@435 231 }
duke@435 232 }
duke@435 233
duke@435 234 void CompactibleFreeListSpace::resetIndexedFreeListArray() {
duke@435 235 for (int i = 1; i < IndexSetSize; i++) {
duke@435 236 assert(_indexedFreeList[i].size() == (size_t) i,
duke@435 237 "Indexed free list sizes are incorrect");
duke@435 238 _indexedFreeList[i].reset(IndexSetSize);
duke@435 239 assert(_indexedFreeList[i].count() == 0, "reset check failed");
duke@435 240 assert(_indexedFreeList[i].head() == NULL, "reset check failed");
duke@435 241 assert(_indexedFreeList[i].tail() == NULL, "reset check failed");
duke@435 242 assert(_indexedFreeList[i].hint() == IndexSetSize, "reset check failed");
duke@435 243 }
duke@435 244 }
duke@435 245
duke@435 246 void CompactibleFreeListSpace::reset(MemRegion mr) {
duke@435 247 resetIndexedFreeListArray();
duke@435 248 dictionary()->reset();
duke@435 249 if (BlockOffsetArrayUseUnallocatedBlock) {
duke@435 250 assert(end() == mr.end(), "We are compacting to the bottom of CMS gen");
duke@435 251 // Everything's allocated until proven otherwise.
duke@435 252 _bt.set_unallocated_block(end());
duke@435 253 }
duke@435 254 if (!mr.is_empty()) {
duke@435 255 assert(mr.word_size() >= MinChunkSize, "Chunk size is too small");
duke@435 256 _bt.single_block(mr.start(), mr.word_size());
duke@435 257 FreeChunk* fc = (FreeChunk*) mr.start();
duke@435 258 fc->setSize(mr.word_size());
duke@435 259 if (mr.word_size() >= IndexSetSize ) {
duke@435 260 returnChunkToDictionary(fc);
duke@435 261 } else {
duke@435 262 _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
duke@435 263 _indexedFreeList[mr.word_size()].returnChunkAtHead(fc);
duke@435 264 }
duke@435 265 }
duke@435 266 _promoInfo.reset();
duke@435 267 _smallLinearAllocBlock._ptr = NULL;
duke@435 268 _smallLinearAllocBlock._word_size = 0;
duke@435 269 }
duke@435 270
duke@435 271 void CompactibleFreeListSpace::reset_after_compaction() {
duke@435 272 // Reset the space to the new reality - one free chunk.
duke@435 273 MemRegion mr(compaction_top(), end());
duke@435 274 reset(mr);
duke@435 275 // Now refill the linear allocation block(s) if possible.
duke@435 276 if (_adaptive_freelists) {
duke@435 277 refillLinearAllocBlocksIfNeeded();
duke@435 278 } else {
duke@435 279 // Place as much of mr in the linAB as we can get,
duke@435 280 // provided it was big enough to go into the dictionary.
duke@435 281 FreeChunk* fc = dictionary()->findLargestDict();
duke@435 282 if (fc != NULL) {
duke@435 283 assert(fc->size() == mr.word_size(),
duke@435 284 "Why was the chunk broken up?");
duke@435 285 removeChunkFromDictionary(fc);
duke@435 286 HeapWord* addr = (HeapWord*) fc;
duke@435 287 _smallLinearAllocBlock.set(addr, fc->size() ,
duke@435 288 1024*SmallForLinearAlloc, fc->size());
duke@435 289 // Note that _unallocated_block is not updated here.
duke@435 290 }
duke@435 291 }
duke@435 292 }
duke@435 293
duke@435 294 // Walks the entire dictionary, returning a coterminal
duke@435 295 // chunk, if it exists. Use with caution since it involves
duke@435 296 // a potentially complete walk of a potentially large tree.
duke@435 297 FreeChunk* CompactibleFreeListSpace::find_chunk_at_end() {
duke@435 298
duke@435 299 assert_lock_strong(&_freelistLock);
duke@435 300
duke@435 301 return dictionary()->find_chunk_ends_at(end());
duke@435 302 }
duke@435 303
duke@435 304
duke@435 305 #ifndef PRODUCT
duke@435 306 void CompactibleFreeListSpace::initializeIndexedFreeListArrayReturnedBytes() {
duke@435 307 for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
duke@435 308 _indexedFreeList[i].allocation_stats()->set_returnedBytes(0);
duke@435 309 }
duke@435 310 }
duke@435 311
duke@435 312 size_t CompactibleFreeListSpace::sumIndexedFreeListArrayReturnedBytes() {
duke@435 313 size_t sum = 0;
duke@435 314 for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
duke@435 315 sum += _indexedFreeList[i].allocation_stats()->returnedBytes();
duke@435 316 }
duke@435 317 return sum;
duke@435 318 }
duke@435 319
duke@435 320 size_t CompactibleFreeListSpace::totalCountInIndexedFreeLists() const {
duke@435 321 size_t count = 0;
kvn@1926 322 for (int i = (int)MinChunkSize; i < IndexSetSize; i++) {
duke@435 323 debug_only(
duke@435 324 ssize_t total_list_count = 0;
duke@435 325 for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
duke@435 326 fc = fc->next()) {
duke@435 327 total_list_count++;
duke@435 328 }
duke@435 329 assert(total_list_count == _indexedFreeList[i].count(),
duke@435 330 "Count in list is incorrect");
duke@435 331 )
duke@435 332 count += _indexedFreeList[i].count();
duke@435 333 }
duke@435 334 return count;
duke@435 335 }
duke@435 336
duke@435 337 size_t CompactibleFreeListSpace::totalCount() {
duke@435 338 size_t num = totalCountInIndexedFreeLists();
duke@435 339 num += dictionary()->totalCount();
duke@435 340 if (_smallLinearAllocBlock._word_size != 0) {
duke@435 341 num++;
duke@435 342 }
duke@435 343 return num;
duke@435 344 }
duke@435 345 #endif
duke@435 346
duke@435 347 bool CompactibleFreeListSpace::is_free_block(const HeapWord* p) const {
duke@435 348 FreeChunk* fc = (FreeChunk*) p;
duke@435 349 return fc->isFree();
duke@435 350 }
duke@435 351
duke@435 352 size_t CompactibleFreeListSpace::used() const {
duke@435 353 return capacity() - free();
duke@435 354 }
duke@435 355
duke@435 356 size_t CompactibleFreeListSpace::free() const {
duke@435 357 // "MT-safe, but not MT-precise"(TM), if you will: i.e.
duke@435 358 // if you do this while the structures are in flux you
duke@435 359 // may get an approximate answer only; for instance
duke@435 360 // because there is concurrent allocation either
duke@435 361 // directly by mutators or for promotion during a GC.
duke@435 362 // It's "MT-safe", however, in the sense that you are guaranteed
duke@435 363 // not to crash and burn, for instance, because of walking
duke@435 364 // pointers that could disappear as you were walking them.
duke@435 365 // The approximation is because the various components
duke@435 366 // that are read below are not read atomically (and
duke@435 367 // further the computation of totalSizeInIndexedFreeLists()
duke@435 368 // is itself a non-atomic computation). The normal use of
duke@435 369 // this is during a resize operation at the end of GC
duke@435 370 // and at that time you are guaranteed to get the
duke@435 371 // correct actual value. However, for instance, this is
duke@435 372 // also read completely asynchronously by the "perf-sampler"
duke@435 373 // that supports jvmstat, and you are apt to see the values
duke@435 374 // flicker in such cases.
duke@435 375 assert(_dictionary != NULL, "No _dictionary?");
duke@435 376 return (_dictionary->totalChunkSize(DEBUG_ONLY(freelistLock())) +
duke@435 377 totalSizeInIndexedFreeLists() +
duke@435 378 _smallLinearAllocBlock._word_size) * HeapWordSize;
duke@435 379 }
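// For instance, a chunk promoted out of the dictionary between the read of
// totalChunkSize() and the read of totalSizeInIndexedFreeLists() above can
// make the returned total transiently inaccurate by the size of that chunk;
// as noted above, only quiescent callers (e.g. the resize at the end of a GC)
// are guaranteed an exact value.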
duke@435 380
duke@435 381 size_t CompactibleFreeListSpace::max_alloc_in_words() const {
duke@435 382 assert(_dictionary != NULL, "No _dictionary?");
duke@435 383 assert_locked();
duke@435 384 size_t res = _dictionary->maxChunkSize();
duke@435 385 res = MAX2(res, MIN2(_smallLinearAllocBlock._word_size,
duke@435 386 (size_t) SmallForLinearAlloc - 1));
duke@435 387 // XXX the following could potentially be pretty slow;
duke@435 388 // should one, pessimally for the rare cases when res
duke@435 389 // calculated above is less than IndexSetSize,
duke@435 390 // just return res calculated above? My reasoning was that
duke@435 391 // those cases will be so rare that the extra time spent doesn't
duke@435 392 // really matter....
duke@435 393 // Note: do not change the loop test i >= res + IndexSetStride
duke@435 394 // to i > res below, because i is unsigned and res may be zero.
duke@435 395 for (size_t i = IndexSetSize - 1; i >= res + IndexSetStride;
duke@435 396 i -= IndexSetStride) {
duke@435 397 if (_indexedFreeList[i].head() != NULL) {
duke@435 398 assert(_indexedFreeList[i].count() != 0, "Inconsistent FreeList");
duke@435 399 return i;
duke@435 400 }
duke@435 401 }
duke@435 402 return res;
duke@435 403 }
duke@435 404
ysr@1580 405 void CompactibleFreeListSpace::print_indexed_free_lists(outputStream* st)
ysr@1580 406 const {
ysr@1580 407 reportIndexedFreeListStatistics();
ysr@1580 408 st->print_cr("Layout of Indexed Freelists");
ysr@1580 409 st->print_cr("---------------------------");
ysr@1580 410 FreeList::print_labels_on(st, "size");
ysr@1580 411 for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
ysr@1580 412 _indexedFreeList[i].print_on(st);
ysr@1580 413 for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
ysr@1580 414 fc = fc->next()) {
ysr@1580 415 st->print_cr("\t[" PTR_FORMAT "," PTR_FORMAT ") %s",
ysr@1580 416 fc, (HeapWord*)fc + i,
ysr@1580 417 fc->cantCoalesce() ? "\t CC" : "");
ysr@1580 418 }
ysr@1580 419 }
ysr@1580 420 }
ysr@1580 421
ysr@1580 422 void CompactibleFreeListSpace::print_promo_info_blocks(outputStream* st)
ysr@1580 423 const {
ysr@1580 424 _promoInfo.print_on(st);
ysr@1580 425 }
ysr@1580 426
ysr@1580 427 void CompactibleFreeListSpace::print_dictionary_free_lists(outputStream* st)
ysr@1580 428 const {
ysr@1580 429 _dictionary->reportStatistics();
ysr@1580 430 st->print_cr("Layout of Freelists in Tree");
ysr@1580 431 st->print_cr("---------------------------");
ysr@1580 432 _dictionary->print_free_lists(st);
ysr@1580 433 }
ysr@1580 434
ysr@1580 435 class BlkPrintingClosure: public BlkClosure {
ysr@1580 436 const CMSCollector* _collector;
ysr@1580 437 const CompactibleFreeListSpace* _sp;
ysr@1580 438 const CMSBitMap* _live_bit_map;
ysr@1580 439 const bool _post_remark;
ysr@1580 440 outputStream* _st;
ysr@1580 441 public:
ysr@1580 442 BlkPrintingClosure(const CMSCollector* collector,
ysr@1580 443 const CompactibleFreeListSpace* sp,
ysr@1580 444 const CMSBitMap* live_bit_map,
ysr@1580 445 outputStream* st):
ysr@1580 446 _collector(collector),
ysr@1580 447 _sp(sp),
ysr@1580 448 _live_bit_map(live_bit_map),
ysr@1580 449 _post_remark(collector->abstract_state() > CMSCollector::FinalMarking),
ysr@1580 450 _st(st) { }
ysr@1580 451 size_t do_blk(HeapWord* addr);
ysr@1580 452 };
ysr@1580 453
ysr@1580 454 size_t BlkPrintingClosure::do_blk(HeapWord* addr) {
ysr@1580 455 size_t sz = _sp->block_size_no_stall(addr, _collector);
ysr@1580 456 assert(sz != 0, "Should always be able to compute a size");
ysr@1580 457 if (_sp->block_is_obj(addr)) {
ysr@1580 458 const bool dead = _post_remark && !_live_bit_map->isMarked(addr);
ysr@1580 459 _st->print_cr(PTR_FORMAT ": %s object of size " SIZE_FORMAT "%s",
ysr@1580 460 addr,
ysr@1580 461 dead ? "dead" : "live",
ysr@1580 462 sz,
ysr@1580 463 (!dead && CMSPrintObjectsInDump) ? ":" : ".");
ysr@1580 464 if (CMSPrintObjectsInDump && !dead) {
ysr@1580 465 oop(addr)->print_on(_st);
ysr@1580 466 _st->print_cr("--------------------------------------");
ysr@1580 467 }
ysr@1580 468 } else { // free block
ysr@1580 469 _st->print_cr(PTR_FORMAT ": free block of size " SIZE_FORMAT "%s",
ysr@1580 470 addr, sz, CMSPrintChunksInDump ? ":" : ".");
ysr@1580 471 if (CMSPrintChunksInDump) {
ysr@1580 472 ((FreeChunk*)addr)->print_on(_st);
ysr@1580 473 _st->print_cr("--------------------------------------");
ysr@1580 474 }
ysr@1580 475 }
ysr@1580 476 return sz;
ysr@1580 477 }
ysr@1580 478
ysr@1580 479 void CompactibleFreeListSpace::dump_at_safepoint_with_locks(CMSCollector* c,
ysr@1580 480 outputStream* st) {
ysr@1580 481 st->print_cr("\n=========================");
ysr@1580 482 st->print_cr("Block layout in CMS Heap:");
ysr@1580 483 st->print_cr("=========================");
ysr@1580 484 BlkPrintingClosure bpcl(c, this, c->markBitMap(), st);
ysr@1580 485 blk_iterate(&bpcl);
ysr@1580 486
ysr@1580 487 st->print_cr("\n=======================================");
ysr@1580 488 st->print_cr("Order & Layout of Promotion Info Blocks");
ysr@1580 489 st->print_cr("=======================================");
ysr@1580 490 print_promo_info_blocks(st);
ysr@1580 491
ysr@1580 492 st->print_cr("\n===========================");
ysr@1580 493 st->print_cr("Order of Indexed Free Lists");
ysr@1580 494 st->print_cr("===========================");
ysr@1580 495 print_indexed_free_lists(st);
ysr@1580 496
ysr@1580 497 st->print_cr("\n=================================");
ysr@1580 498 st->print_cr("Order of Free Lists in Dictionary");
ysr@1580 499 st->print_cr("=================================");
ysr@1580 500 print_dictionary_free_lists(st);
ysr@1580 501 }
ysr@1580 502
ysr@1580 503
duke@435 504 void CompactibleFreeListSpace::reportFreeListStatistics() const {
duke@435 505 assert_lock_strong(&_freelistLock);
duke@435 506 assert(PrintFLSStatistics != 0, "Reporting error");
duke@435 507 _dictionary->reportStatistics();
duke@435 508 if (PrintFLSStatistics > 1) {
duke@435 509 reportIndexedFreeListStatistics();
duke@435 510 size_t totalSize = totalSizeInIndexedFreeLists() +
duke@435 511 _dictionary->totalChunkSize(DEBUG_ONLY(freelistLock()));
duke@435 512 gclog_or_tty->print(" free=" SIZE_FORMAT " frag=%1.4f\n", totalSize, flsFrag());
duke@435 513 }
duke@435 514 }
duke@435 515
duke@435 516 void CompactibleFreeListSpace::reportIndexedFreeListStatistics() const {
duke@435 517 assert_lock_strong(&_freelistLock);
duke@435 518 gclog_or_tty->print("Statistics for IndexedFreeLists:\n"
duke@435 519 "--------------------------------\n");
duke@435 520 size_t totalSize = totalSizeInIndexedFreeLists();
duke@435 521 size_t freeBlocks = numFreeBlocksInIndexedFreeLists();
duke@435 522 gclog_or_tty->print("Total Free Space: " SIZE_FORMAT "\n", totalSize);
duke@435 523 gclog_or_tty->print("Max Chunk Size: " SIZE_FORMAT "\n", maxChunkSizeInIndexedFreeLists());
duke@435 524 gclog_or_tty->print("Number of Blocks: " SIZE_FORMAT "\n", freeBlocks);
duke@435 525 if (freeBlocks != 0) {
duke@435 526 gclog_or_tty->print("Av. Block Size: " SIZE_FORMAT "\n", totalSize/freeBlocks);
duke@435 527 }
duke@435 528 }
duke@435 529
duke@435 530 size_t CompactibleFreeListSpace::numFreeBlocksInIndexedFreeLists() const {
duke@435 531 size_t res = 0;
duke@435 532 for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
duke@435 533 debug_only(
duke@435 534 ssize_t recount = 0;
duke@435 535 for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
duke@435 536 fc = fc->next()) {
duke@435 537 recount += 1;
duke@435 538 }
duke@435 539 assert(recount == _indexedFreeList[i].count(),
duke@435 540 "Incorrect count in list");
duke@435 541 )
duke@435 542 res += _indexedFreeList[i].count();
duke@435 543 }
duke@435 544 return res;
duke@435 545 }
duke@435 546
duke@435 547 size_t CompactibleFreeListSpace::maxChunkSizeInIndexedFreeLists() const {
duke@435 548 for (size_t i = IndexSetSize - 1; i != 0; i -= IndexSetStride) {
duke@435 549 if (_indexedFreeList[i].head() != NULL) {
duke@435 550 assert(_indexedFreeList[i].count() != 0, "Inconsistent FreeList");
duke@435 551 return (size_t)i;
duke@435 552 }
duke@435 553 }
duke@435 554 return 0;
duke@435 555 }
duke@435 556
duke@435 557 void CompactibleFreeListSpace::set_end(HeapWord* value) {
duke@435 558 HeapWord* prevEnd = end();
duke@435 559 assert(prevEnd != value, "unnecessary set_end call");
duke@435 560 assert(prevEnd == NULL || value >= unallocated_block(), "New end is below unallocated block");
duke@435 561 _end = value;
duke@435 562 if (prevEnd != NULL) {
duke@435 563 // Resize the underlying block offset table.
duke@435 564 _bt.resize(pointer_delta(value, bottom()));
ysr@1580 565 if (value <= prevEnd) {
ysr@1580 566 assert(value >= unallocated_block(), "New end is below unallocated block");
ysr@1580 567 } else {
ysr@1580 568 // Now, take this new chunk and add it to the free blocks.
ysr@1580 569 // Note that the BOT has not yet been updated for this block.
ysr@1580 570 size_t newFcSize = pointer_delta(value, prevEnd);
ysr@1580 571 // XXX This is REALLY UGLY and should be fixed up. XXX
ysr@1580 572 if (!_adaptive_freelists && _smallLinearAllocBlock._ptr == NULL) {
ysr@1580 573 // Mark the boundary of the new block in BOT
ysr@1580 574 _bt.mark_block(prevEnd, value);
ysr@1580 575 // put it all in the linAB
ysr@1580 576 if (ParallelGCThreads == 0) {
ysr@1580 577 _smallLinearAllocBlock._ptr = prevEnd;
ysr@1580 578 _smallLinearAllocBlock._word_size = newFcSize;
ysr@1580 579 repairLinearAllocBlock(&_smallLinearAllocBlock);
ysr@1580 580 } else { // ParallelGCThreads > 0
ysr@1580 581 MutexLockerEx x(parDictionaryAllocLock(),
ysr@1580 582 Mutex::_no_safepoint_check_flag);
ysr@1580 583 _smallLinearAllocBlock._ptr = prevEnd;
ysr@1580 584 _smallLinearAllocBlock._word_size = newFcSize;
ysr@1580 585 repairLinearAllocBlock(&_smallLinearAllocBlock);
ysr@1580 586 }
ysr@1580 587 // Births of chunks put into a LinAB are not recorded. Births
ysr@1580 588 // of chunks as they are allocated out of a LinAB are.
ysr@1580 589 } else {
ysr@1580 590 // Add the block to the free lists, if possible coalescing it
ysr@1580 591 // with the last free block, and update the BOT and census data.
ysr@1580 592 addChunkToFreeListsAtEndRecordingStats(prevEnd, newFcSize);
duke@435 593 }
duke@435 594 }
duke@435 595 }
duke@435 596 }
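// Summary of set_end(): the block offset table is always resized to the new
// end; if the space grew, the newly exposed range [prevEnd, value) is either
// handed to the small linear allocation block (non-adaptive free lists with an
// empty linAB) or added to the free lists with possible coalescing, while a
// shrinking end only requires that it not drop below the unallocated block.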
duke@435 597
duke@435 598 class FreeListSpace_DCTOC : public Filtering_DCTOC {
duke@435 599 CompactibleFreeListSpace* _cfls;
duke@435 600 CMSCollector* _collector;
duke@435 601 protected:
duke@435 602 // Override.
duke@435 603 #define walk_mem_region_with_cl_DECL(ClosureType) \
duke@435 604 virtual void walk_mem_region_with_cl(MemRegion mr, \
duke@435 605 HeapWord* bottom, HeapWord* top, \
duke@435 606 ClosureType* cl); \
duke@435 607 void walk_mem_region_with_cl_par(MemRegion mr, \
duke@435 608 HeapWord* bottom, HeapWord* top, \
duke@435 609 ClosureType* cl); \
duke@435 610 void walk_mem_region_with_cl_nopar(MemRegion mr, \
duke@435 611 HeapWord* bottom, HeapWord* top, \
duke@435 612 ClosureType* cl)
duke@435 613 walk_mem_region_with_cl_DECL(OopClosure);
duke@435 614 walk_mem_region_with_cl_DECL(FilteringClosure);
duke@435 615
duke@435 616 public:
duke@435 617 FreeListSpace_DCTOC(CompactibleFreeListSpace* sp,
duke@435 618 CMSCollector* collector,
duke@435 619 OopClosure* cl,
duke@435 620 CardTableModRefBS::PrecisionStyle precision,
duke@435 621 HeapWord* boundary) :
duke@435 622 Filtering_DCTOC(sp, cl, precision, boundary),
duke@435 623 _cfls(sp), _collector(collector) {}
duke@435 624 };
duke@435 625
duke@435 626 // We de-virtualize the block-related calls below, since we know that our
duke@435 627 // space is a CompactibleFreeListSpace.
duke@435 628 #define FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(ClosureType) \
duke@435 629 void FreeListSpace_DCTOC::walk_mem_region_with_cl(MemRegion mr, \
duke@435 630 HeapWord* bottom, \
duke@435 631 HeapWord* top, \
duke@435 632 ClosureType* cl) { \
duke@435 633 if (SharedHeap::heap()->n_par_threads() > 0) { \
duke@435 634 walk_mem_region_with_cl_par(mr, bottom, top, cl); \
duke@435 635 } else { \
duke@435 636 walk_mem_region_with_cl_nopar(mr, bottom, top, cl); \
duke@435 637 } \
duke@435 638 } \
duke@435 639 void FreeListSpace_DCTOC::walk_mem_region_with_cl_par(MemRegion mr, \
duke@435 640 HeapWord* bottom, \
duke@435 641 HeapWord* top, \
duke@435 642 ClosureType* cl) { \
duke@435 643 /* Skip parts that are before "mr", in case "block_start" sent us \
duke@435 644 back too far. */ \
duke@435 645 HeapWord* mr_start = mr.start(); \
duke@435 646 size_t bot_size = _cfls->CompactibleFreeListSpace::block_size(bottom); \
duke@435 647 HeapWord* next = bottom + bot_size; \
duke@435 648 while (next < mr_start) { \
duke@435 649 bottom = next; \
duke@435 650 bot_size = _cfls->CompactibleFreeListSpace::block_size(bottom); \
duke@435 651 next = bottom + bot_size; \
duke@435 652 } \
duke@435 653 \
duke@435 654 while (bottom < top) { \
duke@435 655 if (_cfls->CompactibleFreeListSpace::block_is_obj(bottom) && \
duke@435 656 !_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks( \
duke@435 657 oop(bottom)) && \
duke@435 658 !_collector->CMSCollector::is_dead_obj(oop(bottom))) { \
duke@435 659 size_t word_sz = oop(bottom)->oop_iterate(cl, mr); \
duke@435 660 bottom += _cfls->adjustObjectSize(word_sz); \
duke@435 661 } else { \
duke@435 662 bottom += _cfls->CompactibleFreeListSpace::block_size(bottom); \
duke@435 663 } \
duke@435 664 } \
duke@435 665 } \
duke@435 666 void FreeListSpace_DCTOC::walk_mem_region_with_cl_nopar(MemRegion mr, \
duke@435 667 HeapWord* bottom, \
duke@435 668 HeapWord* top, \
duke@435 669 ClosureType* cl) { \
duke@435 670 /* Skip parts that are before "mr", in case "block_start" sent us \
duke@435 671 back too far. */ \
duke@435 672 HeapWord* mr_start = mr.start(); \
duke@435 673 size_t bot_size = _cfls->CompactibleFreeListSpace::block_size_nopar(bottom); \
duke@435 674 HeapWord* next = bottom + bot_size; \
duke@435 675 while (next < mr_start) { \
duke@435 676 bottom = next; \
duke@435 677 bot_size = _cfls->CompactibleFreeListSpace::block_size_nopar(bottom); \
duke@435 678 next = bottom + bot_size; \
duke@435 679 } \
duke@435 680 \
duke@435 681 while (bottom < top) { \
duke@435 682 if (_cfls->CompactibleFreeListSpace::block_is_obj_nopar(bottom) && \
duke@435 683 !_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks( \
duke@435 684 oop(bottom)) && \
duke@435 685 !_collector->CMSCollector::is_dead_obj(oop(bottom))) { \
duke@435 686 size_t word_sz = oop(bottom)->oop_iterate(cl, mr); \
duke@435 687 bottom += _cfls->adjustObjectSize(word_sz); \
duke@435 688 } else { \
duke@435 689 bottom += _cfls->CompactibleFreeListSpace::block_size_nopar(bottom); \
duke@435 690 } \
duke@435 691 } \
duke@435 692 }
duke@435 693
duke@435 694 // (There are only two of these, rather than N, because the split is due
duke@435 695 // only to the introduction of the FilteringClosure, a local part of the
duke@435 696 // impl of this abstraction.)
duke@435 697 FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(OopClosure)
duke@435 698 FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(FilteringClosure)
duke@435 699
duke@435 700 DirtyCardToOopClosure*
duke@435 701 CompactibleFreeListSpace::new_dcto_cl(OopClosure* cl,
duke@435 702 CardTableModRefBS::PrecisionStyle precision,
duke@435 703 HeapWord* boundary) {
duke@435 704 return new FreeListSpace_DCTOC(this, _collector, cl, precision, boundary);
duke@435 705 }
duke@435 706
duke@435 707
duke@435 708 // Note on locking for the space iteration functions:
duke@435 709 // since the collector's iteration activities are concurrent with
duke@435 710 // allocation activities by mutators, absent a suitable mutual exclusion
duke@435 711 // mechanism the iterators may go awry. For instance a block being iterated
duke@435 712 // may suddenly be allocated or divided up and part of it allocated and
duke@435 713 // so on.
duke@435 714
duke@435 715 // Apply the given closure to each block in the space.
duke@435 716 void CompactibleFreeListSpace::blk_iterate_careful(BlkClosureCareful* cl) {
duke@435 717 assert_lock_strong(freelistLock());
duke@435 718 HeapWord *cur, *limit;
duke@435 719 for (cur = bottom(), limit = end(); cur < limit;
duke@435 720 cur += cl->do_blk_careful(cur));
duke@435 721 }
duke@435 722
duke@435 723 // Apply the given closure to each block in the space.
duke@435 724 void CompactibleFreeListSpace::blk_iterate(BlkClosure* cl) {
duke@435 725 assert_lock_strong(freelistLock());
duke@435 726 HeapWord *cur, *limit;
duke@435 727 for (cur = bottom(), limit = end(); cur < limit;
duke@435 728 cur += cl->do_blk(cur));
duke@435 729 }
duke@435 730
duke@435 731 // Apply the given closure to each oop in the space.
duke@435 732 void CompactibleFreeListSpace::oop_iterate(OopClosure* cl) {
duke@435 733 assert_lock_strong(freelistLock());
duke@435 734 HeapWord *cur, *limit;
duke@435 735 size_t curSize;
duke@435 736 for (cur = bottom(), limit = end(); cur < limit;
duke@435 737 cur += curSize) {
duke@435 738 curSize = block_size(cur);
duke@435 739 if (block_is_obj(cur)) {
duke@435 740 oop(cur)->oop_iterate(cl);
duke@435 741 }
duke@435 742 }
duke@435 743 }
duke@435 744
duke@435 745 // Apply the given closure to each oop in the space \intersect memory region.
duke@435 746 void CompactibleFreeListSpace::oop_iterate(MemRegion mr, OopClosure* cl) {
duke@435 747 assert_lock_strong(freelistLock());
duke@435 748 if (is_empty()) {
duke@435 749 return;
duke@435 750 }
duke@435 751 MemRegion cur = MemRegion(bottom(), end());
duke@435 752 mr = mr.intersection(cur);
duke@435 753 if (mr.is_empty()) {
duke@435 754 return;
duke@435 755 }
duke@435 756 if (mr.equals(cur)) {
duke@435 757 oop_iterate(cl);
duke@435 758 return;
duke@435 759 }
duke@435 760 assert(mr.end() <= end(), "just took an intersection above");
duke@435 761 HeapWord* obj_addr = block_start(mr.start());
duke@435 762 HeapWord* t = mr.end();
duke@435 763
duke@435 764 SpaceMemRegionOopsIterClosure smr_blk(cl, mr);
duke@435 765 if (block_is_obj(obj_addr)) {
duke@435 766 // Handle first object specially.
duke@435 767 oop obj = oop(obj_addr);
duke@435 768 obj_addr += adjustObjectSize(obj->oop_iterate(&smr_blk));
duke@435 769 } else {
duke@435 770 FreeChunk* fc = (FreeChunk*)obj_addr;
duke@435 771 obj_addr += fc->size();
duke@435 772 }
duke@435 773 while (obj_addr < t) {
duke@435 774 HeapWord* obj = obj_addr;
duke@435 775 obj_addr += block_size(obj_addr);
duke@435 776 // If "obj_addr" is not greater than top, then the
duke@435 777 // entire object "obj" is within the region.
duke@435 778 if (obj_addr <= t) {
duke@435 779 if (block_is_obj(obj)) {
duke@435 780 oop(obj)->oop_iterate(cl);
duke@435 781 }
duke@435 782 } else {
duke@435 783 // "obj" extends beyond end of region
duke@435 784 if (block_is_obj(obj)) {
duke@435 785 oop(obj)->oop_iterate(&smr_blk);
duke@435 786 }
duke@435 787 break;
duke@435 788 }
duke@435 789 }
duke@435 790 }
duke@435 791
duke@435 792 // NOTE: In the following methods, in order to safely be able to
duke@435 793 // apply the closure to an object, we need to be sure that the
duke@435 794 // object has been initialized. We are guaranteed that an object
duke@435 795 // is initialized if we are holding the Heap_lock with the
duke@435 796 // world stopped.
duke@435 797 void CompactibleFreeListSpace::verify_objects_initialized() const {
duke@435 798 if (is_init_completed()) {
duke@435 799 assert_locked_or_safepoint(Heap_lock);
duke@435 800 if (Universe::is_fully_initialized()) {
duke@435 801 guarantee(SafepointSynchronize::is_at_safepoint(),
duke@435 802 "Required for objects to be initialized");
duke@435 803 }
duke@435 804 } // else make a concession at vm start-up
duke@435 805 }
duke@435 806
duke@435 807 // Apply the given closure to each object in the space
duke@435 808 void CompactibleFreeListSpace::object_iterate(ObjectClosure* blk) {
duke@435 809 assert_lock_strong(freelistLock());
duke@435 810 NOT_PRODUCT(verify_objects_initialized());
duke@435 811 HeapWord *cur, *limit;
duke@435 812 size_t curSize;
duke@435 813 for (cur = bottom(), limit = end(); cur < limit;
duke@435 814 cur += curSize) {
duke@435 815 curSize = block_size(cur);
duke@435 816 if (block_is_obj(cur)) {
duke@435 817 blk->do_object(oop(cur));
duke@435 818 }
duke@435 819 }
duke@435 820 }
duke@435 821
jmasa@952 822 // Apply the given closure to each live object in the space
jmasa@952 823 // The usage of CompactibleFreeListSpace
jmasa@952 824 // by the ConcurrentMarkSweepGeneration for concurrent GCs allows
jmasa@952 825 // objects in the space to contain references to objects that are no
jmasa@952 826 // longer valid. For example, an object may reference another object
jmasa@952 827 // that has already been swept up (collected). This method uses
jmasa@952 828 // obj_is_alive() to determine whether it is safe to apply the closure to
jmasa@952 829 // an object. See obj_is_alive() for details on how liveness of an
jmasa@952 830 // object is decided.
jmasa@952 831
jmasa@952 832 void CompactibleFreeListSpace::safe_object_iterate(ObjectClosure* blk) {
jmasa@952 833 assert_lock_strong(freelistLock());
jmasa@952 834 NOT_PRODUCT(verify_objects_initialized());
jmasa@952 835 HeapWord *cur, *limit;
jmasa@952 836 size_t curSize;
jmasa@952 837 for (cur = bottom(), limit = end(); cur < limit;
jmasa@952 838 cur += curSize) {
jmasa@952 839 curSize = block_size(cur);
jmasa@952 840 if (block_is_obj(cur) && obj_is_alive(cur)) {
jmasa@952 841 blk->do_object(oop(cur));
jmasa@952 842 }
jmasa@952 843 }
jmasa@952 844 }
jmasa@952 845
duke@435 846 void CompactibleFreeListSpace::object_iterate_mem(MemRegion mr,
duke@435 847 UpwardsObjectClosure* cl) {
ysr@1580 848 assert_locked(freelistLock());
duke@435 849 NOT_PRODUCT(verify_objects_initialized());
duke@435 850 Space::object_iterate_mem(mr, cl);
duke@435 851 }
duke@435 852
duke@435 853 // Callers of this iterator beware: The closure application should
duke@435 854 // be robust in the face of uninitialized objects and should (always)
duke@435 855 // return a correct size so that the next addr + size below gives us a
duke@435 856 // valid block boundary. [See for instance,
duke@435 857 // ScanMarkedObjectsAgainCarefullyClosure::do_object_careful()
duke@435 858 // in ConcurrentMarkSweepGeneration.cpp.]
duke@435 859 HeapWord*
duke@435 860 CompactibleFreeListSpace::object_iterate_careful(ObjectClosureCareful* cl) {
duke@435 861 assert_lock_strong(freelistLock());
duke@435 862 HeapWord *addr, *last;
duke@435 863 size_t size;
duke@435 864 for (addr = bottom(), last = end();
duke@435 865 addr < last; addr += size) {
duke@435 866 FreeChunk* fc = (FreeChunk*)addr;
duke@435 867 if (fc->isFree()) {
duke@435 868 // Since we hold the free list lock, which protects direct
duke@435 869 // allocation in this generation by mutators, a free object
duke@435 870 // will remain free throughout this iteration code.
duke@435 871 size = fc->size();
duke@435 872 } else {
duke@435 873 // Note that the object need not necessarily be initialized,
duke@435 874 // because (for instance) the free list lock does NOT protect
duke@435 875 // object initialization. The closure application below must
duke@435 876 // therefore be correct in the face of uninitialized objects.
duke@435 877 size = cl->do_object_careful(oop(addr));
duke@435 878 if (size == 0) {
duke@435 879 // An unparsable object found. Signal early termination.
duke@435 880 return addr;
duke@435 881 }
duke@435 882 }
duke@435 883 }
duke@435 884 return NULL;
duke@435 885 }
duke@435 886
duke@435 887 // Callers of this iterator beware: The closure application should
duke@435 888 // be robust in the face of uninitialized objects and should (always)
duke@435 889 // return a correct size so that the next addr + size below gives us a
duke@435 890 // valid block boundary. [See for instance,
duke@435 891 // ScanMarkedObjectsAgainCarefullyClosure::do_object_careful()
duke@435 892 // in ConcurrentMarkSweepGeneration.cpp.]
duke@435 893 HeapWord*
duke@435 894 CompactibleFreeListSpace::object_iterate_careful_m(MemRegion mr,
duke@435 895 ObjectClosureCareful* cl) {
duke@435 896 assert_lock_strong(freelistLock());
duke@435 897 // Can't use used_region() below because it may not necessarily
duke@435 898 // be the same as [bottom(),end()); although we could
duke@435 899 // use [used_region().start(),round_to(used_region().end(),CardSize)),
duke@435 900 // that appears too cumbersome, so we just do the simpler check
duke@435 901 // in the assertion below.
duke@435 902 assert(!mr.is_empty() && MemRegion(bottom(),end()).contains(mr),
duke@435 903 "mr should be non-empty and within used space");
duke@435 904 HeapWord *addr, *end;
duke@435 905 size_t size;
duke@435 906 for (addr = block_start_careful(mr.start()), end = mr.end();
duke@435 907 addr < end; addr += size) {
duke@435 908 FreeChunk* fc = (FreeChunk*)addr;
duke@435 909 if (fc->isFree()) {
duke@435 910 // Since we hold the free list lock, which protects direct
duke@435 911 // allocation in this generation by mutators, a free object
duke@435 912 // will remain free throughout this iteration code.
duke@435 913 size = fc->size();
duke@435 914 } else {
duke@435 915 // Note that the object need not necessarily be initialized,
duke@435 916 // because (for instance) the free list lock does NOT protect
duke@435 917 // object initialization. The closure application below must
duke@435 918 // therefore be correct in the face of uninitialized objects.
duke@435 919 size = cl->do_object_careful_m(oop(addr), mr);
duke@435 920 if (size == 0) {
duke@435 921 // An unparsable object found. Signal early termination.
duke@435 922 return addr;
duke@435 923 }
duke@435 924 }
duke@435 925 }
duke@435 926 return NULL;
duke@435 927 }
duke@435 928
duke@435 929
ysr@777 930 HeapWord* CompactibleFreeListSpace::block_start_const(const void* p) const {
duke@435 931 NOT_PRODUCT(verify_objects_initialized());
duke@435 932 return _bt.block_start(p);
duke@435 933 }
duke@435 934
duke@435 935 HeapWord* CompactibleFreeListSpace::block_start_careful(const void* p) const {
duke@435 936 return _bt.block_start_careful(p);
duke@435 937 }
duke@435 938
duke@435 939 size_t CompactibleFreeListSpace::block_size(const HeapWord* p) const {
duke@435 940 NOT_PRODUCT(verify_objects_initialized());
duke@435 941 assert(MemRegion(bottom(), end()).contains(p), "p not in space");
duke@435 942 // This must be volatile, or else there is a danger that the compiler
duke@435 943 // will compile the code below into a sometimes-infinite loop, by keeping
duke@435 944 // the value read the first time in a register.
duke@435 945 while (true) {
duke@435 946 // We must do this until we get a consistent view of the object.
coleenp@622 947 if (FreeChunk::indicatesFreeChunk(p)) {
coleenp@622 948 volatile FreeChunk* fc = (volatile FreeChunk*)p;
coleenp@622 949 size_t res = fc->size();
coleenp@622 950 // If the object is still a free chunk, return the size, else it
coleenp@622 951 // has been allocated so try again.
coleenp@622 952 if (FreeChunk::indicatesFreeChunk(p)) {
duke@435 953 assert(res != 0, "Block size should not be 0");
duke@435 954 return res;
duke@435 955 }
coleenp@622 956 } else {
coleenp@622 957 // must read from what 'p' points to in each loop.
coleenp@622 958 klassOop k = ((volatile oopDesc*)p)->klass_or_null();
coleenp@622 959 if (k != NULL) {
coleenp@622 960 assert(k->is_oop(true /* ignore mark word */), "Should really be klass oop.");
coleenp@622 961 oop o = (oop)p;
coleenp@622 962 assert(o->is_parsable(), "Should be parsable");
coleenp@622 963 assert(o->is_oop(true /* ignore mark word */), "Should be an oop.");
coleenp@622 964 size_t res = o->size_given_klass(k->klass_part());
coleenp@622 965 res = adjustObjectSize(res);
coleenp@622 966 assert(res != 0, "Block size should not be 0");
coleenp@622 967 return res;
coleenp@622 968 }
duke@435 969 }
duke@435 970 }
duke@435 971 }
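// The loop above is a lock-free double check: if p looks like a free chunk,
// its size is read and the free-chunk indicator is re-tested before the size
// is trusted; otherwise the klass word is read through a volatile pointer and
// a NULL klass (an object whose header is not yet published) simply causes
// another iteration until a consistent view is obtained.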
duke@435 972
duke@435 973 // A variant of the above that uses the Printezis bits for
duke@435 974 // unparsable but allocated objects. This avoids any possible
duke@435 975 // stalls waiting for mutators to initialize objects, and is
duke@435 976 // thus potentially faster than the variant above. However,
duke@435 977 // this variant may return a zero size for a block that is
duke@435 978 // under mutation and for which a consistent size cannot be
duke@435 979 // inferred without stalling; see CMSCollector::block_size_if_printezis_bits().
duke@435 980 size_t CompactibleFreeListSpace::block_size_no_stall(HeapWord* p,
duke@435 981 const CMSCollector* c)
duke@435 982 const {
duke@435 983 assert(MemRegion(bottom(), end()).contains(p), "p not in space");
duke@435 984 // This must be volatile, or else there is a danger that the compiler
duke@435 985 // will compile the code below into a sometimes-infinite loop, by keeping
duke@435 986 // the value read the first time in a register.
duke@435 987 DEBUG_ONLY(uint loops = 0;)
duke@435 988 while (true) {
duke@435 989 // We must do this until we get a consistent view of the object.
coleenp@622 990 if (FreeChunk::indicatesFreeChunk(p)) {
coleenp@622 991 volatile FreeChunk* fc = (volatile FreeChunk*)p;
coleenp@622 992 size_t res = fc->size();
coleenp@622 993 if (FreeChunk::indicatesFreeChunk(p)) {
duke@435 994 assert(res != 0, "Block size should not be 0");
duke@435 995 assert(loops == 0, "Should be 0");
duke@435 996 return res;
duke@435 997 }
duke@435 998 } else {
coleenp@622 999 // must read from what 'p' points to in each loop.
coleenp@622 1000 klassOop k = ((volatile oopDesc*)p)->klass_or_null();
jmasa@953 1001 if (k != NULL &&
jmasa@953 1002 ((oopDesc*)p)->is_parsable() &&
jmasa@953 1003 ((oopDesc*)p)->is_conc_safe()) {
coleenp@622 1004 assert(k->is_oop(), "Should really be klass oop.");
coleenp@622 1005 oop o = (oop)p;
coleenp@622 1006 assert(o->is_oop(), "Should be an oop");
coleenp@622 1007 size_t res = o->size_given_klass(k->klass_part());
coleenp@622 1008 res = adjustObjectSize(res);
coleenp@622 1009 assert(res != 0, "Block size should not be 0");
coleenp@622 1010 return res;
coleenp@622 1011 } else {
coleenp@622 1012 return c->block_size_if_printezis_bits(p);
coleenp@622 1013 }
duke@435 1014 }
duke@435 1015 assert(loops == 0, "Can loop at most once");
duke@435 1016 DEBUG_ONLY(loops++;)
duke@435 1017 }
duke@435 1018 }
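// Unlike block_size() above, an unparsable object does not cause this variant
// to keep looping; it falls back to block_size_if_printezis_bits(), which may
// legitimately return zero for a block still under mutation, as the comment
// preceding this method notes.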
duke@435 1019
duke@435 1020 size_t CompactibleFreeListSpace::block_size_nopar(const HeapWord* p) const {
duke@435 1021 NOT_PRODUCT(verify_objects_initialized());
duke@435 1022 assert(MemRegion(bottom(), end()).contains(p), "p not in space");
duke@435 1023 FreeChunk* fc = (FreeChunk*)p;
duke@435 1024 if (fc->isFree()) {
duke@435 1025 return fc->size();
duke@435 1026 } else {
duke@435 1027 // Ignore mark word because this may be a recently promoted
duke@435 1028 // object whose mark word is used to chain together grey
duke@435 1029 // objects (the last one would have a null value).
duke@435 1030 assert(oop(p)->is_oop(true), "Should be an oop");
duke@435 1031 return adjustObjectSize(oop(p)->size());
duke@435 1032 }
duke@435 1033 }
duke@435 1034
duke@435 1035 // This implementation assumes that the property of "being an object" is
duke@435 1036 // stable. But being a free chunk may not be (because of parallel
duke@435 1037 // promotion.)
duke@435 1038 bool CompactibleFreeListSpace::block_is_obj(const HeapWord* p) const {
duke@435 1039 FreeChunk* fc = (FreeChunk*)p;
duke@435 1040 assert(is_in_reserved(p), "Should be in space");
duke@435 1041 // When doing a mark-sweep-compact of the CMS generation, this
duke@435 1042 // assertion may fail because prepare_for_compaction() uses
duke@435 1043 // space that is garbage to maintain information on ranges of
duke@435 1044 // live objects so that these live ranges can be moved as a whole.
duke@435 1045 // Comment out this assertion until that problem can be solved
duke@435 1046 // (i.e., the block start calculation may look at objects
duke@435 1047 // at addresses below "p" in finding the object that contains "p",
duke@435 1048 // and those objects (if garbage) may have been modified to hold
duke@435 1049 // live range information).
duke@435 1050 // assert(ParallelGCThreads > 0 || _bt.block_start(p) == p, "Should be a block boundary");
coleenp@622 1051 if (FreeChunk::indicatesFreeChunk(p)) return false;
coleenp@622 1052 klassOop k = oop(p)->klass_or_null();
duke@435 1053 if (k != NULL) {
duke@435 1054 // Ignore mark word because it may have been used to
duke@435 1055 // chain together promoted objects (the last one
duke@435 1056 // would have a null value).
duke@435 1057 assert(oop(p)->is_oop(true), "Should be an oop");
duke@435 1058 return true;
duke@435 1059 } else {
duke@435 1060 return false; // Was not an object at the start of collection.
duke@435 1061 }
duke@435 1062 }
duke@435 1063
duke@435 1064 // Check if the object is alive. This fact is checked either by consulting
duke@435 1065 // the main marking bitmap in the sweeping phase or, if it's a permanent
duke@435 1066 // generation and we're not in the sweeping phase, by checking the
duke@435 1067 // perm_gen_verify_bit_map where we store the "deadness" information if
duke@435 1068 // we did not sweep the perm gen in the most recent previous GC cycle.
duke@435 1069 bool CompactibleFreeListSpace::obj_is_alive(const HeapWord* p) const {
duke@435 1070 assert (block_is_obj(p), "The address should point to an object");
duke@435 1071
duke@435 1072 // If we're sweeping, we use object liveness information from the main bit map
duke@435 1073 // for both perm gen and old gen.
duke@435 1074 // We don't need to lock the bitmap (live_map or dead_map below), because
duke@435 1075 // EITHER we are in the middle of the sweeping phase, and the
duke@435 1076 // main marking bit map (live_map below) is locked,
duke@435 1077 // OR we're in other phases and perm_gen_verify_bit_map (dead_map below)
duke@435 1078 // is stable, because it's mutated only in the sweeping phase.
duke@435 1079 if (_collector->abstract_state() == CMSCollector::Sweeping) {
duke@435 1080 CMSBitMap* live_map = _collector->markBitMap();
duke@435 1081 return live_map->isMarked((HeapWord*) p);
duke@435 1082 } else {
duke@435 1083 // If we're not currently sweeping and we haven't swept the perm gen in
duke@435 1084 // the previous concurrent cycle then we may have dead but unswept objects
duke@435 1085 // in the perm gen. In this case, we use the "deadness" information
duke@435 1086 // that we had saved in perm_gen_verify_bit_map at the last sweep.
duke@435 1087 if (!CMSClassUnloadingEnabled && _collector->_permGen->reserved().contains(p)) {
duke@435 1088 if (_collector->verifying()) {
duke@435 1089 CMSBitMap* dead_map = _collector->perm_gen_verify_bit_map();
duke@435 1090 // Object is marked in the dead_map bitmap at the previous sweep
duke@435 1091 // when we know that it's dead; if the bitmap is not allocated then
duke@435 1092 // the object is alive.
duke@435 1093 return (dead_map->sizeInBits() == 0) // bit_map has been allocated
duke@435 1094 || !dead_map->par_isMarked((HeapWord*) p);
duke@435 1095 } else {
duke@435 1096 return false; // We can't say for sure if it's live, so we say that it's dead.
duke@435 1097 }
duke@435 1098 }
duke@435 1099 }
duke@435 1100 return true;
duke@435 1101 }
duke@435 1102
duke@435 1103 bool CompactibleFreeListSpace::block_is_obj_nopar(const HeapWord* p) const {
duke@435 1104 FreeChunk* fc = (FreeChunk*)p;
duke@435 1105 assert(is_in_reserved(p), "Should be in space");
duke@435 1106 assert(_bt.block_start(p) == p, "Should be a block boundary");
duke@435 1107 if (!fc->isFree()) {
duke@435 1108 // Ignore mark word because it may have been used to
duke@435 1109 // chain together promoted objects (the last one
duke@435 1110 // would have a null value).
duke@435 1111 assert(oop(p)->is_oop(true), "Should be an oop");
duke@435 1112 return true;
duke@435 1113 }
duke@435 1114 return false;
duke@435 1115 }
duke@435 1116
duke@435 1117 // "MT-safe but not guaranteed MT-precise" (TM); you may get an
duke@435 1118 // approximate answer if you don't hold the freelist lock when you call this.
duke@435 1119 size_t CompactibleFreeListSpace::totalSizeInIndexedFreeLists() const {
duke@435 1120 size_t size = 0;
duke@435 1121 for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
duke@435 1122 debug_only(
duke@435 1123 // We may be calling here without the lock in which case we
duke@435 1124 // won't do this modest sanity check.
duke@435 1125 if (freelistLock()->owned_by_self()) {
duke@435 1126 size_t total_list_size = 0;
duke@435 1127 for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
duke@435 1128 fc = fc->next()) {
duke@435 1129 total_list_size += i;
duke@435 1130 }
duke@435 1131 assert(total_list_size == i * _indexedFreeList[i].count(),
duke@435 1132 "Count in list is incorrect");
duke@435 1133 }
duke@435 1134 )
duke@435 1135 size += i * _indexedFreeList[i].count();
duke@435 1136 }
duke@435 1137 return size;
duke@435 1138 }
duke@435 1139
duke@435 1140 HeapWord* CompactibleFreeListSpace::par_allocate(size_t size) {
duke@435 1141 MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
duke@435 1142 return allocate(size);
duke@435 1143 }
duke@435 1144
duke@435 1145 HeapWord*
duke@435 1146 CompactibleFreeListSpace::getChunkFromSmallLinearAllocBlockRemainder(size_t size) {
duke@435 1147 return getChunkFromLinearAllocBlockRemainder(&_smallLinearAllocBlock, size);
duke@435 1148 }
duke@435 1149
duke@435 1150 HeapWord* CompactibleFreeListSpace::allocate(size_t size) {
duke@435 1151 assert_lock_strong(freelistLock());
duke@435 1152 HeapWord* res = NULL;
duke@435 1153 assert(size == adjustObjectSize(size),
duke@435 1154 "use adjustObjectSize() before calling into allocate()");
duke@435 1155
duke@435 1156 if (_adaptive_freelists) {
duke@435 1157 res = allocate_adaptive_freelists(size);
duke@435 1158 } else { // non-adaptive free lists
duke@435 1159 res = allocate_non_adaptive_freelists(size);
duke@435 1160 }
duke@435 1161
duke@435 1162 if (res != NULL) {
duke@435 1163 // check that res does lie in this space!
duke@435 1164 assert(is_in_reserved(res), "Not in this space!");
duke@435 1165 assert(is_aligned((void*)res), "alignment check");
duke@435 1166
duke@435 1167 FreeChunk* fc = (FreeChunk*)res;
duke@435 1168 fc->markNotFree();
duke@435 1169 assert(!fc->isFree(), "shouldn't be marked free");
coleenp@622 1170 assert(oop(fc)->klass_or_null() == NULL, "should look uninitialized");
duke@435 1171 // Verify that the block offset table shows this to
duke@435 1172 // be a single block, but not one which is unallocated.
duke@435 1173 _bt.verify_single_block(res, size);
duke@435 1174 _bt.verify_not_unallocated(res, size);
duke@435 1175 // mangle a just allocated object with a distinct pattern.
duke@435 1176 debug_only(fc->mangleAllocated(size));
duke@435 1177 }
duke@435 1178
duke@435 1179 return res;
duke@435 1180 }
duke@435 1181
duke@435 1182 HeapWord* CompactibleFreeListSpace::allocate_non_adaptive_freelists(size_t size) {
duke@435 1183 HeapWord* res = NULL;
duke@435 1184 // try and use linear allocation for smaller blocks
duke@435 1185 if (size < _smallLinearAllocBlock._allocation_size_limit) {
duke@435 1186 // if successful, the following also adjusts block offset table
duke@435 1187 res = getChunkFromSmallLinearAllocBlock(size);
duke@435 1188 }
duke@435 1189 // Else triage to indexed lists for smaller sizes
duke@435 1190 if (res == NULL) {
duke@435 1191 if (size < SmallForDictionary) {
duke@435 1192 res = (HeapWord*) getChunkFromIndexedFreeList(size);
duke@435 1193 } else {
duke@435 1194 // else get it from the big dictionary; if even this doesn't
duke@435 1195 // work we are out of luck.
duke@435 1196 res = (HeapWord*)getChunkFromDictionaryExact(size);
duke@435 1197 }
duke@435 1198 }
duke@435 1199
duke@435 1200 return res;
duke@435 1201 }
duke@435 1202
duke@435 1203 HeapWord* CompactibleFreeListSpace::allocate_adaptive_freelists(size_t size) {
duke@435 1204 assert_lock_strong(freelistLock());
duke@435 1205 HeapWord* res = NULL;
duke@435 1206 assert(size == adjustObjectSize(size),
duke@435 1207 "use adjustObjectSize() before calling into allocate()");
duke@435 1208
duke@435 1209 // Strategy
duke@435 1210 // if small
duke@435 1211 // exact size from small object indexed list if small
duke@435 1212 // small or large linear allocation block (linAB) as appropriate
duke@435 1213 // take from lists of greater sized chunks
duke@435 1214 // else
duke@435 1215 // dictionary
duke@435 1216 // small or large linear allocation block if it has the space
duke@435 1217 // Try allocating exact size from indexTable first
duke@435 1218 if (size < IndexSetSize) {
duke@435 1219 res = (HeapWord*) getChunkFromIndexedFreeList(size);
duke@435 1220 if (res != NULL) {
duke@435 1221 assert(res != (HeapWord*)_indexedFreeList[size].head(),
duke@435 1222 "Not removed from free list");
duke@435 1223 // no block offset table adjustment is necessary on blocks in
duke@435 1224 // the indexed lists.
duke@435 1225
duke@435 1226 // Try allocating from the small LinAB
duke@435 1227 } else if (size < _smallLinearAllocBlock._allocation_size_limit &&
duke@435 1228 (res = getChunkFromSmallLinearAllocBlock(size)) != NULL) {
duke@435 1229 // if successful, the above also adjusts block offset table
duke@435 1230 // Note that this call will refill the LinAB to
duke@435 1231 // satisfy the request. This is different than
duke@435 1232 // evm.
duke@435 1233 // Don't record chunk off a LinAB? smallSplitBirth(size);
duke@435 1234
duke@435 1235 } else {
duke@435 1236 // Raid the exact free lists larger than size, even if they are not
duke@435 1237 // overpopulated.
duke@435 1238 res = (HeapWord*) getChunkFromGreater(size);
duke@435 1239 }
duke@435 1240 } else {
duke@435 1241 // Big objects get allocated directly from the dictionary.
duke@435 1242 res = (HeapWord*) getChunkFromDictionaryExact(size);
duke@435 1243 if (res == NULL) {
duke@435 1244 // Try hard not to fail since an allocation failure will likely
duke@435 1245 // trigger a synchronous GC. Try to get the space from the
duke@435 1246 // allocation blocks.
duke@435 1247 res = getChunkFromSmallLinearAllocBlockRemainder(size);
duke@435 1248 }
duke@435 1249 }
duke@435 1250
duke@435 1251 return res;
duke@435 1252 }
duke@435 1253
duke@435 1254 // A worst-case estimate of the space required (in HeapWords) to expand the heap
duke@435 1255 // when promoting obj.
duke@435 1256 size_t CompactibleFreeListSpace::expansionSpaceRequired(size_t obj_size) const {
duke@435 1257 // Depending on the object size, expansion may require refilling either a
duke@435 1258 // bigLAB or a smallLAB plus refilling a PromotionInfo object. MinChunkSize
duke@435 1259 // is added because the dictionary may over-allocate to avoid fragmentation.
duke@435 1260 size_t space = obj_size;
duke@435 1261 if (!_adaptive_freelists) {
duke@435 1262 space = MAX2(space, _smallLinearAllocBlock._refillSize);
duke@435 1263 }
duke@435 1264 space += _promoInfo.refillSize() + 2 * MinChunkSize;
duke@435 1265 return space;
duke@435 1266 }
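// Illustration (hypothetical values): with adaptive free lists enabled, promoting a
// 10-word object when _promoInfo.refillSize() is 512 and MinChunkSize is 4 yields a
// worst-case estimate of 10 + 512 + 2*4 = 530 words; with non-adaptive free lists the
// first term is instead MAX2(10, _smallLinearAllocBlock._refillSize).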
duke@435 1267
duke@435 1268 FreeChunk* CompactibleFreeListSpace::getChunkFromGreater(size_t numWords) {
duke@435 1269 FreeChunk* ret;
duke@435 1270
duke@435 1271 assert(numWords >= MinChunkSize, "Size is less than minimum");
duke@435 1272 assert(linearAllocationWouldFail() || bestFitFirst(),
duke@435 1273 "Should not be here");
duke@435 1274
duke@435 1275 size_t i;
duke@435 1276 size_t currSize = numWords + MinChunkSize;
duke@435 1277 assert(currSize % MinObjAlignment == 0, "currSize should be aligned");
duke@435 1278 for (i = currSize; i < IndexSetSize; i += IndexSetStride) {
duke@435 1279 FreeList* fl = &_indexedFreeList[i];
duke@435 1280 if (fl->head()) {
duke@435 1281 ret = getFromListGreater(fl, numWords);
duke@435 1282 assert(ret == NULL || ret->isFree(), "Should be returning a free chunk");
duke@435 1283 return ret;
duke@435 1284 }
duke@435 1285 }
duke@435 1286
duke@435 1287 currSize = MAX2((size_t)SmallForDictionary,
duke@435 1288 (size_t)(numWords + MinChunkSize));
duke@435 1289
duke@435 1290 /* Try to get a chunk that satisfies request, while avoiding
duke@435 1291 fragmentation that can't be handled. */
duke@435 1292 {
duke@435 1293 ret = dictionary()->getChunk(currSize);
duke@435 1294 if (ret != NULL) {
duke@435 1295 assert(ret->size() - numWords >= MinChunkSize,
duke@435 1296 "Chunk is too small");
duke@435 1297 _bt.allocated((HeapWord*)ret, ret->size());
duke@435 1298 /* Carve returned chunk. */
duke@435 1299 (void) splitChunkAndReturnRemainder(ret, numWords);
duke@435 1300 /* Label this as no longer a free chunk. */
duke@435 1301 assert(ret->isFree(), "This chunk should be free");
duke@435 1302 ret->linkPrev(NULL);
duke@435 1303 }
duke@435 1304 assert(ret == NULL || ret->isFree(), "Should be returning a free chunk");
duke@435 1305 return ret;
duke@435 1306 }
duke@435 1307 ShouldNotReachHere();
duke@435 1308 }
duke@435 1309
duke@435 1310 bool CompactibleFreeListSpace::verifyChunkInIndexedFreeLists(FreeChunk* fc)
duke@435 1311 const {
duke@435 1312 assert(fc->size() < IndexSetSize, "Size of chunk is too large");
duke@435 1313 return _indexedFreeList[fc->size()].verifyChunkInFreeLists(fc);
duke@435 1314 }
duke@435 1315
duke@435 1316 bool CompactibleFreeListSpace::verifyChunkInFreeLists(FreeChunk* fc) const {
duke@435 1317 if (fc->size() >= IndexSetSize) {
duke@435 1318 return dictionary()->verifyChunkInFreeLists(fc);
duke@435 1319 } else {
duke@435 1320 return verifyChunkInIndexedFreeLists(fc);
duke@435 1321 }
duke@435 1322 }
duke@435 1323
duke@435 1324 #ifndef PRODUCT
duke@435 1325 void CompactibleFreeListSpace::assert_locked() const {
duke@435 1326 CMSLockVerifier::assert_locked(freelistLock(), parDictionaryAllocLock());
duke@435 1327 }
ysr@1580 1328
ysr@1580 1329 void CompactibleFreeListSpace::assert_locked(const Mutex* lock) const {
ysr@1580 1330 CMSLockVerifier::assert_locked(lock);
ysr@1580 1331 }
duke@435 1332 #endif
duke@435 1333
duke@435 1334 FreeChunk* CompactibleFreeListSpace::allocateScratch(size_t size) {
duke@435 1335 // In the parallel case, the main thread holds the free list lock
duke@435 1336 // on behalf of the parallel threads.
duke@435 1337 FreeChunk* fc;
duke@435 1338 {
duke@435 1339 // If GC is parallel, this might be called by several threads.
duke@435 1340 // This should be rare enough that the locking overhead won't affect
duke@435 1341 // the sequential code.
duke@435 1342 MutexLockerEx x(parDictionaryAllocLock(),
duke@435 1343 Mutex::_no_safepoint_check_flag);
duke@435 1344 fc = getChunkFromDictionary(size);
duke@435 1345 }
duke@435 1346 if (fc != NULL) {
duke@435 1347 fc->dontCoalesce();
duke@435 1348 assert(fc->isFree(), "Should be free, but not coalescable");
duke@435 1349 // Verify that the block offset table shows this to
duke@435 1350 // be a single block, but not one which is unallocated.
duke@435 1351 _bt.verify_single_block((HeapWord*)fc, fc->size());
duke@435 1352 _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
duke@435 1353 }
duke@435 1354 return fc;
duke@435 1355 }
duke@435 1356
coleenp@548 1357 oop CompactibleFreeListSpace::promote(oop obj, size_t obj_size) {
duke@435 1358 assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
duke@435 1359 assert_locked();
duke@435 1360
duke@435 1361 // if we are tracking promotions, then first ensure space for
duke@435 1362 // promotion (including spooling space for saving header if necessary).
duke@435 1363 // then allocate and copy, then track promoted info if needed.
duke@435 1364 // When tracking (see PromotionInfo::track()), the mark word may
duke@435 1365 // be displaced and in this case restoration of the mark word
duke@435 1366 // occurs in the (oop_since_save_marks_)iterate phase.
duke@435 1367 if (_promoInfo.tracking() && !_promoInfo.ensure_spooling_space()) {
duke@435 1368 return NULL;
duke@435 1369 }
duke@435 1370 // Call the allocate(size_t, bool) form directly to avoid the
duke@435 1371 // additional call through the allocate(size_t) form. Having
duke@435 1372 // the compile inline the call is problematic because allocate(size_t)
duke@435 1373 // is a virtual method.
duke@435 1374 HeapWord* res = allocate(adjustObjectSize(obj_size));
duke@435 1375 if (res != NULL) {
duke@435 1376 Copy::aligned_disjoint_words((HeapWord*)obj, res, obj_size);
duke@435 1377 // if we should be tracking promotions, do so.
duke@435 1378 if (_promoInfo.tracking()) {
duke@435 1379 _promoInfo.track((PromotedObject*)res);
duke@435 1380 }
duke@435 1381 }
duke@435 1382 return oop(res);
duke@435 1383 }
duke@435 1384
duke@435 1385 HeapWord*
duke@435 1386 CompactibleFreeListSpace::getChunkFromSmallLinearAllocBlock(size_t size) {
duke@435 1387 assert_locked();
duke@435 1388 assert(size >= MinChunkSize, "minimum chunk size");
duke@435 1389 assert(size < _smallLinearAllocBlock._allocation_size_limit,
duke@435 1390 "maximum from smallLinearAllocBlock");
duke@435 1391 return getChunkFromLinearAllocBlock(&_smallLinearAllocBlock, size);
duke@435 1392 }
duke@435 1393
duke@435 1394 HeapWord*
duke@435 1395 CompactibleFreeListSpace::getChunkFromLinearAllocBlock(LinearAllocBlock *blk,
duke@435 1396 size_t size) {
duke@435 1397 assert_locked();
duke@435 1398 assert(size >= MinChunkSize, "too small");
duke@435 1399 HeapWord* res = NULL;
duke@435 1400 // Try to do linear allocation from blk, making sure that blk still has space.
duke@435 1401 if (blk->_word_size == 0) {
duke@435 1402 // We have probably been unable to refill this block either in the prologue or
duke@435 1403 // after it was exhausted by the last linear allocation. Bail out until
duke@435 1404 // next time.
duke@435 1405 assert(blk->_ptr == NULL, "consistency check");
duke@435 1406 return NULL;
duke@435 1407 }
duke@435 1408 assert(blk->_word_size != 0 && blk->_ptr != NULL, "consistency check");
duke@435 1409 res = getChunkFromLinearAllocBlockRemainder(blk, size);
duke@435 1410 if (res != NULL) return res;
duke@435 1411
duke@435 1412 // about to exhaust this linear allocation block
duke@435 1413 if (blk->_word_size == size) { // exactly satisfied
duke@435 1414 res = blk->_ptr;
duke@435 1415 _bt.allocated(res, blk->_word_size);
duke@435 1416 } else if (size + MinChunkSize <= blk->_refillSize) {
ysr@1580 1417 size_t sz = blk->_word_size;
duke@435 1418 // Update _unallocated_block if the size is such that chunk would be
duke@435 1419 // returned to the indexed free list. All other chunks in the indexed
duke@435 1420 // free lists are allocated from the dictionary so that _unallocated_block
duke@435 1421 // has already been adjusted for them. Do it here so that the cost
duke@435 1422 // is incurred for all chunks added back to the indexed free lists.
ysr@1580 1423 if (sz < SmallForDictionary) {
ysr@1580 1424 _bt.allocated(blk->_ptr, sz);
duke@435 1425 }
duke@435 1426 // Return the chunk that isn't big enough, and then refill below.
ysr@1580 1427 addChunkToFreeLists(blk->_ptr, sz);
ysr@1580 1428 splitBirth(sz);
duke@435 1429 // Don't keep statistics on adding back chunk from a LinAB.
duke@435 1430 } else {
duke@435 1431 // A refilled block would not satisfy the request.
duke@435 1432 return NULL;
duke@435 1433 }
duke@435 1434
duke@435 1435 blk->_ptr = NULL; blk->_word_size = 0;
duke@435 1436 refillLinearAllocBlock(blk);
duke@435 1437 assert(blk->_ptr == NULL || blk->_word_size >= size + MinChunkSize,
duke@435 1438 "block was replenished");
duke@435 1439 if (res != NULL) {
duke@435 1440 splitBirth(size);
duke@435 1441 repairLinearAllocBlock(blk);
duke@435 1442 } else if (blk->_ptr != NULL) {
duke@435 1443 res = blk->_ptr;
duke@435 1444 size_t blk_size = blk->_word_size;
duke@435 1445 blk->_word_size -= size;
duke@435 1446 blk->_ptr += size;
duke@435 1447 splitBirth(size);
duke@435 1448 repairLinearAllocBlock(blk);
duke@435 1449 // Update BOT last so that other (parallel) GC threads see a consistent
duke@435 1450 // view of the BOT and free blocks.
duke@435 1451 // Above must occur before BOT is updated below.
duke@435 1452 _bt.split_block(res, blk_size, size); // adjust block offset table
duke@435 1453 }
duke@435 1454 return res;
duke@435 1455 }
duke@435 1456
duke@435 1457 HeapWord* CompactibleFreeListSpace::getChunkFromLinearAllocBlockRemainder(
duke@435 1458 LinearAllocBlock* blk,
duke@435 1459 size_t size) {
duke@435 1460 assert_locked();
duke@435 1461 assert(size >= MinChunkSize, "too small");
duke@435 1462
duke@435 1463 HeapWord* res = NULL;
duke@435 1464 // This is the common case. Keep it simple.
duke@435 1465 if (blk->_word_size >= size + MinChunkSize) {
duke@435 1466 assert(blk->_ptr != NULL, "consistency check");
duke@435 1467 res = blk->_ptr;
duke@435 1468 // Note that the BOT is up-to-date for the linAB before allocation. It
duke@435 1469 // indicates the start of the linAB. The split_block() updates the
duke@435 1470 // BOT for the linAB after the allocation (indicates the start of the
duke@435 1471 // next chunk to be allocated).
duke@435 1472 size_t blk_size = blk->_word_size;
duke@435 1473 blk->_word_size -= size;
duke@435 1474 blk->_ptr += size;
duke@435 1475 splitBirth(size);
duke@435 1476 repairLinearAllocBlock(blk);
duke@435 1477 // Update BOT last so that other (parallel) GC threads see a consistent
duke@435 1478 // view of the BOT and free blocks.
duke@435 1479 // Above must occur before BOT is updated below.
duke@435 1480 _bt.split_block(res, blk_size, size); // adjust block offset table
duke@435 1481 _bt.allocated(res, size);
duke@435 1482 }
duke@435 1483 return res;
duke@435 1484 }
duke@435 1485
duke@435 1486 FreeChunk*
duke@435 1487 CompactibleFreeListSpace::getChunkFromIndexedFreeList(size_t size) {
duke@435 1488 assert_locked();
duke@435 1489 assert(size < SmallForDictionary, "just checking");
duke@435 1490 FreeChunk* res;
duke@435 1491 res = _indexedFreeList[size].getChunkAtHead();
duke@435 1492 if (res == NULL) {
duke@435 1493 res = getChunkFromIndexedFreeListHelper(size);
duke@435 1494 }
duke@435 1495 _bt.verify_not_unallocated((HeapWord*) res, size);
ysr@1580 1496 assert(res == NULL || res->size() == size, "Incorrect block size");
duke@435 1497 return res;
duke@435 1498 }
duke@435 1499
duke@435 1500 FreeChunk*
ysr@1580 1501 CompactibleFreeListSpace::getChunkFromIndexedFreeListHelper(size_t size,
ysr@1580 1502 bool replenish) {
duke@435 1503 assert_locked();
duke@435 1504 FreeChunk* fc = NULL;
duke@435 1505 if (size < SmallForDictionary) {
duke@435 1506 assert(_indexedFreeList[size].head() == NULL ||
duke@435 1507 _indexedFreeList[size].surplus() <= 0,
duke@435 1508 "List for this size should be empty or under populated");
duke@435 1509 // Try best fit in exact lists before replenishing the list
duke@435 1510 if (!bestFitFirst() || (fc = bestFitSmall(size)) == NULL) {
duke@435 1511 // Replenish list.
duke@435 1512 //
duke@435 1513 // Things tried that failed.
duke@435 1514 // Tried allocating out of the two LinAB's first before
duke@435 1515 // replenishing lists.
duke@435 1516 // Tried small linAB of size 256 (size in indexed list)
duke@435 1517 // and replenishing indexed lists from the small linAB.
duke@435 1518 //
duke@435 1519 FreeChunk* newFc = NULL;
ysr@1580 1520 const size_t replenish_size = CMSIndexedFreeListReplenish * size;
duke@435 1521 if (replenish_size < SmallForDictionary) {
duke@435 1522 // Do not replenish from an underpopulated size.
duke@435 1523 if (_indexedFreeList[replenish_size].surplus() > 0 &&
duke@435 1524 _indexedFreeList[replenish_size].head() != NULL) {
ysr@1580 1525 newFc = _indexedFreeList[replenish_size].getChunkAtHead();
ysr@1580 1526 } else if (bestFitFirst()) {
duke@435 1527 newFc = bestFitSmall(replenish_size);
duke@435 1528 }
duke@435 1529 }
ysr@1580 1530 if (newFc == NULL && replenish_size > size) {
ysr@1580 1531 assert(CMSIndexedFreeListReplenish > 1, "ctl pt invariant");
ysr@1580 1532 newFc = getChunkFromIndexedFreeListHelper(replenish_size, false);
ysr@1580 1533 }
ysr@1580 1534 // Note: The split-death stats update for the block obtained above
ysr@1580 1535 // is recorded below, precisely when we know we are actually going
ysr@1580 1536 // to split it into more than one piece.
duke@435 1537 if (newFc != NULL) {
ysr@1580 1538 if (replenish || CMSReplenishIntermediate) {
ysr@1580 1539 // Replenish this list and return one block to caller.
ysr@1580 1540 size_t i;
ysr@1580 1541 FreeChunk *curFc, *nextFc;
ysr@1580 1542 size_t num_blk = newFc->size() / size;
ysr@1580 1543 assert(num_blk >= 1, "Smaller than requested?");
ysr@1580 1544 assert(newFc->size() % size == 0, "Should be integral multiple of request");
ysr@1580 1545 if (num_blk > 1) {
ysr@1580 1546 // we are sure we will be splitting the block just obtained
ysr@1580 1547 // into multiple pieces; record the split-death of the original
ysr@1580 1548 splitDeath(replenish_size);
ysr@1580 1549 }
ysr@1580 1550 // carve up and link blocks 0, ..., num_blk - 2
ysr@1580 1551 // The last chunk is not added to the lists but is returned as the
ysr@1580 1552 // free chunk.
ysr@1580 1553 for (curFc = newFc, nextFc = (FreeChunk*)((HeapWord*)curFc + size),
ysr@1580 1554 i = 0;
ysr@1580 1555 i < (num_blk - 1);
ysr@1580 1556 curFc = nextFc, nextFc = (FreeChunk*)((HeapWord*)nextFc + size),
ysr@1580 1557 i++) {
ysr@1580 1558 curFc->setSize(size);
ysr@1580 1559 // Don't record this as a return in order to try and
ysr@1580 1560 // determine the "returns" from a GC.
ysr@1580 1561 _bt.verify_not_unallocated((HeapWord*) curFc, size);
ysr@1580 1562 _indexedFreeList[size].returnChunkAtTail(curFc, false);
ysr@1580 1563 _bt.mark_block((HeapWord*)curFc, size);
ysr@1580 1564 splitBirth(size);
ysr@1580 1565 // Don't record the initial population of the indexed list
ysr@1580 1566 // as a split birth.
ysr@1580 1567 }
ysr@1580 1568
ysr@1580 1569 // check that the arithmetic was OK above
ysr@1580 1570 assert((HeapWord*)nextFc == (HeapWord*)newFc + num_blk*size,
ysr@1580 1571 "inconsistency in carving newFc");
duke@435 1572 curFc->setSize(size);
duke@435 1573 _bt.mark_block((HeapWord*)curFc, size);
duke@435 1574 splitBirth(size);
ysr@1580 1575 fc = curFc;
ysr@1580 1576 } else {
ysr@1580 1577 // Return entire block to caller
ysr@1580 1578 fc = newFc;
duke@435 1579 }
duke@435 1580 }
duke@435 1581 }
duke@435 1582 } else {
duke@435 1583 // Get a free chunk from the free chunk dictionary to be returned to
duke@435 1584 // replenish the indexed free list.
duke@435 1585 fc = getChunkFromDictionaryExact(size);
duke@435 1586 }
ysr@1580 1587 // assert(fc == NULL || fc->isFree(), "Should be returning a free chunk");
duke@435 1588 return fc;
duke@435 1589 }
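// Illustration (hypothetical values): for a request of size 8 with
// CMSIndexedFreeListReplenish == 4, replenish_size is 32. If a 32-word chunk is
// obtained, it is carved into 4 blocks of 8 words: the original block is recorded as
// a split death and each piece as a split birth; the first 3 pieces are appended to
// _indexedFreeList[8] and the 4th is returned to the caller.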
duke@435 1590
duke@435 1591 FreeChunk*
duke@435 1592 CompactibleFreeListSpace::getChunkFromDictionary(size_t size) {
duke@435 1593 assert_locked();
duke@435 1594 FreeChunk* fc = _dictionary->getChunk(size);
duke@435 1595 if (fc == NULL) {
duke@435 1596 return NULL;
duke@435 1597 }
duke@435 1598 _bt.allocated((HeapWord*)fc, fc->size());
duke@435 1599 if (fc->size() >= size + MinChunkSize) {
duke@435 1600 fc = splitChunkAndReturnRemainder(fc, size);
duke@435 1601 }
duke@435 1602 assert(fc->size() >= size, "chunk too small");
duke@435 1603 assert(fc->size() < size + MinChunkSize, "chunk too big");
duke@435 1604 _bt.verify_single_block((HeapWord*)fc, fc->size());
duke@435 1605 return fc;
duke@435 1606 }
duke@435 1607
duke@435 1608 FreeChunk*
duke@435 1609 CompactibleFreeListSpace::getChunkFromDictionaryExact(size_t size) {
duke@435 1610 assert_locked();
duke@435 1611 FreeChunk* fc = _dictionary->getChunk(size);
duke@435 1612 if (fc == NULL) {
duke@435 1613 return fc;
duke@435 1614 }
duke@435 1615 _bt.allocated((HeapWord*)fc, fc->size());
duke@435 1616 if (fc->size() == size) {
duke@435 1617 _bt.verify_single_block((HeapWord*)fc, size);
duke@435 1618 return fc;
duke@435 1619 }
duke@435 1620 assert(fc->size() > size, "getChunk() guarantee");
duke@435 1621 if (fc->size() < size + MinChunkSize) {
duke@435 1622 // Return the chunk to the dictionary and go get a bigger one.
duke@435 1623 returnChunkToDictionary(fc);
duke@435 1624 fc = _dictionary->getChunk(size + MinChunkSize);
duke@435 1625 if (fc == NULL) {
duke@435 1626 return NULL;
duke@435 1627 }
duke@435 1628 _bt.allocated((HeapWord*)fc, fc->size());
duke@435 1629 }
duke@435 1630 assert(fc->size() >= size + MinChunkSize, "tautology");
duke@435 1631 fc = splitChunkAndReturnRemainder(fc, size);
duke@435 1632 assert(fc->size() == size, "chunk is wrong size");
duke@435 1633 _bt.verify_single_block((HeapWord*)fc, size);
duke@435 1634 return fc;
duke@435 1635 }
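// Illustration (hypothetical values): with MinChunkSize == 4, a request for 300 words
// that pulls a 302-word chunk from the dictionary cannot be split (the 2-word
// remainder would be below MinChunkSize), so the chunk is returned and a chunk of at
// least 304 words is requested instead; a 350-word result is then split into the 300
// words handed back to the caller and a 50-word remainder that is returned to the
// indexed lists or the dictionary, depending on its size.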
duke@435 1636
duke@435 1637 void
duke@435 1638 CompactibleFreeListSpace::returnChunkToDictionary(FreeChunk* chunk) {
duke@435 1639 assert_locked();
duke@435 1640
duke@435 1641 size_t size = chunk->size();
duke@435 1642 _bt.verify_single_block((HeapWord*)chunk, size);
duke@435 1643 // adjust _unallocated_block downward, as necessary
duke@435 1644 _bt.freed((HeapWord*)chunk, size);
duke@435 1645 _dictionary->returnChunk(chunk);
ysr@1580 1646 #ifndef PRODUCT
ysr@1580 1647 if (CMSCollector::abstract_state() != CMSCollector::Sweeping) {
ysr@1580 1648 TreeChunk::as_TreeChunk(chunk)->list()->verify_stats();
ysr@1580 1649 }
ysr@1580 1650 #endif // PRODUCT
duke@435 1651 }
duke@435 1652
duke@435 1653 void
duke@435 1654 CompactibleFreeListSpace::returnChunkToFreeList(FreeChunk* fc) {
duke@435 1655 assert_locked();
duke@435 1656 size_t size = fc->size();
duke@435 1657 _bt.verify_single_block((HeapWord*) fc, size);
duke@435 1658 _bt.verify_not_unallocated((HeapWord*) fc, size);
duke@435 1659 if (_adaptive_freelists) {
duke@435 1660 _indexedFreeList[size].returnChunkAtTail(fc);
duke@435 1661 } else {
duke@435 1662 _indexedFreeList[size].returnChunkAtHead(fc);
duke@435 1663 }
ysr@1580 1664 #ifndef PRODUCT
ysr@1580 1665 if (CMSCollector::abstract_state() != CMSCollector::Sweeping) {
ysr@1580 1666 _indexedFreeList[size].verify_stats();
ysr@1580 1667 }
ysr@1580 1668 #endif // PRODUCT
duke@435 1669 }
duke@435 1670
duke@435 1671 // Add chunk to end of last block -- if it's the largest
duke@435 1672 // block -- and update BOT and census data. We would
duke@435 1673 // of course have preferred to coalesce it with the
duke@435 1674 // last block, but it's currently less expensive to find the
duke@435 1675 // largest block than it is to find the last.
duke@435 1676 void
duke@435 1677 CompactibleFreeListSpace::addChunkToFreeListsAtEndRecordingStats(
duke@435 1678 HeapWord* chunk, size_t size) {
duke@435 1679 // check that the chunk does lie in this space!
duke@435 1680 assert(chunk != NULL && is_in_reserved(chunk), "Not in this space!");
duke@435 1681 // One of the parallel gc task threads may be here
duke@435 1682 // whilst others are allocating.
duke@435 1683 Mutex* lock = NULL;
duke@435 1684 if (ParallelGCThreads != 0) {
duke@435 1685 lock = &_parDictionaryAllocLock;
duke@435 1686 }
duke@435 1687 FreeChunk* ec;
duke@435 1688 {
duke@435 1689 MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
duke@435 1690 ec = dictionary()->findLargestDict(); // get largest block
duke@435 1691 if (ec != NULL && ec->end() == chunk) {
duke@435 1692 // It's a coterminal block - we can coalesce.
duke@435 1693 size_t old_size = ec->size();
duke@435 1694 coalDeath(old_size);
duke@435 1695 removeChunkFromDictionary(ec);
duke@435 1696 size += old_size;
duke@435 1697 } else {
duke@435 1698 ec = (FreeChunk*)chunk;
duke@435 1699 }
duke@435 1700 }
duke@435 1701 ec->setSize(size);
duke@435 1702 debug_only(ec->mangleFreed(size));
duke@435 1703 if (size < SmallForDictionary) {
duke@435 1704 lock = _indexedFreeListParLocks[size];
duke@435 1705 }
duke@435 1706 MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
duke@435 1707 addChunkAndRepairOffsetTable((HeapWord*)ec, size, true);
duke@435 1708 // record the birth under the lock since the recording involves
duke@435 1709 // manipulation of the list on which the chunk lives and
duke@435 1710 // if the chunk is allocated and is the last on the list,
duke@435 1711 // the list can go away.
duke@435 1712 coalBirth(size);
duke@435 1713 }
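// Illustration (hypothetical addresses): if the dictionary's largest block spans
// [0x1000, 0x2000) and the chunk being added starts at 0x2000, the two are
// coterminal; the existing block is removed (recorded as a coalesce death for its
// old size), the sizes are merged, and the combined block is re-added with a
// coalesce birth for the larger size.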
duke@435 1714
duke@435 1715 void
duke@435 1716 CompactibleFreeListSpace::addChunkToFreeLists(HeapWord* chunk,
duke@435 1717 size_t size) {
duke@435 1718 // check that the chunk does lie in this space!
duke@435 1719 assert(chunk != NULL && is_in_reserved(chunk), "Not in this space!");
duke@435 1720 assert_locked();
duke@435 1721 _bt.verify_single_block(chunk, size);
duke@435 1722
duke@435 1723 FreeChunk* fc = (FreeChunk*) chunk;
duke@435 1724 fc->setSize(size);
duke@435 1725 debug_only(fc->mangleFreed(size));
duke@435 1726 if (size < SmallForDictionary) {
duke@435 1727 returnChunkToFreeList(fc);
duke@435 1728 } else {
duke@435 1729 returnChunkToDictionary(fc);
duke@435 1730 }
duke@435 1731 }
duke@435 1732
duke@435 1733 void
duke@435 1734 CompactibleFreeListSpace::addChunkAndRepairOffsetTable(HeapWord* chunk,
duke@435 1735 size_t size, bool coalesced) {
duke@435 1736 assert_locked();
duke@435 1737 assert(chunk != NULL, "null chunk");
duke@435 1738 if (coalesced) {
duke@435 1739 // repair BOT
duke@435 1740 _bt.single_block(chunk, size);
duke@435 1741 }
duke@435 1742 addChunkToFreeLists(chunk, size);
duke@435 1743 }
duke@435 1744
duke@435 1745 // We _must_ find the purported chunk on our free lists;
duke@435 1746 // we assert if we don't.
duke@435 1747 void
duke@435 1748 CompactibleFreeListSpace::removeFreeChunkFromFreeLists(FreeChunk* fc) {
duke@435 1749 size_t size = fc->size();
duke@435 1750 assert_locked();
duke@435 1751 debug_only(verifyFreeLists());
duke@435 1752 if (size < SmallForDictionary) {
duke@435 1753 removeChunkFromIndexedFreeList(fc);
duke@435 1754 } else {
duke@435 1755 removeChunkFromDictionary(fc);
duke@435 1756 }
duke@435 1757 _bt.verify_single_block((HeapWord*)fc, size);
duke@435 1758 debug_only(verifyFreeLists());
duke@435 1759 }
duke@435 1760
duke@435 1761 void
duke@435 1762 CompactibleFreeListSpace::removeChunkFromDictionary(FreeChunk* fc) {
duke@435 1763 size_t size = fc->size();
duke@435 1764 assert_locked();
duke@435 1765 assert(fc != NULL, "null chunk");
duke@435 1766 _bt.verify_single_block((HeapWord*)fc, size);
duke@435 1767 _dictionary->removeChunk(fc);
duke@435 1768 // adjust _unallocated_block upward, as necessary
duke@435 1769 _bt.allocated((HeapWord*)fc, size);
duke@435 1770 }
duke@435 1771
duke@435 1772 void
duke@435 1773 CompactibleFreeListSpace::removeChunkFromIndexedFreeList(FreeChunk* fc) {
duke@435 1774 assert_locked();
duke@435 1775 size_t size = fc->size();
duke@435 1776 _bt.verify_single_block((HeapWord*)fc, size);
duke@435 1777 NOT_PRODUCT(
duke@435 1778 if (FLSVerifyIndexTable) {
duke@435 1779 verifyIndexedFreeList(size);
duke@435 1780 }
duke@435 1781 )
duke@435 1782 _indexedFreeList[size].removeChunk(fc);
duke@435 1783 debug_only(fc->clearNext());
duke@435 1784 debug_only(fc->clearPrev());
duke@435 1785 NOT_PRODUCT(
duke@435 1786 if (FLSVerifyIndexTable) {
duke@435 1787 verifyIndexedFreeList(size);
duke@435 1788 }
duke@435 1789 )
duke@435 1790 }
duke@435 1791
duke@435 1792 FreeChunk* CompactibleFreeListSpace::bestFitSmall(size_t numWords) {
duke@435 1793 /* A hint is the next larger size that has a surplus.
duke@435 1794 Start search at a size large enough to guarantee that
duke@435 1795 the excess is >= MIN_CHUNK. */
duke@435 1796 size_t start = align_object_size(numWords + MinChunkSize);
duke@435 1797 if (start < IndexSetSize) {
duke@435 1798 FreeList* it = _indexedFreeList;
duke@435 1799 size_t hint = _indexedFreeList[start].hint();
duke@435 1800 while (hint < IndexSetSize) {
duke@435 1801 assert(hint % MinObjAlignment == 0, "hint should be aligned");
duke@435 1802 FreeList *fl = &_indexedFreeList[hint];
duke@435 1803 if (fl->surplus() > 0 && fl->head() != NULL) {
duke@435 1804 // Found a list with surplus, reset original hint
duke@435 1805 // and split out a free chunk which is returned.
duke@435 1806 _indexedFreeList[start].set_hint(hint);
duke@435 1807 FreeChunk* res = getFromListGreater(fl, numWords);
duke@435 1808 assert(res == NULL || res->isFree(),
duke@435 1809 "Should be returning a free chunk");
duke@435 1810 return res;
duke@435 1811 }
duke@435 1812 hint = fl->hint(); /* keep looking */
duke@435 1813 }
duke@435 1814 /* None found. */
duke@435 1815 it[start].set_hint(IndexSetSize);
duke@435 1816 }
duke@435 1817 return NULL;
duke@435 1818 }
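// Illustration (hypothetical values): with MinChunkSize == 4, a request for 20 words
// starts the hint walk at size 24. The hint chain (e.g. 24 -> 32 -> 40) is followed
// until a list with a positive surplus and a non-empty head is found; list 24's hint
// is then updated to that size, and getFromListGreater() splits a chunk from that
// list into the 20 words returned to the caller plus a remainder of at least
// MinChunkSize that goes back onto the free lists.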
duke@435 1819
duke@435 1820 /* Requires fl->size >= numWords + MinChunkSize */
duke@435 1821 FreeChunk* CompactibleFreeListSpace::getFromListGreater(FreeList* fl,
duke@435 1822 size_t numWords) {
duke@435 1823 FreeChunk *curr = fl->head();
duke@435 1824 assert(curr != NULL, "List is empty");
duke@435 1825 size_t oldNumWords = curr->size();
duke@435 1826 assert(numWords >= MinChunkSize, "Word size is too small");
duke@435 1827 assert(oldNumWords >= numWords + MinChunkSize,
duke@435 1828 "Size of chunks in the list is too small");
duke@435 1829
duke@435 1830 fl->removeChunk(curr);
duke@435 1831 // recorded indirectly by splitChunkAndReturnRemainder -
duke@435 1832 // smallSplit(oldNumWords, numWords);
duke@435 1833 FreeChunk* new_chunk = splitChunkAndReturnRemainder(curr, numWords);
duke@435 1834 // Does anything have to be done for the remainder in terms of
duke@435 1835 // fixing the card table?
duke@435 1836 assert(new_chunk == NULL || new_chunk->isFree(),
duke@435 1837 "Should be returning a free chunk");
duke@435 1838 return new_chunk;
duke@435 1839 }
duke@435 1840
duke@435 1841 FreeChunk*
duke@435 1842 CompactibleFreeListSpace::splitChunkAndReturnRemainder(FreeChunk* chunk,
duke@435 1843 size_t new_size) {
duke@435 1844 assert_locked();
duke@435 1845 size_t size = chunk->size();
duke@435 1846 assert(size > new_size, "Split from a smaller block?");
duke@435 1847 assert(is_aligned(chunk), "alignment problem");
duke@435 1848 assert(size == adjustObjectSize(size), "alignment problem");
duke@435 1849 size_t rem_size = size - new_size;
duke@435 1850 assert(rem_size == adjustObjectSize(rem_size), "alignment problem");
duke@435 1851 assert(rem_size >= MinChunkSize, "Free chunk smaller than minimum");
duke@435 1852 FreeChunk* ffc = (FreeChunk*)((HeapWord*)chunk + new_size);
duke@435 1853 assert(is_aligned(ffc), "alignment problem");
duke@435 1854 ffc->setSize(rem_size);
duke@435 1855 ffc->linkNext(NULL);
duke@435 1856 ffc->linkPrev(NULL); // Mark as a free block for other (parallel) GC threads.
duke@435 1857 // Above must occur before BOT is updated below.
duke@435 1858 // adjust block offset table
duke@435 1859 _bt.split_block((HeapWord*)chunk, chunk->size(), new_size);
duke@435 1860 if (rem_size < SmallForDictionary) {
duke@435 1861 bool is_par = (SharedHeap::heap()->n_par_threads() > 0);
duke@435 1862 if (is_par) _indexedFreeListParLocks[rem_size]->lock();
duke@435 1863 returnChunkToFreeList(ffc);
duke@435 1864 split(size, rem_size);
duke@435 1865 if (is_par) _indexedFreeListParLocks[rem_size]->unlock();
duke@435 1866 } else {
duke@435 1867 returnChunkToDictionary(ffc);
duke@435 1868 split(size, rem_size);
duke@435 1869 }
duke@435 1870 chunk->setSize(new_size);
duke@435 1871 return chunk;
duke@435 1872 }
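// Illustration (hypothetical sizes): splitting a 100-word free chunk to satisfy a
// 60-word request leaves a 40-word remainder starting 60 words into the original
// chunk. The remainder is marked free, the block offset table is updated via
// split_block(), and the remainder is returned to the indexed lists (under the
// per-size parallel lock if needed) or to the dictionary; split(100, 40) then
// records the census update.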
duke@435 1873
duke@435 1874 void
duke@435 1875 CompactibleFreeListSpace::sweep_completed() {
duke@435 1876 // Now that space is probably plentiful, refill linear
duke@435 1877 // allocation blocks as needed.
duke@435 1878 refillLinearAllocBlocksIfNeeded();
duke@435 1879 }
duke@435 1880
duke@435 1881 void
duke@435 1882 CompactibleFreeListSpace::gc_prologue() {
duke@435 1883 assert_locked();
duke@435 1884 if (PrintFLSStatistics != 0) {
duke@435 1885 gclog_or_tty->print("Before GC:\n");
duke@435 1886 reportFreeListStatistics();
duke@435 1887 }
duke@435 1888 refillLinearAllocBlocksIfNeeded();
duke@435 1889 }
duke@435 1890
duke@435 1891 void
duke@435 1892 CompactibleFreeListSpace::gc_epilogue() {
duke@435 1893 assert_locked();
duke@435 1894 if (PrintGCDetails && Verbose && !_adaptive_freelists) {
duke@435 1895 if (_smallLinearAllocBlock._word_size == 0)
duke@435 1896 warning("CompactibleFreeListSpace(epilogue):: Linear allocation failure");
duke@435 1897 }
duke@435 1898 assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");
duke@435 1899 _promoInfo.stopTrackingPromotions();
duke@435 1900 repairLinearAllocationBlocks();
duke@435 1901 // Print Space's stats
duke@435 1902 if (PrintFLSStatistics != 0) {
duke@435 1903 gclog_or_tty->print("After GC:\n");
duke@435 1904 reportFreeListStatistics();
duke@435 1905 }
duke@435 1906 }
duke@435 1907
duke@435 1908 // Iteration support, mostly delegated from a CMS generation
duke@435 1909
duke@435 1910 void CompactibleFreeListSpace::save_marks() {
duke@435 1911 // mark the "end" of the used space at the time of this call;
duke@435 1912 // note, however, that promoted objects from this point
duke@435 1913 // on are tracked in the _promoInfo below.
duke@435 1914 set_saved_mark_word(BlockOffsetArrayUseUnallocatedBlock ?
duke@435 1915 unallocated_block() : end());
duke@435 1916 // inform allocator that promotions should be tracked.
duke@435 1917 assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");
duke@435 1918 _promoInfo.startTrackingPromotions();
duke@435 1919 }
duke@435 1920
duke@435 1921 bool CompactibleFreeListSpace::no_allocs_since_save_marks() {
duke@435 1922 assert(_promoInfo.tracking(), "No preceding save_marks?");
duke@435 1923 guarantee(SharedHeap::heap()->n_par_threads() == 0,
duke@435 1924 "Shouldn't be called (yet) during parallel part of gc.");
duke@435 1925 return _promoInfo.noPromotions();
duke@435 1926 }
duke@435 1927
duke@435 1928 #define CFLS_OOP_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix) \
duke@435 1929 \
duke@435 1930 void CompactibleFreeListSpace:: \
duke@435 1931 oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk) { \
duke@435 1932 assert(SharedHeap::heap()->n_par_threads() == 0, \
duke@435 1933 "Shouldn't be called (yet) during parallel part of gc."); \
duke@435 1934 _promoInfo.promoted_oops_iterate##nv_suffix(blk); \
duke@435 1935 /* \
duke@435 1936 * This also restores any displaced headers and removes the elements from \
duke@435 1937 * the iteration set as they are processed, so that we have a clean slate \
duke@435 1938 * at the end of the iteration. Note, thus, that if new objects are \
duke@435 1939 * promoted as a result of the iteration they are iterated over as well. \
duke@435 1940 */ \
duke@435 1941 assert(_promoInfo.noPromotions(), "_promoInfo inconsistency"); \
duke@435 1942 }
duke@435 1943
duke@435 1944 ALL_SINCE_SAVE_MARKS_CLOSURES(CFLS_OOP_SINCE_SAVE_MARKS_DEFN)
duke@435 1945
duke@435 1946
duke@435 1947 void CompactibleFreeListSpace::object_iterate_since_last_GC(ObjectClosure* cl) {
duke@435 1948 // ugghh... how would one do this efficiently for a non-contiguous space?
duke@435 1949 guarantee(false, "NYI");
duke@435 1950 }
duke@435 1951
ysr@447 1952 bool CompactibleFreeListSpace::linearAllocationWouldFail() const {
duke@435 1953 return _smallLinearAllocBlock._word_size == 0;
duke@435 1954 }
duke@435 1955
duke@435 1956 void CompactibleFreeListSpace::repairLinearAllocationBlocks() {
duke@435 1957 // Fix up linear allocation blocks to look like free blocks
duke@435 1958 repairLinearAllocBlock(&_smallLinearAllocBlock);
duke@435 1959 }
duke@435 1960
duke@435 1961 void CompactibleFreeListSpace::repairLinearAllocBlock(LinearAllocBlock* blk) {
duke@435 1962 assert_locked();
duke@435 1963 if (blk->_ptr != NULL) {
duke@435 1964 assert(blk->_word_size != 0 && blk->_word_size >= MinChunkSize,
duke@435 1965 "Minimum block size requirement");
duke@435 1966 FreeChunk* fc = (FreeChunk*)(blk->_ptr);
duke@435 1967 fc->setSize(blk->_word_size);
duke@435 1968 fc->linkPrev(NULL); // mark as free
duke@435 1969 fc->dontCoalesce();
duke@435 1970 assert(fc->isFree(), "just marked it free");
duke@435 1971 assert(fc->cantCoalesce(), "just marked it uncoalescable");
duke@435 1972 }
duke@435 1973 }
duke@435 1974
duke@435 1975 void CompactibleFreeListSpace::refillLinearAllocBlocksIfNeeded() {
duke@435 1976 assert_locked();
duke@435 1977 if (_smallLinearAllocBlock._ptr == NULL) {
duke@435 1978 assert(_smallLinearAllocBlock._word_size == 0,
duke@435 1979 "Size of linAB should be zero if the ptr is NULL");
duke@435 1980 // Reset the linAB refill and allocation size limit.
duke@435 1981 _smallLinearAllocBlock.set(0, 0, 1024*SmallForLinearAlloc, SmallForLinearAlloc);
duke@435 1982 }
duke@435 1983 refillLinearAllocBlockIfNeeded(&_smallLinearAllocBlock);
duke@435 1984 }
duke@435 1985
duke@435 1986 void
duke@435 1987 CompactibleFreeListSpace::refillLinearAllocBlockIfNeeded(LinearAllocBlock* blk) {
duke@435 1988 assert_locked();
duke@435 1989 assert((blk->_ptr == NULL && blk->_word_size == 0) ||
duke@435 1990 (blk->_ptr != NULL && blk->_word_size >= MinChunkSize),
duke@435 1991 "blk invariant");
duke@435 1992 if (blk->_ptr == NULL) {
duke@435 1993 refillLinearAllocBlock(blk);
duke@435 1994 }
duke@435 1995 if (PrintMiscellaneous && Verbose) {
duke@435 1996 if (blk->_word_size == 0) {
duke@435 1997 warning("CompactibleFreeListSpace(prologue):: Linear allocation failure");
duke@435 1998 }
duke@435 1999 }
duke@435 2000 }
duke@435 2001
duke@435 2002 void
duke@435 2003 CompactibleFreeListSpace::refillLinearAllocBlock(LinearAllocBlock* blk) {
duke@435 2004 assert_locked();
duke@435 2005 assert(blk->_word_size == 0 && blk->_ptr == NULL,
duke@435 2006 "linear allocation block should be empty");
duke@435 2007 FreeChunk* fc;
duke@435 2008 if (blk->_refillSize < SmallForDictionary &&
duke@435 2009 (fc = getChunkFromIndexedFreeList(blk->_refillSize)) != NULL) {
duke@435 2010 // A linAB's strategy might be to use small sizes to reduce
duke@435 2011 // fragmentation but still get the benefits of allocation from a
duke@435 2012 // linAB.
duke@435 2013 } else {
duke@435 2014 fc = getChunkFromDictionary(blk->_refillSize);
duke@435 2015 }
duke@435 2016 if (fc != NULL) {
duke@435 2017 blk->_ptr = (HeapWord*)fc;
duke@435 2018 blk->_word_size = fc->size();
duke@435 2019 fc->dontCoalesce(); // to prevent sweeper from sweeping us up
duke@435 2020 }
duke@435 2021 }
duke@435 2022
ysr@447 2023 // Support for concurrent collection policy decisions.
ysr@447 2024 bool CompactibleFreeListSpace::should_concurrent_collect() const {
ysr@447 2025 // In the future we might want to add in fragmentation stats --
ysr@447 2026 // including erosion of the "mountain" into this decision as well.
ysr@447 2027 return !adaptive_freelists() && linearAllocationWouldFail();
ysr@447 2028 }
ysr@447 2029
duke@435 2030 // Support for compaction
duke@435 2031
duke@435 2032 void CompactibleFreeListSpace::prepare_for_compaction(CompactPoint* cp) {
duke@435 2033 SCAN_AND_FORWARD(cp,end,block_is_obj,block_size);
duke@435 2034 // prepare_for_compaction() uses the space between live objects
duke@435 2035 // so that a later phase can skip dead space quickly. So verification
duke@435 2036 // of the free lists doesn't work after this point.
duke@435 2037 }
duke@435 2038
duke@435 2039 #define obj_size(q) adjustObjectSize(oop(q)->size())
duke@435 2040 #define adjust_obj_size(s) adjustObjectSize(s)
duke@435 2041
duke@435 2042 void CompactibleFreeListSpace::adjust_pointers() {
duke@435 2043 // In other versions of adjust_pointers(), a bail out
duke@435 2044 // based on the amount of live data in the generation
duke@435 2045 // (i.e., if 0, bail out) may be used.
duke@435 2046 // Cannot test used() == 0 here because the free lists have already
duke@435 2047 // been mangled by the compaction.
duke@435 2048
duke@435 2049 SCAN_AND_ADJUST_POINTERS(adjust_obj_size);
duke@435 2050 // See note about verification in prepare_for_compaction().
duke@435 2051 }
duke@435 2052
duke@435 2053 void CompactibleFreeListSpace::compact() {
duke@435 2054 SCAN_AND_COMPACT(obj_size);
duke@435 2055 }
duke@435 2056
duke@435 2057 // fragmentation_metric = 1 - [sum of (fbs**2) / (sum of fbs)**2]
duke@435 2058 // where fbs is free block sizes
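// Illustration (hypothetical sizes): with two free blocks of 10 and 30 words,
// totFree is 40 and the sum of squared sizes is 100 + 900 = 1000, so the metric is
// 1 - 1000/1600 = 0.375. If the same 40 words were a single free block the metric
// would be 1 - 1600/1600 = 0, i.e. smaller values indicate less fragmentation.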
duke@435 2059 double CompactibleFreeListSpace::flsFrag() const {
duke@435 2060 size_t itabFree = totalSizeInIndexedFreeLists();
duke@435 2061 double frag = 0.0;
duke@435 2062 size_t i;
duke@435 2063
duke@435 2064 for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
duke@435 2065 double sz = i;
duke@435 2066 frag += _indexedFreeList[i].count() * (sz * sz);
duke@435 2067 }
duke@435 2068
duke@435 2069 double totFree = itabFree +
duke@435 2070 _dictionary->totalChunkSize(DEBUG_ONLY(freelistLock()));
duke@435 2071 if (totFree > 0) {
duke@435 2072 frag = ((frag + _dictionary->sum_of_squared_block_sizes()) /
duke@435 2073 (totFree * totFree));
duke@435 2074 frag = (double)1.0 - frag;
duke@435 2075 } else {
duke@435 2076 assert(frag == 0.0, "Follows from totFree == 0");
duke@435 2077 }
duke@435 2078 return frag;
duke@435 2079 }
duke@435 2080
duke@435 2081 void CompactibleFreeListSpace::beginSweepFLCensus(
duke@435 2082 float inter_sweep_current,
ysr@1580 2083 float inter_sweep_estimate,
ysr@1580 2084 float intra_sweep_estimate) {
duke@435 2085 assert_locked();
duke@435 2086 size_t i;
duke@435 2087 for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
duke@435 2088 FreeList* fl = &_indexedFreeList[i];
ysr@1580 2089 if (PrintFLSStatistics > 1) {
ysr@1580 2090 gclog_or_tty->print("size[" SIZE_FORMAT "] : ", i);
ysr@1580 2091 }
ysr@1580 2092 fl->compute_desired(inter_sweep_current, inter_sweep_estimate, intra_sweep_estimate);
ysr@1580 2093 fl->set_coalDesired((ssize_t)((double)fl->desired() * CMSSmallCoalSurplusPercent));
duke@435 2094 fl->set_beforeSweep(fl->count());
duke@435 2095 fl->set_bfrSurp(fl->surplus());
duke@435 2096 }
ysr@1580 2097 _dictionary->beginSweepDictCensus(CMSLargeCoalSurplusPercent,
duke@435 2098 inter_sweep_current,
ysr@1580 2099 inter_sweep_estimate,
ysr@1580 2100 intra_sweep_estimate);
duke@435 2101 }
duke@435 2102
duke@435 2103 void CompactibleFreeListSpace::setFLSurplus() {
duke@435 2104 assert_locked();
duke@435 2105 size_t i;
duke@435 2106 for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
duke@435 2107 FreeList *fl = &_indexedFreeList[i];
duke@435 2108 fl->set_surplus(fl->count() -
ysr@1580 2109 (ssize_t)((double)fl->desired() * CMSSmallSplitSurplusPercent));
duke@435 2110 }
duke@435 2111 }
duke@435 2112
duke@435 2113 void CompactibleFreeListSpace::setFLHints() {
duke@435 2114 assert_locked();
duke@435 2115 size_t i;
duke@435 2116 size_t h = IndexSetSize;
duke@435 2117 for (i = IndexSetSize - 1; i != 0; i -= IndexSetStride) {
duke@435 2118 FreeList *fl = &_indexedFreeList[i];
duke@435 2119 fl->set_hint(h);
duke@435 2120 if (fl->surplus() > 0) {
duke@435 2121 h = i;
duke@435 2122 }
duke@435 2123 }
duke@435 2124 }
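// Illustration (hypothetical sizes): setFLHints() walks from the largest indexed
// size down, so after it runs each list's hint names the smallest larger size that
// currently has a surplus (or IndexSetSize if none does). For example, if only
// sizes 40 and 96 have a surplus, lists below 40 get hint 40, lists from 40 up to
// (but not including) 96 get hint 96, and lists at or above 96 get IndexSetSize.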
duke@435 2125
duke@435 2126 void CompactibleFreeListSpace::clearFLCensus() {
duke@435 2127 assert_locked();
duke@435 2128 size_t i;
duke@435 2129 for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
duke@435 2130 FreeList *fl = &_indexedFreeList[i];
duke@435 2131 fl->set_prevSweep(fl->count());
duke@435 2132 fl->set_coalBirths(0);
duke@435 2133 fl->set_coalDeaths(0);
duke@435 2134 fl->set_splitBirths(0);
duke@435 2135 fl->set_splitDeaths(0);
duke@435 2136 }
duke@435 2137 }
duke@435 2138
ysr@447 2139 void CompactibleFreeListSpace::endSweepFLCensus(size_t sweep_count) {
ysr@1580 2140 if (PrintFLSStatistics > 0) {
ysr@1580 2141 HeapWord* largestAddr = (HeapWord*) dictionary()->findLargestDict();
ysr@1580 2142 gclog_or_tty->print_cr("CMS: Large block " PTR_FORMAT,
ysr@1580 2143 largestAddr);
ysr@1580 2144 }
duke@435 2145 setFLSurplus();
duke@435 2146 setFLHints();
duke@435 2147 if (PrintGC && PrintFLSCensus > 0) {
ysr@447 2148 printFLCensus(sweep_count);
duke@435 2149 }
duke@435 2150 clearFLCensus();
duke@435 2151 assert_locked();
ysr@1580 2152 _dictionary->endSweepDictCensus(CMSLargeSplitSurplusPercent);
duke@435 2153 }
duke@435 2154
duke@435 2155 bool CompactibleFreeListSpace::coalOverPopulated(size_t size) {
duke@435 2156 if (size < SmallForDictionary) {
duke@435 2157 FreeList *fl = &_indexedFreeList[size];
duke@435 2158 return (fl->coalDesired() < 0) ||
duke@435 2159 ((int)fl->count() > fl->coalDesired());
duke@435 2160 } else {
duke@435 2161 return dictionary()->coalDictOverPopulated(size);
duke@435 2162 }
duke@435 2163 }
duke@435 2164
duke@435 2165 void CompactibleFreeListSpace::smallCoalBirth(size_t size) {
duke@435 2166 assert(size < SmallForDictionary, "Size too large for indexed list");
duke@435 2167 FreeList *fl = &_indexedFreeList[size];
duke@435 2168 fl->increment_coalBirths();
duke@435 2169 fl->increment_surplus();
duke@435 2170 }
duke@435 2171
duke@435 2172 void CompactibleFreeListSpace::smallCoalDeath(size_t size) {
duke@435 2173 assert(size < SmallForDictionary, "Size too large for indexed list");
duke@435 2174 FreeList *fl = &_indexedFreeList[size];
duke@435 2175 fl->increment_coalDeaths();
duke@435 2176 fl->decrement_surplus();
duke@435 2177 }
duke@435 2178
duke@435 2179 void CompactibleFreeListSpace::coalBirth(size_t size) {
duke@435 2180 if (size < SmallForDictionary) {
duke@435 2181 smallCoalBirth(size);
duke@435 2182 } else {
duke@435 2183 dictionary()->dictCensusUpdate(size,
duke@435 2184 false /* split */,
duke@435 2185 true /* birth */);
duke@435 2186 }
duke@435 2187 }
duke@435 2188
duke@435 2189 void CompactibleFreeListSpace::coalDeath(size_t size) {
duke@435 2190 if (size < SmallForDictionary) {
duke@435 2191 smallCoalDeath(size);
duke@435 2192 } else {
duke@435 2193 dictionary()->dictCensusUpdate(size,
duke@435 2194 false /* split */,
duke@435 2195 false /* birth */);
duke@435 2196 }
duke@435 2197 }
duke@435 2198
duke@435 2199 void CompactibleFreeListSpace::smallSplitBirth(size_t size) {
duke@435 2200 assert(size < SmallForDictionary, "Size too large for indexed list");
duke@435 2201 FreeList *fl = &_indexedFreeList[size];
duke@435 2202 fl->increment_splitBirths();
duke@435 2203 fl->increment_surplus();
duke@435 2204 }
duke@435 2205
duke@435 2206 void CompactibleFreeListSpace::smallSplitDeath(size_t size) {
duke@435 2207 assert(size < SmallForDictionary, "Size too large for indexed list");
duke@435 2208 FreeList *fl = &_indexedFreeList[size];
duke@435 2209 fl->increment_splitDeaths();
duke@435 2210 fl->decrement_surplus();
duke@435 2211 }
duke@435 2212
duke@435 2213 void CompactibleFreeListSpace::splitBirth(size_t size) {
duke@435 2214 if (size < SmallForDictionary) {
duke@435 2215 smallSplitBirth(size);
duke@435 2216 } else {
duke@435 2217 dictionary()->dictCensusUpdate(size,
duke@435 2218 true /* split */,
duke@435 2219 true /* birth */);
duke@435 2220 }
duke@435 2221 }
duke@435 2222
duke@435 2223 void CompactibleFreeListSpace::splitDeath(size_t size) {
duke@435 2224 if (size < SmallForDictionary) {
duke@435 2225 smallSplitDeath(size);
duke@435 2226 } else {
duke@435 2227 dictionary()->dictCensusUpdate(size,
duke@435 2228 true /* split */,
duke@435 2229 false /* birth */);
duke@435 2230 }
duke@435 2231 }
duke@435 2232
duke@435 2233 void CompactibleFreeListSpace::split(size_t from, size_t to1) {
duke@435 2234 size_t to2 = from - to1;
duke@435 2235 splitDeath(from);
duke@435 2236 splitBirth(to1);
duke@435 2237 splitBirth(to2);
duke@435 2238 }
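// Illustration (hypothetical sizes): split(100, 60) records a split death for size
// 100 and split births for sizes 60 and 40, keeping the census consistent with
// splitChunkAndReturnRemainder() above.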
duke@435 2239
duke@435 2240 void CompactibleFreeListSpace::print() const {
duke@435 2241 tty->print(" CompactibleFreeListSpace");
duke@435 2242 Space::print();
duke@435 2243 }
duke@435 2244
duke@435 2245 void CompactibleFreeListSpace::prepare_for_verify() {
duke@435 2246 assert_locked();
duke@435 2247 repairLinearAllocationBlocks();
duke@435 2248 // Verify that the SpoolBlocks look like free blocks of
duke@435 2249 // appropriate sizes... To be done ...
duke@435 2250 }
duke@435 2251
duke@435 2252 class VerifyAllBlksClosure: public BlkClosure {
coleenp@548 2253 private:
duke@435 2254 const CompactibleFreeListSpace* _sp;
duke@435 2255 const MemRegion _span;
duke@435 2256
duke@435 2257 public:
duke@435 2258 VerifyAllBlksClosure(const CompactibleFreeListSpace* sp,
duke@435 2259 MemRegion span) : _sp(sp), _span(span) { }
duke@435 2260
coleenp@548 2261 virtual size_t do_blk(HeapWord* addr) {
duke@435 2262 size_t res;
duke@435 2263 if (_sp->block_is_obj(addr)) {
duke@435 2264 oop p = oop(addr);
duke@435 2265 guarantee(p->is_oop(), "Should be an oop");
duke@435 2266 res = _sp->adjustObjectSize(p->size());
duke@435 2267 if (_sp->obj_is_alive(addr)) {
duke@435 2268 p->verify();
duke@435 2269 }
duke@435 2270 } else {
duke@435 2271 FreeChunk* fc = (FreeChunk*)addr;
duke@435 2272 res = fc->size();
duke@435 2273 if (FLSVerifyLists && !fc->cantCoalesce()) {
duke@435 2274 guarantee(_sp->verifyChunkInFreeLists(fc),
duke@435 2275 "Chunk should be on a free list");
duke@435 2276 }
duke@435 2277 }
duke@435 2278 guarantee(res != 0, "Livelock: no rank reduction!");
duke@435 2279 return res;
duke@435 2280 }
duke@435 2281 };
duke@435 2282
duke@435 2283 class VerifyAllOopsClosure: public OopClosure {
coleenp@548 2284 private:
duke@435 2285 const CMSCollector* _collector;
duke@435 2286 const CompactibleFreeListSpace* _sp;
duke@435 2287 const MemRegion _span;
duke@435 2288 const bool _past_remark;
duke@435 2289 const CMSBitMap* _bit_map;
duke@435 2290
coleenp@548 2291 protected:
coleenp@548 2292 void do_oop(void* p, oop obj) {
coleenp@548 2293 if (_span.contains(obj)) { // the interior oop points into CMS heap
coleenp@548 2294 if (!_span.contains(p)) { // reference from outside CMS heap
coleenp@548 2295 // Should be a valid object; the first disjunct below allows
coleenp@548 2296 // us to sidestep an assertion in block_is_obj() that insists
coleenp@548 2297 // that p be in _sp. Note that several generations (and spaces)
coleenp@548 2298 // are spanned by _span (CMS heap) above.
coleenp@548 2299 guarantee(!_sp->is_in_reserved(obj) ||
coleenp@548 2300 _sp->block_is_obj((HeapWord*)obj),
coleenp@548 2301 "Should be an object");
coleenp@548 2302 guarantee(obj->is_oop(), "Should be an oop");
coleenp@548 2303 obj->verify();
coleenp@548 2304 if (_past_remark) {
coleenp@548 2305 // Remark has been completed, the object should be marked
coleenp@548 2306 guarantee(_bit_map->isMarked((HeapWord*)obj), "Should be marked");
coleenp@548 2307 }
coleenp@548 2308 } else { // reference within CMS heap
coleenp@548 2309 if (_past_remark) {
coleenp@548 2310 // Remark has been completed -- so the referent should have
coleenp@548 2311 // been marked, if referring object is.
coleenp@548 2312 if (_bit_map->isMarked(_collector->block_start(p))) {
coleenp@548 2313 guarantee(_bit_map->isMarked((HeapWord*)obj), "Marking error?");
coleenp@548 2314 }
coleenp@548 2315 }
coleenp@548 2316 }
coleenp@548 2317 } else if (_sp->is_in_reserved(p)) {
coleenp@548 2318 // the reference is from FLS, and points out of FLS
coleenp@548 2319 guarantee(obj->is_oop(), "Should be an oop");
coleenp@548 2320 obj->verify();
coleenp@548 2321 }
coleenp@548 2322 }
coleenp@548 2323
coleenp@548 2324 template <class T> void do_oop_work(T* p) {
coleenp@548 2325 T heap_oop = oopDesc::load_heap_oop(p);
coleenp@548 2326 if (!oopDesc::is_null(heap_oop)) {
coleenp@548 2327 oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
coleenp@548 2328 do_oop(p, obj);
coleenp@548 2329 }
coleenp@548 2330 }
coleenp@548 2331
duke@435 2332 public:
duke@435 2333 VerifyAllOopsClosure(const CMSCollector* collector,
duke@435 2334 const CompactibleFreeListSpace* sp, MemRegion span,
duke@435 2335 bool past_remark, CMSBitMap* bit_map) :
duke@435 2336 OopClosure(), _collector(collector), _sp(sp), _span(span),
duke@435 2337 _past_remark(past_remark), _bit_map(bit_map) { }
duke@435 2338
coleenp@548 2339 virtual void do_oop(oop* p) { VerifyAllOopsClosure::do_oop_work(p); }
coleenp@548 2340 virtual void do_oop(narrowOop* p) { VerifyAllOopsClosure::do_oop_work(p); }
duke@435 2341 };
duke@435 2342
duke@435 2343 void CompactibleFreeListSpace::verify(bool ignored) const {
duke@435 2344 assert_lock_strong(&_freelistLock);
duke@435 2345 verify_objects_initialized();
duke@435 2346 MemRegion span = _collector->_span;
duke@435 2347 bool past_remark = (_collector->abstract_state() ==
duke@435 2348 CMSCollector::Sweeping);
duke@435 2349
duke@435 2350 ResourceMark rm;
duke@435 2351 HandleMark hm;
duke@435 2352
duke@435 2353 // Check integrity of CFL data structures
duke@435 2354 _promoInfo.verify();
duke@435 2355 _dictionary->verify();
duke@435 2356 if (FLSVerifyIndexTable) {
duke@435 2357 verifyIndexedFreeLists();
duke@435 2358 }
duke@435 2359 // Check integrity of all objects and free blocks in space
duke@435 2360 {
duke@435 2361 VerifyAllBlksClosure cl(this, span);
duke@435 2362 ((CompactibleFreeListSpace*)this)->blk_iterate(&cl); // cast off const
duke@435 2363 }
duke@435 2364 // Check that all references in the heap to FLS
duke@435 2365 // are to valid objects in FLS or that references in
duke@435 2366 // FLS are to valid objects elsewhere in the heap
duke@435 2367 if (FLSVerifyAllHeapReferences)
duke@435 2368 {
duke@435 2369 VerifyAllOopsClosure cl(_collector, this, span, past_remark,
duke@435 2370 _collector->markBitMap());
duke@435 2371 CollectedHeap* ch = Universe::heap();
duke@435 2372 ch->oop_iterate(&cl); // all oops in generations
duke@435 2373 ch->permanent_oop_iterate(&cl); // all oops in perm gen
duke@435 2374 }
duke@435 2375
duke@435 2376 if (VerifyObjectStartArray) {
duke@435 2377 // Verify the block offset table
duke@435 2378 _bt.verify();
duke@435 2379 }
duke@435 2380 }
duke@435 2381
duke@435 2382 #ifndef PRODUCT
duke@435 2383 void CompactibleFreeListSpace::verifyFreeLists() const {
duke@435 2384 if (FLSVerifyLists) {
duke@435 2385 _dictionary->verify();
duke@435 2386 verifyIndexedFreeLists();
duke@435 2387 } else {
duke@435 2388 if (FLSVerifyDictionary) {
duke@435 2389 _dictionary->verify();
duke@435 2390 }
duke@435 2391 if (FLSVerifyIndexTable) {
duke@435 2392 verifyIndexedFreeLists();
duke@435 2393 }
duke@435 2394 }
duke@435 2395 }
duke@435 2396 #endif
duke@435 2397
duke@435 2398 void CompactibleFreeListSpace::verifyIndexedFreeLists() const {
duke@435 2399 size_t i = 0;
duke@435 2400 for (; i < MinChunkSize; i++) {
duke@435 2401 guarantee(_indexedFreeList[i].head() == NULL, "should be NULL");
duke@435 2402 }
duke@435 2403 for (; i < IndexSetSize; i++) {
duke@435 2404 verifyIndexedFreeList(i);
duke@435 2405 }
duke@435 2406 }
duke@435 2407
duke@435 2408 void CompactibleFreeListSpace::verifyIndexedFreeList(size_t size) const {
ysr@1580 2409 FreeChunk* fc = _indexedFreeList[size].head();
ysr@1580 2410 FreeChunk* tail = _indexedFreeList[size].tail();
ysr@1580 2411 size_t num = _indexedFreeList[size].count();
ysr@1580 2412 size_t n = 0;
ysr@777 2413 guarantee((size % 2 == 0) || fc == NULL, "Odd slots should be empty");
ysr@1580 2414 for (; fc != NULL; fc = fc->next(), n++) {
duke@435 2415 guarantee(fc->size() == size, "Size inconsistency");
duke@435 2416 guarantee(fc->isFree(), "!free?");
duke@435 2417 guarantee(fc->next() == NULL || fc->next()->prev() == fc, "Broken list");
ysr@1580 2418 guarantee((fc->next() == NULL) == (fc == tail), "Incorrect tail");
duke@435 2419 }
ysr@1580 2420 guarantee(n == num, "Incorrect count");
duke@435 2421 }
duke@435 2422
duke@435 2423 #ifndef PRODUCT
duke@435 2424 void CompactibleFreeListSpace::checkFreeListConsistency() const {
duke@435 2425 assert(_dictionary->minSize() <= IndexSetSize,
duke@435 2426 "Some sizes can't be allocated without recourse to"
duke@435 2427 " linear allocation buffers");
duke@435 2428 assert(MIN_TREE_CHUNK_SIZE*HeapWordSize == sizeof(TreeChunk),
duke@435 2429 "else MIN_TREE_CHUNK_SIZE is wrong");
duke@435 2430 assert((IndexSetStride == 2 && IndexSetStart == 2) ||
duke@435 2431 (IndexSetStride == 1 && IndexSetStart == 1), "just checking");
duke@435 2432 assert((IndexSetStride != 2) || (MinChunkSize % 2 == 0),
duke@435 2433 "Some for-loops may be incorrectly initialized");
duke@435 2434 assert((IndexSetStride != 2) || (IndexSetSize % 2 == 1),
duke@435 2435 "For-loops that iterate over IndexSet with stride 2 may be wrong");
duke@435 2436 }
duke@435 2437 #endif
duke@435 2438
ysr@447 2439 void CompactibleFreeListSpace::printFLCensus(size_t sweep_count) const {
duke@435 2440 assert_lock_strong(&_freelistLock);
ysr@447 2441 FreeList total;
ysr@447 2442 gclog_or_tty->print("end sweep# " SIZE_FORMAT "\n", sweep_count);
ysr@447 2443 FreeList::print_labels_on(gclog_or_tty, "size");
duke@435 2444 size_t totalFree = 0;
duke@435 2445 for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
duke@435 2446 const FreeList *fl = &_indexedFreeList[i];
ysr@447 2447 totalFree += fl->count() * fl->size();
ysr@447 2448 if (i % (40*IndexSetStride) == 0) {
ysr@447 2449 FreeList::print_labels_on(gclog_or_tty, "size");
ysr@447 2450 }
ysr@447 2451 fl->print_on(gclog_or_tty);
ysr@447 2452 total.set_bfrSurp( total.bfrSurp() + fl->bfrSurp() );
ysr@447 2453 total.set_surplus( total.surplus() + fl->surplus() );
ysr@447 2454 total.set_desired( total.desired() + fl->desired() );
ysr@447 2455 total.set_prevSweep( total.prevSweep() + fl->prevSweep() );
ysr@447 2456 total.set_beforeSweep(total.beforeSweep() + fl->beforeSweep());
ysr@447 2457 total.set_count( total.count() + fl->count() );
ysr@447 2458 total.set_coalBirths( total.coalBirths() + fl->coalBirths() );
ysr@447 2459 total.set_coalDeaths( total.coalDeaths() + fl->coalDeaths() );
ysr@447 2460 total.set_splitBirths(total.splitBirths() + fl->splitBirths());
ysr@447 2461 total.set_splitDeaths(total.splitDeaths() + fl->splitDeaths());
duke@435 2462 }
ysr@447 2463 total.print_on(gclog_or_tty, "TOTAL");
ysr@447 2464 gclog_or_tty->print_cr("Total free in indexed lists "
ysr@447 2465 SIZE_FORMAT " words", totalFree);
duke@435 2466 gclog_or_tty->print("growth: %8.5f deficit: %8.5f\n",
ysr@447 2467 (double)(total.splitBirths()+total.coalBirths()-total.splitDeaths()-total.coalDeaths())/
ysr@447 2468 (total.prevSweep() != 0 ? (double)total.prevSweep() : 1.0),
ysr@447 2469 (double)(total.desired() - total.count())/(total.desired() != 0 ? (double)total.desired() : 1.0));
duke@435 2470 _dictionary->printDictCensus();
duke@435 2471 }
duke@435 2472
ysr@1580 2473 ///////////////////////////////////////////////////////////////////////////
ysr@1580 2474 // CFLS_LAB
ysr@1580 2475 ///////////////////////////////////////////////////////////////////////////
ysr@1580 2476
ysr@1580 2477 #define VECTOR_257(x) \
ysr@1580 2478 /* 1 2 3 4 5 6 7 8 9 1x 11 12 13 14 15 16 17 18 19 2x 21 22 23 24 25 26 27 28 29 3x 31 32 */ \
ysr@1580 2479 { x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, \
ysr@1580 2480 x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, \
ysr@1580 2481 x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, \
ysr@1580 2482 x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, \
ysr@1580 2483 x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, \
ysr@1580 2484 x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, \
ysr@1580 2485 x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, \
ysr@1580 2486 x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, \
ysr@1580 2487 x }
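// The macro above expands to exactly 257 (8*32 + 1) copies of its argument,
// matching CompactibleFreeListSpace::IndexSetSize (see the assert in the
// CFLS_LAB constructor below).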
ysr@1580 2488
ysr@1580 2489 // Initialize with default setting of CMSParPromoteBlocksToClaim, _not_
ysr@1580 2490 // OldPLABSize, whose static default is different; if overridden at the
ysr@1580 2491 // command-line, this will get reinitialized via a call to
ysr@1580 2492 // modify_initialization() below.
ysr@1580 2493 AdaptiveWeightedAverage CFLS_LAB::_blocks_to_claim[] =
ysr@1580 2494 VECTOR_257(AdaptiveWeightedAverage(OldPLABWeight, (float)CMSParPromoteBlocksToClaim));
ysr@1580 2495 size_t CFLS_LAB::_global_num_blocks[] = VECTOR_257(0);
ysr@1580 2496 int CFLS_LAB::_global_num_workers[] = VECTOR_257(0);
duke@435 2497
duke@435 2498 CFLS_LAB::CFLS_LAB(CompactibleFreeListSpace* cfls) :
duke@435 2499 _cfls(cfls)
duke@435 2500 {
ysr@1580 2501 assert(CompactibleFreeListSpace::IndexSetSize == 257, "Modify VECTOR_257() macro above");
duke@435 2502 for (size_t i = CompactibleFreeListSpace::IndexSetStart;
duke@435 2503 i < CompactibleFreeListSpace::IndexSetSize;
duke@435 2504 i += CompactibleFreeListSpace::IndexSetStride) {
duke@435 2505 _indexedFreeList[i].set_size(i);
ysr@1580 2506 _num_blocks[i] = 0;
ysr@1580 2507 }
ysr@1580 2508 }
ysr@1580 2509
ysr@1580 2510 static bool _CFLS_LAB_modified = false;
ysr@1580 2511
ysr@1580 2512 void CFLS_LAB::modify_initialization(size_t n, unsigned wt) {
ysr@1580 2513 assert(!_CFLS_LAB_modified, "Call only once");
ysr@1580 2514 _CFLS_LAB_modified = true;
ysr@1580 2515 for (size_t i = CompactibleFreeListSpace::IndexSetStart;
ysr@1580 2516 i < CompactibleFreeListSpace::IndexSetSize;
ysr@1580 2517 i += CompactibleFreeListSpace::IndexSetStride) {
ysr@1580 2518 _blocks_to_claim[i].modify(n, wt, true /* force */);
duke@435 2519 }
duke@435 2520 }
duke@435 2521
duke@435 2522 HeapWord* CFLS_LAB::alloc(size_t word_sz) {
duke@435 2523 FreeChunk* res;
duke@435 2524 word_sz = _cfls->adjustObjectSize(word_sz);
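// Requests of IndexSetSize words or more are served from the shared dictionary
// under its lock; smaller requests come from this thread's local free lists,
// which are refilled in bulk via get_from_global_pool() when empty.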
duke@435 2525 if (word_sz >= CompactibleFreeListSpace::IndexSetSize) {
duke@435 2526 // This locking manages sync with other large object allocations.
duke@435 2527 MutexLockerEx x(_cfls->parDictionaryAllocLock(),
duke@435 2528 Mutex::_no_safepoint_check_flag);
duke@435 2529 res = _cfls->getChunkFromDictionaryExact(word_sz);
duke@435 2530 if (res == NULL) return NULL;
duke@435 2531 } else {
duke@435 2532 FreeList* fl = &_indexedFreeList[word_sz];
duke@435 2533 if (fl->count() == 0) {
duke@435 2534 // Attempt to refill this local free list.
ysr@1580 2535 get_from_global_pool(word_sz, fl);
duke@435 2536 // If it didn't work, give up.
duke@435 2537 if (fl->count() == 0) return NULL;
duke@435 2538 }
duke@435 2539 res = fl->getChunkAtHead();
duke@435 2540 assert(res != NULL, "Why was count non-zero?");
duke@435 2541 }
duke@435 2542 res->markNotFree();
duke@435 2543 assert(!res->isFree(), "shouldn't be marked free");
coleenp@622 2544 assert(oop(res)->klass_or_null() == NULL, "should look uninitialized");
duke@435 2545 // Mangle a just-allocated object with a distinct pattern.
duke@435 2546 debug_only(res->mangleAllocated(word_sz));
duke@435 2547 return (HeapWord*)res;
duke@435 2548 }
duke@435 2549
ysr@1580 2550 // Get a chunk of blocks of the right size and update related
ysr@1580 2551 // book-keeping stats
ysr@1580 2552 void CFLS_LAB::get_from_global_pool(size_t word_sz, FreeList* fl) {
ysr@1580 2553 // Get the #blocks we want to claim
ysr@1580 2554 size_t n_blks = (size_t)_blocks_to_claim[word_sz].average();
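// _blocks_to_claim[word_sz] is a weighted average of the demand observed for
// this block size; it is resampled in compute_desired_plab_size() when
// ResizeOldPLAB is enabled.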
ysr@1580 2555 assert(n_blks > 0, "Error");
ysr@1580 2556 assert(ResizeOldPLAB || n_blks == OldPLABSize, "Error");
ysr@1580 2557 // In some cases, when the application has a phase change,
ysr@1580 2558 // there may be a sudden and sharp shift in the object survival
ysr@1580 2559 // profile, and updating the counts at the end of a scavenge
ysr@1580 2560 // may not be quick enough, giving rise to large scavenge pauses
ysr@1580 2561 // during these phase changes. It is beneficial to detect such
ysr@1580 2562 // changes on-the-fly during a scavenge and avoid such a phase-change
ysr@1580 2563 // pothole. The following code is a heuristic attempt to do that.
ysr@1580 2564 // It is protected by a product flag until we have gained
ysr@1580 2565 // enough experience with this heuristic and fine-tuned its behaviour.
ysr@1580 2566 // WARNING: This might increase fragmentation if we overreact to
ysr@1580 2567 // small spikes, so some kind of historical smoothing based on
ysr@1580 2568 // previous experience with the greater reactivity might be useful.
ysr@1580 2569 // Lacking sufficient experience, CMSOldPLABResizeQuicker is disabled by
ysr@1580 2570 // default.
ysr@1580 2571 if (ResizeOldPLAB && CMSOldPLABResizeQuicker) {
ysr@1580 2572 size_t multiple = _num_blocks[word_sz]/(CMSOldPLABToleranceFactor*CMSOldPLABNumRefills*n_blks);
ysr@1580 2573 n_blks += CMSOldPLABReactivityFactor*multiple*n_blks;
ysr@1580 2574 n_blks = MIN2(n_blks, CMSOldPLABMax);
ysr@1580 2575 }
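// Illustration of the heuristic above, using purely hypothetical values:
// with n_blks == 16, _num_blocks[word_sz] == 512, CMSOldPLABToleranceFactor == 4
// and CMSOldPLABNumRefills == 4, multiple == 512/(4*4*16) == 2; with
// CMSOldPLABReactivityFactor == 2 the claim grows by 2*2*16 == 64 blocks,
// from 16 to 80, subject to the CMSOldPLABMax clamp.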
ysr@1580 2576 assert(n_blks > 0, "Error");
ysr@1580 2577 _cfls->par_get_chunk_of_blocks(word_sz, n_blks, fl);
ysr@1580 2578 // Update stats table entry for this block size
ysr@1580 2579 _num_blocks[word_sz] += fl->count();
ysr@1580 2580 }
ysr@1580 2581
ysr@1580 2582 void CFLS_LAB::compute_desired_plab_size() {
ysr@1580 2583 for (size_t i = CompactibleFreeListSpace::IndexSetStart;
duke@435 2584 i < CompactibleFreeListSpace::IndexSetSize;
duke@435 2585 i += CompactibleFreeListSpace::IndexSetStride) {
ysr@1580 2586 assert((_global_num_workers[i] == 0) == (_global_num_blocks[i] == 0),
ysr@1580 2587 "Counter inconsistency");
ysr@1580 2588 if (_global_num_workers[i] > 0) {
ysr@1580 2589 // Need to smooth wrt historical average
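// (the sample is the number of blocks of this size used per worker per refill
// during the just-completed scavenge, clamped to [CMSOldPLABMin, CMSOldPLABMax])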
ysr@1580 2590 if (ResizeOldPLAB) {
ysr@1580 2591 _blocks_to_claim[i].sample(
ysr@1580 2592 MAX2((size_t)CMSOldPLABMin,
ysr@1580 2593 MIN2((size_t)CMSOldPLABMax,
ysr@1580 2594 _global_num_blocks[i]/(_global_num_workers[i]*CMSOldPLABNumRefills))));
ysr@1580 2595 }
ysr@1580 2596 // Reset counters for next round
ysr@1580 2597 _global_num_workers[i] = 0;
ysr@1580 2598 _global_num_blocks[i] = 0;
ysr@1580 2599 if (PrintOldPLAB) {
ysr@1580 2600 gclog_or_tty->print_cr("[" SIZE_FORMAT "]: " SIZE_FORMAT, i, (size_t)_blocks_to_claim[i].average());
ysr@1580 2601 }
duke@435 2602 }
duke@435 2603 }
duke@435 2604 }
duke@435 2605
ysr@1580 2606 void CFLS_LAB::retire(int tid) {
ysr@1580 2607 // We run this single-threaded with the world stopped;
ysr@1580 2608 // so no need for locks and such.
ysr@1580 2609 #define CFLS_LAB_PARALLEL_ACCESS 0
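// CFLS_LAB_PARALLEL_ACCESS is compiled out (0) by default; the alternative code
// below iterates downwards and takes the per-size-class locks, which would be
// needed if retirement ever ran concurrently with other accessors of the lists.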
ysr@1580 2610 NOT_PRODUCT(Thread* t = Thread::current();)
ysr@1580 2611 assert(Thread::current()->is_VM_thread(), "Error");
ysr@1580 2612 assert(CompactibleFreeListSpace::IndexSetStart == CompactibleFreeListSpace::IndexSetStride,
ysr@1580 2613 "Will access uninitialized slot below");
ysr@1580 2614 #if CFLS_LAB_PARALLEL_ACCESS
ysr@1580 2615 for (size_t i = CompactibleFreeListSpace::IndexSetSize - 1;
ysr@1580 2616 i > 0;
ysr@1580 2617 i -= CompactibleFreeListSpace::IndexSetStride) {
ysr@1580 2618 #else // CFLS_LAB_PARALLEL_ACCESS
ysr@1580 2619 for (size_t i = CompactibleFreeListSpace::IndexSetStart;
ysr@1580 2620 i < CompactibleFreeListSpace::IndexSetSize;
ysr@1580 2621 i += CompactibleFreeListSpace::IndexSetStride) {
ysr@1580 2622 #endif // !CFLS_LAB_PARALLEL_ACCESS
ysr@1580 2623 assert(_num_blocks[i] >= (size_t)_indexedFreeList[i].count(),
ysr@1580 2624 "Can't retire more than what we obtained");
ysr@1580 2625 if (_num_blocks[i] > 0) {
ysr@1580 2626 size_t num_retire = _indexedFreeList[i].count();
ysr@1580 2627 assert(_num_blocks[i] > num_retire, "Should have used at least one");
ysr@1580 2628 {
ysr@1580 2629 #if CFLS_LAB_PARALLEL_ACCESS
ysr@1580 2630 MutexLockerEx x(_cfls->_indexedFreeListParLocks[i],
ysr@1580 2631 Mutex::_no_safepoint_check_flag);
ysr@1580 2632 #endif // CFLS_LAB_PARALLEL_ACCESS
ysr@1580 2633 // Update global stats for num_blocks used
ysr@1580 2634 _global_num_blocks[i] += (_num_blocks[i] - num_retire);
ysr@1580 2635 _global_num_workers[i]++;
ysr@1580 2636 assert(_global_num_workers[i] <= (ssize_t)ParallelGCThreads, "Too big");
ysr@1580 2637 if (num_retire > 0) {
ysr@1580 2638 _cfls->_indexedFreeList[i].prepend(&_indexedFreeList[i]);
ysr@1580 2639 // Reset this list.
ysr@1580 2640 _indexedFreeList[i] = FreeList();
ysr@1580 2641 _indexedFreeList[i].set_size(i);
ysr@1580 2642 }
ysr@1580 2643 }
ysr@1580 2644 if (PrintOldPLAB) {
ysr@1580 2645 gclog_or_tty->print_cr("%d[" SIZE_FORMAT "]: " SIZE_FORMAT "/" SIZE_FORMAT "/" SIZE_FORMAT,
ysr@1580 2646 tid, i, num_retire, _num_blocks[i], (size_t)_blocks_to_claim[i].average());
ysr@1580 2647 }
ysr@1580 2648 // Reset stats for next round
ysr@1580 2649 _num_blocks[i] = 0;
ysr@1580 2650 }
ysr@1580 2651 }
ysr@1580 2652 }
ysr@1580 2653
ysr@1580 2654 void CompactibleFreeListSpace::par_get_chunk_of_blocks(size_t word_sz, size_t n, FreeList* fl) {
duke@435 2655 assert(fl->count() == 0, "Precondition.");
duke@435 2656 assert(word_sz < CompactibleFreeListSpace::IndexSetSize,
duke@435 2657 "Precondition");
duke@435 2658
ysr@1580 2659 // We'll try all multiples of word_sz in the indexed set, starting with
ysr@1580 2660 // word_sz itself and, if CMSSplitIndexedFreeListBlocks, trying successively
ysr@1580 2661 // larger multiples; failing that, we get a big chunk from the dictionary and split it.
ysr@1580 2662 {
ysr@1580 2663 bool found;
ysr@1580 2664 int k;
ysr@1580 2665 size_t cur_sz;
ysr@1580 2666 for (k = 1, cur_sz = k * word_sz, found = false;
ysr@1580 2667 (cur_sz < CompactibleFreeListSpace::IndexSetSize) &&
ysr@1580 2668 (CMSSplitIndexedFreeListBlocks || k <= 1);
ysr@1580 2669 k++, cur_sz = k * word_sz) {
ysr@1580 2670 FreeList* gfl = &_indexedFreeList[cur_sz];
ysr@1580 2671 FreeList fl_for_cur_sz; // Empty.
ysr@1580 2672 fl_for_cur_sz.set_size(cur_sz);
ysr@1580 2673 {
ysr@1580 2674 MutexLockerEx x(_indexedFreeListParLocks[cur_sz],
ysr@1580 2675 Mutex::_no_safepoint_check_flag);
ysr@1580 2676 if (gfl->count() != 0) {
ysr@1580 2677 // nn is the number of chunks of size cur_sz that
ysr@1580 2678 // we'd need to split k-ways each, in order to create
ysr@1580 2679 // "n" chunks of size word_sz each.
ysr@1580 2680 const size_t nn = MAX2(n/k, (size_t)1);
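// Note that nn*k need not equal n exactly (integer division); callers such as
// CFLS_LAB::alloc only require a non-empty refill, so this is acceptable.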
ysr@1580 2681 gfl->getFirstNChunksFromList(nn, &fl_for_cur_sz);
ysr@1580 2682 found = true;
ysr@1580 2683 if (k > 1) {
ysr@1580 2684 // Update split death stats for the cur_sz-size blocks list:
ysr@1580 2685 // we increment the split death count by the number of blocks
ysr@1580 2686 // we just took from the cur_sz-size blocks list and which
ysr@1580 2687 // we will be splitting below.
ysr@1580 2688 ssize_t deaths = _indexedFreeList[cur_sz].splitDeaths() +
ysr@1580 2689 fl_for_cur_sz.count();
ysr@1580 2690 _indexedFreeList[cur_sz].set_splitDeaths(deaths);
ysr@1580 2691 }
ysr@1580 2692 }
ysr@1580 2693 }
ysr@1580 2694 // Now transfer fl_for_cur_sz to fl. Common case, we hope, is k = 1.
ysr@1580 2695 if (found) {
ysr@1580 2696 if (k == 1) {
ysr@1580 2697 fl->prepend(&fl_for_cur_sz);
ysr@1580 2698 } else {
ysr@1580 2699 // Divide each block on fl_for_cur_sz up k ways.
ysr@1580 2700 FreeChunk* fc;
ysr@1580 2701 while ((fc = fl_for_cur_sz.getChunkAtHead()) != NULL) {
ysr@1580 2702 // Must do this in reverse order, so that anybody attempting to
ysr@1580 2703 // access the main chunk sees it as a single free block until we
ysr@1580 2704 // change it.
ysr@1580 2705 size_t fc_size = fc->size();
ysr@1580 2706 for (int i = k-1; i >= 0; i--) {
ysr@1580 2707 FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz);
ysr@1580 2708 ffc->setSize(word_sz);
ysr@1580 2709 ffc->linkNext(NULL);
ysr@1580 2710 ffc->linkPrev(NULL); // Mark as a free block for other (parallel) GC threads.
ysr@1580 2711 // Above must occur before BOT is updated below.
ysr@1580 2712 // splitting from the right; at this point fc_size == (i + 1) * word_sz
ysr@1580 2713 _bt.mark_block((HeapWord*)ffc, word_sz);
ysr@1580 2714 fc_size -= word_sz;
ysr@1580 2715 _bt.verify_not_unallocated((HeapWord*)ffc, ffc->size());
ysr@1580 2716 _bt.verify_single_block((HeapWord*)fc, fc_size);
ysr@1580 2717 _bt.verify_single_block((HeapWord*)ffc, ffc->size());
ysr@1580 2718 // Push this on "fl".
ysr@1580 2719 fl->returnChunkAtHead(ffc);
ysr@1580 2720 }
ysr@1580 2721 // TRAP
ysr@1580 2722 assert(fl->tail()->next() == NULL, "List invariant.");
ysr@1580 2723 }
ysr@1580 2724 }
ysr@1580 2725 // Update birth stats for this block size.
ysr@1580 2726 size_t num = fl->count();
ysr@1580 2727 MutexLockerEx x(_indexedFreeListParLocks[word_sz],
ysr@1580 2728 Mutex::_no_safepoint_check_flag);
ysr@1580 2729 ssize_t births = _indexedFreeList[word_sz].splitBirths() + num;
ysr@1580 2730 _indexedFreeList[word_sz].set_splitBirths(births);
ysr@1580 2731 return;
duke@435 2732 }
duke@435 2733 }
duke@435 2734 }
duke@435 2735 // Otherwise, we'll split a block from the dictionary.
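// We ask the dictionary for a chunk of at least n * word_sz words; if nothing
// that large is available, we progressively lower the request (n--) until a
// chunk is found or n reaches zero.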
duke@435 2736 FreeChunk* fc = NULL;
duke@435 2737 FreeChunk* rem_fc = NULL;
duke@435 2738 size_t rem;
duke@435 2739 {
duke@435 2740 MutexLockerEx x(parDictionaryAllocLock(),
duke@435 2741 Mutex::_no_safepoint_check_flag);
duke@435 2742 while (n > 0) {
duke@435 2743 fc = dictionary()->getChunk(MAX2(n * word_sz,
duke@435 2744 _dictionary->minSize()),
duke@435 2745 FreeBlockDictionary::atLeast);
duke@435 2746 if (fc != NULL) {
duke@435 2747 _bt.allocated((HeapWord*)fc, fc->size()); // update _unallocated_blk
duke@435 2748 dictionary()->dictCensusUpdate(fc->size(),
duke@435 2749 true /*split*/,
duke@435 2750 false /*birth*/);
duke@435 2751 break;
duke@435 2752 } else {
duke@435 2753 n--;
duke@435 2754 }
duke@435 2755 }
duke@435 2756 if (fc == NULL) return;
ysr@1580 2757 assert((ssize_t)n >= 1, "Control point invariant");
duke@435 2758 // Otherwise, split up that block.
ysr@1580 2759 const size_t nn = fc->size() / word_sz;
duke@435 2760 n = MIN2(nn, n);
ysr@1580 2761 assert((ssize_t)n >= 1, "Control point invariant");
duke@435 2762 rem = fc->size() - n * word_sz;
duke@435 2763 // If there is a remainder, and it's too small, allocate one fewer.
duke@435 2764 if (rem > 0 && rem < MinChunkSize) {
duke@435 2765 n--; rem += word_sz;
duke@435 2766 }
jmasa@1583 2767 // Note that at this point we may have n == 0.
jmasa@1583 2768 assert((ssize_t)n >= 0, "Control point invariant");
jmasa@1583 2769
jmasa@1583 2770 // If n is 0, the chunk fc that was found is not large
jmasa@1583 2771 // enough to leave a viable remainder. We are unable to
jmasa@1583 2772 // allocate even one block. Return fc to the
jmasa@1583 2773 // dictionary and return, leaving "fl" empty.
jmasa@1583 2774 if (n == 0) {
jmasa@1583 2775 returnChunkToDictionary(fc);
jmasa@1583 2776 return;
jmasa@1583 2777 }
jmasa@1583 2778
duke@435 2779 // First return the remainder, if any.
duke@435 2780 // Note that we hold the lock until we decide if we're going to give
ysr@1580 2781 // back the remainder to the dictionary, since a concurrent allocation
duke@435 2782 // may otherwise see the heap as empty. (We're willing to take that
duke@435 2783 // hit if the block is a small block.)
duke@435 2784 if (rem > 0) {
duke@435 2785 size_t prefix_size = n * word_sz;
duke@435 2786 rem_fc = (FreeChunk*)((HeapWord*)fc + prefix_size);
duke@435 2787 rem_fc->setSize(rem);
duke@435 2788 rem_fc->linkNext(NULL);
duke@435 2789 rem_fc->linkPrev(NULL); // Mark as a free block for other (parallel) GC threads.
duke@435 2790 // Above must occur before BOT is updated below.
ysr@1580 2791 assert((ssize_t)n > 0 && prefix_size > 0 && rem_fc > fc, "Error");
duke@435 2792 _bt.split_block((HeapWord*)fc, fc->size(), prefix_size);
duke@435 2793 if (rem >= IndexSetSize) {
duke@435 2794 returnChunkToDictionary(rem_fc);
ysr@1580 2795 dictionary()->dictCensusUpdate(rem, true /*split*/, true /*birth*/);
duke@435 2796 rem_fc = NULL;
duke@435 2797 }
duke@435 2798 // Otherwise, return it to the small list below.
duke@435 2799 }
duke@435 2800 }
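// A remainder small enough for the indexed lists is returned below, outside the
// dictionary lock but under the per-size-class lock for its size.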
duke@435 2801 if (rem_fc != NULL) {
duke@435 2802 MutexLockerEx x(_indexedFreeListParLocks[rem],
duke@435 2803 Mutex::_no_safepoint_check_flag);
duke@435 2804 _bt.verify_not_unallocated((HeapWord*)rem_fc, rem_fc->size());
duke@435 2805 _indexedFreeList[rem].returnChunkAtHead(rem_fc);
duke@435 2806 smallSplitBirth(rem);
duke@435 2807 }
ysr@1580 2808 assert((ssize_t)n > 0 && fc != NULL, "Consistency");
duke@435 2809 // Now do the splitting up.
duke@435 2810 // Must do this in reverse order, so that anybody attempting to
duke@435 2811 // access the main chunk sees it as a single free block until we
duke@435 2812 // change it.
duke@435 2813 size_t fc_size = n * word_sz;
duke@435 2814 // All but first chunk in this loop
duke@435 2815 for (ssize_t i = n-1; i > 0; i--) {
duke@435 2816 FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz);
duke@435 2817 ffc->setSize(word_sz);
duke@435 2818 ffc->linkNext(NULL);
duke@435 2819 ffc->linkPrev(NULL); // Mark as a free block for other (parallel) GC threads.
duke@435 2820 // Above must occur before BOT is updated below.
duke@435 2821 // splitting from the right; at this point fc_size == (i + 1) * word_sz
duke@435 2822 _bt.mark_block((HeapWord*)ffc, word_sz);
duke@435 2823 fc_size -= word_sz;
duke@435 2824 _bt.verify_not_unallocated((HeapWord*)ffc, ffc->size());
duke@435 2825 _bt.verify_single_block((HeapWord*)ffc, ffc->size());
duke@435 2826 _bt.verify_single_block((HeapWord*)fc, fc_size);
duke@435 2827 // Push this on "fl".
duke@435 2828 fl->returnChunkAtHead(ffc);
duke@435 2829 }
duke@435 2830 // First chunk
duke@435 2831 fc->setSize(word_sz);
duke@435 2832 fc->linkNext(NULL);
duke@435 2833 fc->linkPrev(NULL);
duke@435 2834 _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
duke@435 2835 _bt.verify_single_block((HeapWord*)fc, fc->size());
duke@435 2836 fl->returnChunkAtHead(fc);
duke@435 2837
ysr@1580 2838 assert((ssize_t)n > 0 && (ssize_t)n == fl->count(), "Incorrect number of blocks");
duke@435 2839 {
ysr@1580 2840 // Update the stats for this block size.
duke@435 2841 MutexLockerEx x(_indexedFreeListParLocks[word_sz],
duke@435 2842 Mutex::_no_safepoint_check_flag);
ysr@1580 2843 const ssize_t births = _indexedFreeList[word_sz].splitBirths() + n;
ysr@1580 2844 _indexedFreeList[word_sz].set_splitBirths(births);
ysr@1580 2845 // ssize_t new_surplus = _indexedFreeList[word_sz].surplus() + n;
ysr@1580 2846 // _indexedFreeList[word_sz].set_surplus(new_surplus);
duke@435 2847 }
duke@435 2848
duke@435 2849 // TRAP
duke@435 2850 assert(fl->tail()->next() == NULL, "List invariant.");
duke@435 2851 }
duke@435 2852
duke@435 2853 // Set up the space's par_seq_tasks structure for work claiming
duke@435 2854 // for parallel rescan. See CMSParRemarkTask where this is currently used.
duke@435 2855 // XXX Need to suitably abstract and generalize this and the next
duke@435 2856 // method into one.
duke@435 2857 void
duke@435 2858 CompactibleFreeListSpace::
duke@435 2859 initialize_sequential_subtasks_for_rescan(int n_threads) {
duke@435 2860 // The "size" of each task is fixed according to rescan_task_size.
duke@435 2861 assert(n_threads > 0, "Unexpected n_threads argument");
duke@435 2862 const size_t task_size = rescan_task_size();
duke@435 2863 size_t n_tasks = (used_region().word_size() + task_size - 1)/task_size;
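// i.e. n_tasks == ceiling(used_region().word_size() / task_size)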
ysr@775 2864 assert((n_tasks == 0) == used_region().is_empty(), "n_tasks incorrect");
ysr@775 2865 assert(n_tasks == 0 ||
ysr@775 2866 ((used_region().start() + (n_tasks - 1)*task_size < used_region().end()) &&
ysr@775 2867 (used_region().start() + n_tasks*task_size >= used_region().end())),
ysr@775 2868 "n_tasks calculation incorrect");
duke@435 2869 SequentialSubTasksDone* pst = conc_par_seq_tasks();
duke@435 2870 assert(!pst->valid(), "Clobbering existing data?");
duke@435 2871 pst->set_par_threads(n_threads);
duke@435 2872 pst->set_n_tasks((int)n_tasks);
duke@435 2873 }
duke@435 2874
duke@435 2875 // Set up the space's par_seq_tasks structure for work claiming
duke@435 2876 // for parallel concurrent marking. See CMSConcMarkTask where this is currently used.
duke@435 2877 void
duke@435 2878 CompactibleFreeListSpace::
duke@435 2879 initialize_sequential_subtasks_for_marking(int n_threads,
duke@435 2880 HeapWord* low) {
duke@435 2881 // The "size" of each task is fixed according to rescan_task_size.
duke@435 2882 assert(n_threads > 0, "Unexpected n_threads argument");
duke@435 2883 const size_t task_size = marking_task_size();
duke@435 2884 assert(task_size > CardTableModRefBS::card_size_in_words &&
duke@435 2885 (task_size % CardTableModRefBS::card_size_in_words == 0),
duke@435 2886 "Otherwise arithmetic below would be incorrect");
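// Task boundaries must fall on card boundaries: task_size is a multiple of the
// card size in words (and larger than one card), and span.start() is required
// to be card-aligned (asserted below).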
duke@435 2887 MemRegion span = _gen->reserved();
duke@435 2888 if (low != NULL) {
duke@435 2889 if (span.contains(low)) {
duke@435 2890 // Align low down to a card boundary so that
duke@435 2891 // we can use block_offset_careful() on span boundaries.
duke@435 2892 HeapWord* aligned_low = (HeapWord*)align_size_down((uintptr_t)low,
duke@435 2893 CardTableModRefBS::card_size);
duke@435 2894 // Clip span prefix at aligned_low
duke@435 2895 span = span.intersection(MemRegion(aligned_low, span.end()));
duke@435 2896 } else if (low > span.end()) {
duke@435 2897 span = MemRegion(low, low); // Null region
duke@435 2898 } // else use entire span
duke@435 2899 }
duke@435 2900 assert(span.is_empty() ||
duke@435 2901 ((uintptr_t)span.start() % CardTableModRefBS::card_size == 0),
duke@435 2902 "span should start at a card boundary");
duke@435 2903 size_t n_tasks = (span.word_size() + task_size - 1)/task_size;
duke@435 2904 assert((n_tasks == 0) == span.is_empty(), "Inconsistency");
duke@435 2905 assert(n_tasks == 0 ||
duke@435 2906 ((span.start() + (n_tasks - 1)*task_size < span.end()) &&
duke@435 2907 (span.start() + n_tasks*task_size >= span.end())),
ysr@775 2908 "n_tasks calculation incorrect");
duke@435 2909 SequentialSubTasksDone* pst = conc_par_seq_tasks();
duke@435 2910 assert(!pst->valid(), "Clobbering existing data?");
duke@435 2911 pst->set_par_threads(n_threads);
duke@435 2912 pst->set_n_tasks((int)n_tasks);
duke@435 2913 }
