src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp

author:      ysr
date:        Tue, 09 Nov 2010 10:47:20 -0800
changeset:   2294:4df7f8cba524
parent:      2293:899bbbdcb6ea
child:       2301:9eecf81a02fb
permissions: -rw-r--r--

6996613: CompactibleFreeListSpace::print should call CompactibleFreeListSpace::print_on, not Space::print_on
Reviewed-by: tonyp

duke@435 1 /*
ysr@2071 2 * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
duke@435 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
duke@435 4 *
duke@435 5 * This code is free software; you can redistribute it and/or modify it
duke@435 6 * under the terms of the GNU General Public License version 2 only, as
duke@435 7 * published by the Free Software Foundation.
duke@435 8 *
duke@435 9 * This code is distributed in the hope that it will be useful, but WITHOUT
duke@435 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
duke@435 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
duke@435 12 * version 2 for more details (a copy is included in the LICENSE file that
duke@435 13 * accompanied this code).
duke@435 14 *
duke@435 15 * You should have received a copy of the GNU General Public License version
duke@435 16 * 2 along with this work; if not, write to the Free Software Foundation,
duke@435 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
duke@435 18 *
trims@1907 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
trims@1907 20 * or visit www.oracle.com if you need additional information or have any
trims@1907 21 * questions.
duke@435 22 *
duke@435 23 */
duke@435 24
duke@435 25 # include "incls/_precompiled.incl"
duke@435 26 # include "incls/_compactibleFreeListSpace.cpp.incl"
duke@435 27
duke@435 28 /////////////////////////////////////////////////////////////////////////
duke@435 29 //// CompactibleFreeListSpace
duke@435 30 /////////////////////////////////////////////////////////////////////////
duke@435 31
duke@435 32 // highest ranked free list lock rank
duke@435 33 int CompactibleFreeListSpace::_lockRank = Mutex::leaf + 3;
duke@435 34
kvn@1926 35 // Defaults are 0 so things will break badly if incorrectly initialized.
kvn@1926 36 int CompactibleFreeListSpace::IndexSetStart = 0;
kvn@1926 37 int CompactibleFreeListSpace::IndexSetStride = 0;
kvn@1926 38
kvn@1926 39 size_t MinChunkSize = 0;
kvn@1926 40
kvn@1926 41 void CompactibleFreeListSpace::set_cms_values() {
kvn@1926 42 // Set CMS global values
kvn@1926 43 assert(MinChunkSize == 0, "already set");
kvn@1926 44 #define numQuanta(x,y) ((x+y-1)/y)
kvn@1926 45 MinChunkSize = numQuanta(sizeof(FreeChunk), MinObjAlignmentInBytes) * MinObjAlignment;
kvn@1926 46
kvn@1926 47 assert(IndexSetStart == 0 && IndexSetStride == 0, "already set");
kvn@1926 48 IndexSetStart = MinObjAlignment;
kvn@1926 49 IndexSetStride = MinObjAlignment;
kvn@1926 50 }
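// A worked example of the arithmetic above, using hypothetical values
// (actual sizes are platform-dependent): numQuanta(x, y) is a round-up
// division, so MinChunkSize is sizeof(FreeChunk) rounded up to the minimum
// object alignment, expressed in heap words. If sizeof(FreeChunk) were 24
// bytes, MinObjAlignmentInBytes == 8 and MinObjAlignment == 1 word, then
//   numQuanta(24, 8) == (24 + 8 - 1) / 8 == 3
//   MinChunkSize     == 3 * MinObjAlignment == 3 words
// i.e. the smallest free chunk the space would ever create is 3 words.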
kvn@1926 51
duke@435 52 // Constructor
duke@435 53 CompactibleFreeListSpace::CompactibleFreeListSpace(BlockOffsetSharedArray* bs,
duke@435 54 MemRegion mr, bool use_adaptive_freelists,
duke@435 55 FreeBlockDictionary::DictionaryChoice dictionaryChoice) :
duke@435 56 _dictionaryChoice(dictionaryChoice),
duke@435 57 _adaptive_freelists(use_adaptive_freelists),
duke@435 58 _bt(bs, mr),
duke@435 59 // free list locks are in the range of values taken by _lockRank
duke@435 60 // This range currently is [_leaf+2, _leaf+3]
duke@435 61 // Note: this requires that CFLspace c'tors
duke@435 62 // are called serially in the order in which the locks are
duke@435 63 // acquired in the program text. This is true today.
duke@435 64 _freelistLock(_lockRank--, "CompactibleFreeListSpace._lock", true),
duke@435 65 _parDictionaryAllocLock(Mutex::leaf - 1, // == rank(ExpandHeap_lock) - 1
duke@435 66 "CompactibleFreeListSpace._dict_par_lock", true),
duke@435 67 _rescan_task_size(CardTableModRefBS::card_size_in_words * BitsPerWord *
duke@435 68 CMSRescanMultiple),
duke@435 69 _marking_task_size(CardTableModRefBS::card_size_in_words * BitsPerWord *
duke@435 70 CMSConcMarkMultiple),
duke@435 71 _collector(NULL)
duke@435 72 {
duke@435 73 _bt.set_space(this);
jmasa@698 74 initialize(mr, SpaceDecorator::Clear, SpaceDecorator::Mangle);
duke@435 75 // We have all of "mr", all of which we place in the dictionary
duke@435 76 // as one big chunk. We'll need to decide here which of several
duke@435 77 // possible alternative dictionary implementations to use. For
duke@435 78 // now the choice is easy, since we have only one working
duke@435 79 // implementation, namely, the simple binary tree (splaying
duke@435 80 // temporarily disabled).
duke@435 81 switch (dictionaryChoice) {
duke@435 82 case FreeBlockDictionary::dictionarySplayTree:
duke@435 83 case FreeBlockDictionary::dictionarySkipList:
duke@435 84 default:
duke@435 85 warning("dictionaryChoice: selected option not understood; using"
duke@435 86 " default BinaryTreeDictionary implementation instead.");
ysr@1580 87 case FreeBlockDictionary::dictionaryBinaryTree:
duke@435 88 _dictionary = new BinaryTreeDictionary(mr);
duke@435 89 break;
duke@435 90 }
duke@435 91 assert(_dictionary != NULL, "CMS dictionary initialization");
duke@435 92 // The indexed free lists are initially all empty and are lazily
duke@435 93 // filled in on demand. Initialize the array elements to NULL.
duke@435 94 initializeIndexedFreeListArray();
duke@435 95
duke@435 96 // Not using adaptive free lists assumes that allocation is first
duke@435 97 // from the linAB's. Also a cms perm gen which can be compacted
duke@435 98 // has to have the klass's klassKlass allocated at a lower
duke@435 99 // address in the heap than the klass so that the klassKlass is
duke@435 100 // moved to its new location before the klass is moved.
duke@435 101 // Set the _refillSize for the linear allocation blocks
duke@435 102 if (!use_adaptive_freelists) {
duke@435 103 FreeChunk* fc = _dictionary->getChunk(mr.word_size());
duke@435 104 // The small linAB initially has all the space and will allocate
duke@435 105 // a chunk of any size.
duke@435 106 HeapWord* addr = (HeapWord*) fc;
duke@435 107 _smallLinearAllocBlock.set(addr, fc->size() ,
duke@435 108 1024*SmallForLinearAlloc, fc->size());
duke@435 109 // Note that _unallocated_block is not updated here.
duke@435 110 // Allocations from the linear allocation block should
duke@435 111 // update it.
duke@435 112 } else {
duke@435 113 _smallLinearAllocBlock.set(0, 0, 1024*SmallForLinearAlloc,
duke@435 114 SmallForLinearAlloc);
duke@435 115 }
duke@435 116 // CMSIndexedFreeListReplenish should be at least 1
duke@435 117 CMSIndexedFreeListReplenish = MAX2((uintx)1, CMSIndexedFreeListReplenish);
duke@435 118 _promoInfo.setSpace(this);
duke@435 119 if (UseCMSBestFit) {
duke@435 120 _fitStrategy = FreeBlockBestFitFirst;
duke@435 121 } else {
duke@435 122 _fitStrategy = FreeBlockStrategyNone;
duke@435 123 }
duke@435 124 checkFreeListConsistency();
duke@435 125
duke@435 126 // Initialize locks for parallel case.
jmasa@2188 127
jmasa@2188 128 if (CollectedHeap::use_parallel_gc_threads()) {
duke@435 129 for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
duke@435 130 _indexedFreeListParLocks[i] = new Mutex(Mutex::leaf - 1, // == ExpandHeap_lock - 1
duke@435 131 "a freelist par lock",
duke@435 132 true);
duke@435 133 if (_indexedFreeListParLocks[i] == NULL)
duke@435 134 vm_exit_during_initialization("Could not allocate a par lock");
duke@435 135 DEBUG_ONLY(
duke@435 136 _indexedFreeList[i].set_protecting_lock(_indexedFreeListParLocks[i]);
duke@435 137 )
duke@435 138 }
duke@435 139 _dictionary->set_par_lock(&_parDictionaryAllocLock);
duke@435 140 }
duke@435 141 }
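// Note on the lock ranks above: _lockRank starts at Mutex::leaf + 3 and the
// post-decrement in the _freelistLock initializer consumes one rank per
// constructed space. That is what keeps the free list lock ranks in the
// [_leaf+2, _leaf+3] range mentioned above, and why the constructors must be
// run serially in the order in which the locks are later acquired.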
duke@435 142
duke@435 143 // Like CompactibleSpace forward() but always calls cross_threshold() to
duke@435 144 // update the block offset table. Removed initialize_threshold call because
duke@435 145 // CFLS does not use a block offset array for contiguous spaces.
duke@435 146 HeapWord* CompactibleFreeListSpace::forward(oop q, size_t size,
duke@435 147 CompactPoint* cp, HeapWord* compact_top) {
duke@435 148 // q is alive
duke@435 149 // First check if we should switch compaction space
duke@435 150 assert(this == cp->space, "'this' should be current compaction space.");
duke@435 151 size_t compaction_max_size = pointer_delta(end(), compact_top);
duke@435 152 assert(adjustObjectSize(size) == cp->space->adjust_object_size_v(size),
duke@435 153 "virtual adjustObjectSize_v() method is not correct");
duke@435 154 size_t adjusted_size = adjustObjectSize(size);
duke@435 155 assert(compaction_max_size >= MinChunkSize || compaction_max_size == 0,
duke@435 156 "no small fragments allowed");
duke@435 157 assert(minimum_free_block_size() == MinChunkSize,
duke@435 158 "for de-virtualized reference below");
duke@435 159 // Can't leave a nonzero-sized residual fragment smaller than MinChunkSize
duke@435 160 if (adjusted_size + MinChunkSize > compaction_max_size &&
duke@435 161 adjusted_size != compaction_max_size) {
duke@435 162 do {
duke@435 163 // switch to next compaction space
duke@435 164 cp->space->set_compaction_top(compact_top);
duke@435 165 cp->space = cp->space->next_compaction_space();
duke@435 166 if (cp->space == NULL) {
duke@435 167 cp->gen = GenCollectedHeap::heap()->prev_gen(cp->gen);
duke@435 168 assert(cp->gen != NULL, "compaction must succeed");
duke@435 169 cp->space = cp->gen->first_compaction_space();
duke@435 170 assert(cp->space != NULL, "generation must have a first compaction space");
duke@435 171 }
duke@435 172 compact_top = cp->space->bottom();
duke@435 173 cp->space->set_compaction_top(compact_top);
duke@435 174 // The correct adjusted_size may not be the same as that for this method
duke@435 175 // (i.e., cp->space may no longer be "this"), so adjust the size again.
duke@435 176 // Use the virtual method which is not used above to save the virtual
duke@435 177 // dispatch.
duke@435 178 adjusted_size = cp->space->adjust_object_size_v(size);
duke@435 179 compaction_max_size = pointer_delta(cp->space->end(), compact_top);
duke@435 180 assert(cp->space->minimum_free_block_size() == 0, "just checking");
duke@435 181 } while (adjusted_size > compaction_max_size);
duke@435 182 }
duke@435 183
duke@435 184 // store the forwarding pointer into the mark word
duke@435 185 if ((HeapWord*)q != compact_top) {
duke@435 186 q->forward_to(oop(compact_top));
duke@435 187 assert(q->is_gc_marked(), "encoding the pointer should preserve the mark");
duke@435 188 } else {
duke@435 189 // if the object isn't moving we can just set the mark to the default
duke@435 190 // mark and handle it specially later on.
duke@435 191 q->init_mark();
duke@435 192 assert(q->forwardee() == NULL, "should be forwarded to NULL");
duke@435 193 }
duke@435 194
coleenp@548 195 VALIDATE_MARK_SWEEP_ONLY(MarkSweep::register_live_oop(q, adjusted_size));
duke@435 196 compact_top += adjusted_size;
duke@435 197
duke@435 198 // we need to update the offset table so that the beginnings of objects can be
duke@435 199 // found during scavenge. Note that we are updating the offset table based on
duke@435 200 // where the object will be once the compaction phase finishes.
duke@435 201
duke@435 202 // Always call cross_threshold(). A contiguous space can only call it when
duke@435 203 // the compaction_top exceeds the current threshold but not for a
duke@435 204 // non-contiguous space.
duke@435 205 cp->threshold =
duke@435 206 cp->space->cross_threshold(compact_top - adjusted_size, compact_top);
duke@435 207 return compact_top;
duke@435 208 }
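// Illustration of the space-switch test above, with hypothetical numbers:
// if compaction_max_size == 10 words, MinChunkSize == 3 words and
// adjusted_size == 9 words, then 9 + 3 > 10 and 9 != 10, so placing the
// object here would strand a 1-word residual fragment (smaller than
// MinChunkSize) and we advance to the next compaction space. If
// adjusted_size were exactly 10 words, the object would fill the remaining
// space with no residual fragment and no switch would occur.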
duke@435 209
duke@435 210 // A modified copy of OffsetTableContigSpace::cross_threshold() with _offsets -> _bt
duke@435 211 // and use of single_block instead of alloc_block. The name here is not really
duke@435 212 // appropriate - maybe a more general name could be invented for both the
duke@435 213 // contiguous and noncontiguous spaces.
duke@435 214
duke@435 215 HeapWord* CompactibleFreeListSpace::cross_threshold(HeapWord* start, HeapWord* the_end) {
duke@435 216 _bt.single_block(start, the_end);
duke@435 217 return end();
duke@435 218 }
duke@435 219
duke@435 220 // Initialize them to NULL.
duke@435 221 void CompactibleFreeListSpace::initializeIndexedFreeListArray() {
duke@435 222 for (size_t i = 0; i < IndexSetSize; i++) {
duke@435 223 // Note that on platforms where objects are double word aligned,
duke@435 224 // the odd array elements are not used. It is convenient, however,
duke@435 225 // to map directly from the object size to the array element.
duke@435 226 _indexedFreeList[i].reset(IndexSetSize);
duke@435 227 _indexedFreeList[i].set_size(i);
duke@435 228 assert(_indexedFreeList[i].count() == 0, "reset check failed");
duke@435 229 assert(_indexedFreeList[i].head() == NULL, "reset check failed");
duke@435 230 assert(_indexedFreeList[i].tail() == NULL, "reset check failed");
duke@435 231 assert(_indexedFreeList[i].hint() == IndexSetSize, "reset check failed");
duke@435 232 }
duke@435 233 }
duke@435 234
duke@435 235 void CompactibleFreeListSpace::resetIndexedFreeListArray() {
duke@435 236 for (int i = 1; i < IndexSetSize; i++) {
duke@435 237 assert(_indexedFreeList[i].size() == (size_t) i,
duke@435 238 "Indexed free list sizes are incorrect");
duke@435 239 _indexedFreeList[i].reset(IndexSetSize);
duke@435 240 assert(_indexedFreeList[i].count() == 0, "reset check failed");
duke@435 241 assert(_indexedFreeList[i].head() == NULL, "reset check failed");
duke@435 242 assert(_indexedFreeList[i].tail() == NULL, "reset check failed");
duke@435 243 assert(_indexedFreeList[i].hint() == IndexSetSize, "reset check failed");
duke@435 244 }
duke@435 245 }
duke@435 246
duke@435 247 void CompactibleFreeListSpace::reset(MemRegion mr) {
duke@435 248 resetIndexedFreeListArray();
duke@435 249 dictionary()->reset();
duke@435 250 if (BlockOffsetArrayUseUnallocatedBlock) {
duke@435 251 assert(end() == mr.end(), "We are compacting to the bottom of CMS gen");
duke@435 252 // Everything's allocated until proven otherwise.
duke@435 253 _bt.set_unallocated_block(end());
duke@435 254 }
duke@435 255 if (!mr.is_empty()) {
duke@435 256 assert(mr.word_size() >= MinChunkSize, "Chunk size is too small");
duke@435 257 _bt.single_block(mr.start(), mr.word_size());
duke@435 258 FreeChunk* fc = (FreeChunk*) mr.start();
duke@435 259 fc->setSize(mr.word_size());
duke@435 260 if (mr.word_size() >= IndexSetSize ) {
duke@435 261 returnChunkToDictionary(fc);
duke@435 262 } else {
duke@435 263 _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
duke@435 264 _indexedFreeList[mr.word_size()].returnChunkAtHead(fc);
duke@435 265 }
duke@435 266 }
duke@435 267 _promoInfo.reset();
duke@435 268 _smallLinearAllocBlock._ptr = NULL;
duke@435 269 _smallLinearAllocBlock._word_size = 0;
duke@435 270 }
duke@435 271
duke@435 272 void CompactibleFreeListSpace::reset_after_compaction() {
duke@435 273 // Reset the space to the new reality - one free chunk.
duke@435 274 MemRegion mr(compaction_top(), end());
duke@435 275 reset(mr);
duke@435 276 // Now refill the linear allocation block(s) if possible.
duke@435 277 if (_adaptive_freelists) {
duke@435 278 refillLinearAllocBlocksIfNeeded();
duke@435 279 } else {
duke@435 280 // Place as much of mr in the linAB as we can get,
duke@435 281 // provided it was big enough to go into the dictionary.
duke@435 282 FreeChunk* fc = dictionary()->findLargestDict();
duke@435 283 if (fc != NULL) {
duke@435 284 assert(fc->size() == mr.word_size(),
duke@435 285 "Why was the chunk broken up?");
duke@435 286 removeChunkFromDictionary(fc);
duke@435 287 HeapWord* addr = (HeapWord*) fc;
duke@435 288 _smallLinearAllocBlock.set(addr, fc->size() ,
duke@435 289 1024*SmallForLinearAlloc, fc->size());
duke@435 290 // Note that _unallocated_block is not updated here.
duke@435 291 }
duke@435 292 }
duke@435 293 }
duke@435 294
duke@435 295 // Walks the entire dictionary, returning a coterminal
duke@435 296 // chunk, if it exists. Use with caution since it involves
duke@435 297 // a potentially complete walk of a potentially large tree.
duke@435 298 FreeChunk* CompactibleFreeListSpace::find_chunk_at_end() {
duke@435 299
duke@435 300 assert_lock_strong(&_freelistLock);
duke@435 301
duke@435 302 return dictionary()->find_chunk_ends_at(end());
duke@435 303 }
duke@435 304
duke@435 305
duke@435 306 #ifndef PRODUCT
duke@435 307 void CompactibleFreeListSpace::initializeIndexedFreeListArrayReturnedBytes() {
duke@435 308 for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
duke@435 309 _indexedFreeList[i].allocation_stats()->set_returnedBytes(0);
duke@435 310 }
duke@435 311 }
duke@435 312
duke@435 313 size_t CompactibleFreeListSpace::sumIndexedFreeListArrayReturnedBytes() {
duke@435 314 size_t sum = 0;
duke@435 315 for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
duke@435 316 sum += _indexedFreeList[i].allocation_stats()->returnedBytes();
duke@435 317 }
duke@435 318 return sum;
duke@435 319 }
duke@435 320
duke@435 321 size_t CompactibleFreeListSpace::totalCountInIndexedFreeLists() const {
duke@435 322 size_t count = 0;
kvn@1926 323 for (int i = (int)MinChunkSize; i < IndexSetSize; i++) {
duke@435 324 debug_only(
duke@435 325 ssize_t total_list_count = 0;
duke@435 326 for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
duke@435 327 fc = fc->next()) {
duke@435 328 total_list_count++;
duke@435 329 }
duke@435 330 assert(total_list_count == _indexedFreeList[i].count(),
duke@435 331 "Count in list is incorrect");
duke@435 332 )
duke@435 333 count += _indexedFreeList[i].count();
duke@435 334 }
duke@435 335 return count;
duke@435 336 }
duke@435 337
duke@435 338 size_t CompactibleFreeListSpace::totalCount() {
duke@435 339 size_t num = totalCountInIndexedFreeLists();
duke@435 340 num += dictionary()->totalCount();
duke@435 341 if (_smallLinearAllocBlock._word_size != 0) {
duke@435 342 num++;
duke@435 343 }
duke@435 344 return num;
duke@435 345 }
duke@435 346 #endif
duke@435 347
duke@435 348 bool CompactibleFreeListSpace::is_free_block(const HeapWord* p) const {
duke@435 349 FreeChunk* fc = (FreeChunk*) p;
duke@435 350 return fc->isFree();
duke@435 351 }
duke@435 352
duke@435 353 size_t CompactibleFreeListSpace::used() const {
duke@435 354 return capacity() - free();
duke@435 355 }
duke@435 356
duke@435 357 size_t CompactibleFreeListSpace::free() const {
duke@435 358 // "MT-safe, but not MT-precise"(TM), if you will: i.e.
duke@435 359 // if you do this while the structures are in flux you
duke@435 360 // may get an approximate answer only; for instance
duke@435 361 // because there is concurrent allocation either
duke@435 362 // directly by mutators or for promotion during a GC.
duke@435 363 // It's "MT-safe", however, in the sense that you are guaranteed
duke@435 364 // not to crash and burn, for instance, because of walking
duke@435 365 // pointers that could disappear as you were walking them.
duke@435 366 // The approximation is because the various components
duke@435 367 // that are read below are not read atomically (and
duke@435 368 // further the computation of totalSizeInIndexedFreeLists()
duke@435 369 // is itself a non-atomic computation). The normal use of
duke@435 370 // this is during a resize operation at the end of GC
duke@435 371 // and at that time you are guaranteed to get the
duke@435 372 // correct actual value. However, for instance, this is
duke@435 373 // also read completely asynchronously by the "perf-sampler"
duke@435 374 // that supports jvmstat, and you are apt to see the values
duke@435 375 // flicker in such cases.
duke@435 376 assert(_dictionary != NULL, "No _dictionary?");
duke@435 377 return (_dictionary->totalChunkSize(DEBUG_ONLY(freelistLock())) +
duke@435 378 totalSizeInIndexedFreeLists() +
duke@435 379 _smallLinearAllocBlock._word_size) * HeapWordSize;
duke@435 380 }
duke@435 381
duke@435 382 size_t CompactibleFreeListSpace::max_alloc_in_words() const {
duke@435 383 assert(_dictionary != NULL, "No _dictionary?");
duke@435 384 assert_locked();
duke@435 385 size_t res = _dictionary->maxChunkSize();
duke@435 386 res = MAX2(res, MIN2(_smallLinearAllocBlock._word_size,
duke@435 387 (size_t) SmallForLinearAlloc - 1));
duke@435 388 // XXX the following could potentially be pretty slow;
duke@435 389 // should one, pessimally for the rare cases when res
duke@435 390 // calculated above is less than IndexSetSize,
duke@435 391 // just return res calculated above? My reasoning was that
duke@435 392 // those cases will be so rare that the extra time spent doesn't
duke@435 393 // really matter....
duke@435 394 // Note: do not change the loop test i >= res + IndexSetStride
duke@435 395 // to i > res below, because i is unsigned and res may be zero.
duke@435 396 for (size_t i = IndexSetSize - 1; i >= res + IndexSetStride;
duke@435 397 i -= IndexSetStride) {
duke@435 398 if (_indexedFreeList[i].head() != NULL) {
duke@435 399 assert(_indexedFreeList[i].count() != 0, "Inconsistent FreeList");
duke@435 400 return i;
duke@435 401 }
duke@435 402 }
duke@435 403 return res;
duke@435 404 }
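// Illustration of the loop-test note above: i and res are unsigned. If the
// bound were weakened to "i > res" with res == 0, the body could run with
// 0 < i < IndexSetStride, after which "i -= IndexSetStride" would wrap
// around to a huge value that still satisfies "i > res" and would index far
// outside _indexedFreeList. The form "i >= res + IndexSetStride" guarantees
// that i is never decremented below zero.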
duke@435 405
ysr@2071 406 void LinearAllocBlock::print_on(outputStream* st) const {
ysr@2071 407 st->print_cr(" LinearAllocBlock: ptr = " PTR_FORMAT ", word_size = " SIZE_FORMAT
ysr@2071 408 ", refillsize = " SIZE_FORMAT ", allocation_size_limit = " SIZE_FORMAT,
ysr@2071 409 _ptr, _word_size, _refillSize, _allocation_size_limit);
ysr@2071 410 }
ysr@2071 411
ysr@2071 412 void CompactibleFreeListSpace::print_on(outputStream* st) const {
ysr@2071 413 st->print_cr("COMPACTIBLE FREELIST SPACE");
ysr@2071 414 st->print_cr(" Space:");
ysr@2071 415 Space::print_on(st);
ysr@2071 416
ysr@2071 417 st->print_cr("promoInfo:");
ysr@2071 418 _promoInfo.print_on(st);
ysr@2071 419
ysr@2071 420 st->print_cr("_smallLinearAllocBlock");
ysr@2071 421 _smallLinearAllocBlock.print_on(st);
ysr@2071 422
ysr@2071 423 // dump_memory_block(_smallLinearAllocBlock->_ptr, 128);
ysr@2071 424
ysr@2071 425 st->print_cr(" _fitStrategy = %s, _adaptive_freelists = %s",
ysr@2071 426 _fitStrategy?"true":"false", _adaptive_freelists?"true":"false");
ysr@2071 427 }
ysr@2071 428
ysr@1580 429 void CompactibleFreeListSpace::print_indexed_free_lists(outputStream* st)
ysr@1580 430 const {
ysr@1580 431 reportIndexedFreeListStatistics();
ysr@1580 432 gclog_or_tty->print_cr("Layout of Indexed Freelists");
ysr@1580 433 gclog_or_tty->print_cr("---------------------------");
ysr@1580 434 FreeList::print_labels_on(st, "size");
ysr@1580 435 for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
ysr@1580 436 _indexedFreeList[i].print_on(gclog_or_tty);
ysr@1580 437 for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
ysr@1580 438 fc = fc->next()) {
ysr@1580 439 gclog_or_tty->print_cr("\t[" PTR_FORMAT "," PTR_FORMAT ") %s",
ysr@1580 440 fc, (HeapWord*)fc + i,
ysr@1580 441 fc->cantCoalesce() ? "\t CC" : "");
ysr@1580 442 }
ysr@1580 443 }
ysr@1580 444 }
ysr@1580 445
ysr@1580 446 void CompactibleFreeListSpace::print_promo_info_blocks(outputStream* st)
ysr@1580 447 const {
ysr@1580 448 _promoInfo.print_on(st);
ysr@1580 449 }
ysr@1580 450
ysr@1580 451 void CompactibleFreeListSpace::print_dictionary_free_lists(outputStream* st)
ysr@1580 452 const {
ysr@1580 453 _dictionary->reportStatistics();
ysr@1580 454 st->print_cr("Layout of Freelists in Tree");
ysr@1580 455 st->print_cr("---------------------------");
ysr@1580 456 _dictionary->print_free_lists(st);
ysr@1580 457 }
ysr@1580 458
ysr@1580 459 class BlkPrintingClosure: public BlkClosure {
ysr@1580 460 const CMSCollector* _collector;
ysr@1580 461 const CompactibleFreeListSpace* _sp;
ysr@1580 462 const CMSBitMap* _live_bit_map;
ysr@1580 463 const bool _post_remark;
ysr@1580 464 outputStream* _st;
ysr@1580 465 public:
ysr@1580 466 BlkPrintingClosure(const CMSCollector* collector,
ysr@1580 467 const CompactibleFreeListSpace* sp,
ysr@1580 468 const CMSBitMap* live_bit_map,
ysr@1580 469 outputStream* st):
ysr@1580 470 _collector(collector),
ysr@1580 471 _sp(sp),
ysr@1580 472 _live_bit_map(live_bit_map),
ysr@1580 473 _post_remark(collector->abstract_state() > CMSCollector::FinalMarking),
ysr@1580 474 _st(st) { }
ysr@1580 475 size_t do_blk(HeapWord* addr);
ysr@1580 476 };
ysr@1580 477
ysr@1580 478 size_t BlkPrintingClosure::do_blk(HeapWord* addr) {
ysr@1580 479 size_t sz = _sp->block_size_no_stall(addr, _collector);
ysr@1580 480 assert(sz != 0, "Should always be able to compute a size");
ysr@1580 481 if (_sp->block_is_obj(addr)) {
ysr@1580 482 const bool dead = _post_remark && !_live_bit_map->isMarked(addr);
ysr@1580 483 _st->print_cr(PTR_FORMAT ": %s object of size " SIZE_FORMAT "%s",
ysr@1580 484 addr,
ysr@1580 485 dead ? "dead" : "live",
ysr@1580 486 sz,
ysr@1580 487 (!dead && CMSPrintObjectsInDump) ? ":" : ".");
ysr@1580 488 if (CMSPrintObjectsInDump && !dead) {
ysr@1580 489 oop(addr)->print_on(_st);
ysr@1580 490 _st->print_cr("--------------------------------------");
ysr@1580 491 }
ysr@1580 492 } else { // free block
ysr@1580 493 _st->print_cr(PTR_FORMAT ": free block of size " SIZE_FORMAT "%s",
ysr@1580 494 addr, sz, CMSPrintChunksInDump ? ":" : ".");
ysr@1580 495 if (CMSPrintChunksInDump) {
ysr@1580 496 ((FreeChunk*)addr)->print_on(_st);
ysr@1580 497 _st->print_cr("--------------------------------------");
ysr@1580 498 }
ysr@1580 499 }
ysr@1580 500 return sz;
ysr@1580 501 }
ysr@1580 502
ysr@1580 503 void CompactibleFreeListSpace::dump_at_safepoint_with_locks(CMSCollector* c,
ysr@1580 504 outputStream* st) {
ysr@1580 505 st->print_cr("\n=========================");
ysr@1580 506 st->print_cr("Block layout in CMS Heap:");
ysr@1580 507 st->print_cr("=========================");
ysr@1580 508 BlkPrintingClosure bpcl(c, this, c->markBitMap(), st);
ysr@1580 509 blk_iterate(&bpcl);
ysr@1580 510
ysr@1580 511 st->print_cr("\n=======================================");
ysr@1580 512 st->print_cr("Order & Layout of Promotion Info Blocks");
ysr@1580 513 st->print_cr("=======================================");
ysr@1580 514 print_promo_info_blocks(st);
ysr@1580 515
ysr@1580 516 st->print_cr("\n===========================");
ysr@1580 517 st->print_cr("Order of Indexed Free Lists");
ysr@1580 518 st->print_cr("===========================");
ysr@1580 519 print_indexed_free_lists(st);
ysr@1580 520
ysr@1580 521 st->print_cr("\n=================================");
ysr@1580 522 st->print_cr("Order of Free Lists in Dictionary");
ysr@1580 523 st->print_cr("=================================");
ysr@1580 524 print_dictionary_free_lists(st);
ysr@1580 525 }
ysr@1580 526
ysr@1580 527
duke@435 528 void CompactibleFreeListSpace::reportFreeListStatistics() const {
duke@435 529 assert_lock_strong(&_freelistLock);
duke@435 530 assert(PrintFLSStatistics != 0, "Reporting error");
duke@435 531 _dictionary->reportStatistics();
duke@435 532 if (PrintFLSStatistics > 1) {
duke@435 533 reportIndexedFreeListStatistics();
duke@435 534 size_t totalSize = totalSizeInIndexedFreeLists() +
duke@435 535 _dictionary->totalChunkSize(DEBUG_ONLY(freelistLock()));
duke@435 536 gclog_or_tty->print(" free=%ld frag=%1.4f\n", totalSize, flsFrag());
duke@435 537 }
duke@435 538 }
duke@435 539
duke@435 540 void CompactibleFreeListSpace::reportIndexedFreeListStatistics() const {
duke@435 541 assert_lock_strong(&_freelistLock);
duke@435 542 gclog_or_tty->print("Statistics for IndexedFreeLists:\n"
duke@435 543 "--------------------------------\n");
duke@435 544 size_t totalSize = totalSizeInIndexedFreeLists();
duke@435 545 size_t freeBlocks = numFreeBlocksInIndexedFreeLists();
duke@435 546 gclog_or_tty->print("Total Free Space: %d\n", totalSize);
duke@435 547 gclog_or_tty->print("Max Chunk Size: %d\n", maxChunkSizeInIndexedFreeLists());
duke@435 548 gclog_or_tty->print("Number of Blocks: %d\n", freeBlocks);
duke@435 549 if (freeBlocks != 0) {
duke@435 550 gclog_or_tty->print("Av. Block Size: %d\n", totalSize/freeBlocks);
duke@435 551 }
duke@435 552 }
duke@435 553
duke@435 554 size_t CompactibleFreeListSpace::numFreeBlocksInIndexedFreeLists() const {
duke@435 555 size_t res = 0;
duke@435 556 for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
duke@435 557 debug_only(
duke@435 558 ssize_t recount = 0;
duke@435 559 for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
duke@435 560 fc = fc->next()) {
duke@435 561 recount += 1;
duke@435 562 }
duke@435 563 assert(recount == _indexedFreeList[i].count(),
duke@435 564 "Incorrect count in list");
duke@435 565 )
duke@435 566 res += _indexedFreeList[i].count();
duke@435 567 }
duke@435 568 return res;
duke@435 569 }
duke@435 570
duke@435 571 size_t CompactibleFreeListSpace::maxChunkSizeInIndexedFreeLists() const {
duke@435 572 for (size_t i = IndexSetSize - 1; i != 0; i -= IndexSetStride) {
duke@435 573 if (_indexedFreeList[i].head() != NULL) {
duke@435 574 assert(_indexedFreeList[i].count() != 0, "Inconsistent FreeList");
duke@435 575 return (size_t)i;
duke@435 576 }
duke@435 577 }
duke@435 578 return 0;
duke@435 579 }
duke@435 580
duke@435 581 void CompactibleFreeListSpace::set_end(HeapWord* value) {
duke@435 582 HeapWord* prevEnd = end();
duke@435 583 assert(prevEnd != value, "unnecessary set_end call");
ysr@2071 584 assert(prevEnd == NULL || !BlockOffsetArrayUseUnallocatedBlock || value >= unallocated_block(),
ysr@2071 585 "New end is below unallocated block");
duke@435 586 _end = value;
duke@435 587 if (prevEnd != NULL) {
duke@435 588 // Resize the underlying block offset table.
duke@435 589 _bt.resize(pointer_delta(value, bottom()));
ysr@1580 590 if (value <= prevEnd) {
ysr@2071 591 assert(!BlockOffsetArrayUseUnallocatedBlock || value >= unallocated_block(),
ysr@2071 592 "New end is below unallocated block");
ysr@1580 593 } else {
ysr@1580 594 // Now, take this new chunk and add it to the free blocks.
ysr@1580 595 // Note that the BOT has not yet been updated for this block.
ysr@1580 596 size_t newFcSize = pointer_delta(value, prevEnd);
ysr@1580 597 // XXX This is REALLY UGLY and should be fixed up. XXX
ysr@1580 598 if (!_adaptive_freelists && _smallLinearAllocBlock._ptr == NULL) {
ysr@1580 599 // Mark the boundary of the new block in BOT
ysr@1580 600 _bt.mark_block(prevEnd, value);
ysr@1580 601 // put it all in the linAB
ysr@1580 602 if (ParallelGCThreads == 0) {
ysr@1580 603 _smallLinearAllocBlock._ptr = prevEnd;
ysr@1580 604 _smallLinearAllocBlock._word_size = newFcSize;
ysr@1580 605 repairLinearAllocBlock(&_smallLinearAllocBlock);
ysr@1580 606 } else { // ParallelGCThreads > 0
ysr@1580 607 MutexLockerEx x(parDictionaryAllocLock(),
ysr@1580 608 Mutex::_no_safepoint_check_flag);
ysr@1580 609 _smallLinearAllocBlock._ptr = prevEnd;
ysr@1580 610 _smallLinearAllocBlock._word_size = newFcSize;
ysr@1580 611 repairLinearAllocBlock(&_smallLinearAllocBlock);
ysr@1580 612 }
ysr@1580 613 // Births of chunks put into a LinAB are not recorded. Births
ysr@1580 614 // of chunks as they are allocated out of a LinAB are.
ysr@1580 615 } else {
ysr@1580 616 // Add the block to the free lists, if possible coalescing it
ysr@1580 617 // with the last free block, and update the BOT and census data.
ysr@1580 618 addChunkToFreeListsAtEndRecordingStats(prevEnd, newFcSize);
duke@435 619 }
duke@435 620 }
duke@435 621 }
duke@435 622 }
duke@435 623
duke@435 624 class FreeListSpace_DCTOC : public Filtering_DCTOC {
duke@435 625 CompactibleFreeListSpace* _cfls;
duke@435 626 CMSCollector* _collector;
duke@435 627 protected:
duke@435 628 // Override.
duke@435 629 #define walk_mem_region_with_cl_DECL(ClosureType) \
duke@435 630 virtual void walk_mem_region_with_cl(MemRegion mr, \
duke@435 631 HeapWord* bottom, HeapWord* top, \
duke@435 632 ClosureType* cl); \
duke@435 633 void walk_mem_region_with_cl_par(MemRegion mr, \
duke@435 634 HeapWord* bottom, HeapWord* top, \
duke@435 635 ClosureType* cl); \
duke@435 636 void walk_mem_region_with_cl_nopar(MemRegion mr, \
duke@435 637 HeapWord* bottom, HeapWord* top, \
duke@435 638 ClosureType* cl)
duke@435 639 walk_mem_region_with_cl_DECL(OopClosure);
duke@435 640 walk_mem_region_with_cl_DECL(FilteringClosure);
duke@435 641
duke@435 642 public:
duke@435 643 FreeListSpace_DCTOC(CompactibleFreeListSpace* sp,
duke@435 644 CMSCollector* collector,
duke@435 645 OopClosure* cl,
duke@435 646 CardTableModRefBS::PrecisionStyle precision,
duke@435 647 HeapWord* boundary) :
duke@435 648 Filtering_DCTOC(sp, cl, precision, boundary),
duke@435 649 _cfls(sp), _collector(collector) {}
duke@435 650 };
duke@435 651
duke@435 652 // We de-virtualize the block-related calls below, since we know that our
duke@435 653 // space is a CompactibleFreeListSpace.
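// (Explanatory note on the de-virtualization: a member call written with an
// explicit class qualifier, e.g.
//   _cfls->CompactibleFreeListSpace::block_size(bottom);
// is bound statically by C++ and bypasses the virtual dispatch that a plain
// "_cfls->block_size(bottom)" would incur. The macro below relies on this to
// avoid a virtual call per block walked.)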
duke@435 654 #define FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(ClosureType) \
duke@435 655 void FreeListSpace_DCTOC::walk_mem_region_with_cl(MemRegion mr, \
duke@435 656 HeapWord* bottom, \
duke@435 657 HeapWord* top, \
duke@435 658 ClosureType* cl) { \
duke@435 659 if (SharedHeap::heap()->n_par_threads() > 0) { \
duke@435 660 walk_mem_region_with_cl_par(mr, bottom, top, cl); \
duke@435 661 } else { \
duke@435 662 walk_mem_region_with_cl_nopar(mr, bottom, top, cl); \
duke@435 663 } \
duke@435 664 } \
duke@435 665 void FreeListSpace_DCTOC::walk_mem_region_with_cl_par(MemRegion mr, \
duke@435 666 HeapWord* bottom, \
duke@435 667 HeapWord* top, \
duke@435 668 ClosureType* cl) { \
duke@435 669 /* Skip parts that are before "mr", in case "block_start" sent us \
duke@435 670 back too far. */ \
duke@435 671 HeapWord* mr_start = mr.start(); \
duke@435 672 size_t bot_size = _cfls->CompactibleFreeListSpace::block_size(bottom); \
duke@435 673 HeapWord* next = bottom + bot_size; \
duke@435 674 while (next < mr_start) { \
duke@435 675 bottom = next; \
duke@435 676 bot_size = _cfls->CompactibleFreeListSpace::block_size(bottom); \
duke@435 677 next = bottom + bot_size; \
duke@435 678 } \
duke@435 679 \
duke@435 680 while (bottom < top) { \
duke@435 681 if (_cfls->CompactibleFreeListSpace::block_is_obj(bottom) && \
duke@435 682 !_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks( \
duke@435 683 oop(bottom)) && \
duke@435 684 !_collector->CMSCollector::is_dead_obj(oop(bottom))) { \
duke@435 685 size_t word_sz = oop(bottom)->oop_iterate(cl, mr); \
duke@435 686 bottom += _cfls->adjustObjectSize(word_sz); \
duke@435 687 } else { \
duke@435 688 bottom += _cfls->CompactibleFreeListSpace::block_size(bottom); \
duke@435 689 } \
duke@435 690 } \
duke@435 691 } \
duke@435 692 void FreeListSpace_DCTOC::walk_mem_region_with_cl_nopar(MemRegion mr, \
duke@435 693 HeapWord* bottom, \
duke@435 694 HeapWord* top, \
duke@435 695 ClosureType* cl) { \
duke@435 696 /* Skip parts that are before "mr", in case "block_start" sent us \
duke@435 697 back too far. */ \
duke@435 698 HeapWord* mr_start = mr.start(); \
duke@435 699 size_t bot_size = _cfls->CompactibleFreeListSpace::block_size_nopar(bottom); \
duke@435 700 HeapWord* next = bottom + bot_size; \
duke@435 701 while (next < mr_start) { \
duke@435 702 bottom = next; \
duke@435 703 bot_size = _cfls->CompactibleFreeListSpace::block_size_nopar(bottom); \
duke@435 704 next = bottom + bot_size; \
duke@435 705 } \
duke@435 706 \
duke@435 707 while (bottom < top) { \
duke@435 708 if (_cfls->CompactibleFreeListSpace::block_is_obj_nopar(bottom) && \
duke@435 709 !_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks( \
duke@435 710 oop(bottom)) && \
duke@435 711 !_collector->CMSCollector::is_dead_obj(oop(bottom))) { \
duke@435 712 size_t word_sz = oop(bottom)->oop_iterate(cl, mr); \
duke@435 713 bottom += _cfls->adjustObjectSize(word_sz); \
duke@435 714 } else { \
duke@435 715 bottom += _cfls->CompactibleFreeListSpace::block_size_nopar(bottom); \
duke@435 716 } \
duke@435 717 } \
duke@435 718 }
duke@435 719
duke@435 720 // (There are only two of these, rather than N, because the split is due
duke@435 721 // only to the introduction of the FilteringClosure, a local part of the
duke@435 722 // impl of this abstraction.)
duke@435 723 FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(OopClosure)
duke@435 724 FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(FilteringClosure)
duke@435 725
duke@435 726 DirtyCardToOopClosure*
duke@435 727 CompactibleFreeListSpace::new_dcto_cl(OopClosure* cl,
duke@435 728 CardTableModRefBS::PrecisionStyle precision,
duke@435 729 HeapWord* boundary) {
duke@435 730 return new FreeListSpace_DCTOC(this, _collector, cl, precision, boundary);
duke@435 731 }
duke@435 732
duke@435 733
duke@435 734 // Note on locking for the space iteration functions:
duke@435 735 // since the collector's iteration activities are concurrent with
duke@435 736 // allocation activities by mutators, absent a suitable mutual exclusion
duke@435 737 // mechanism the iterators may go awry. For instance a block being iterated
duke@435 738 // may suddenly be allocated or divided up and part of it allocated and
duke@435 739 // so on.
duke@435 740
duke@435 741 // Apply the given closure to each block in the space.
duke@435 742 void CompactibleFreeListSpace::blk_iterate_careful(BlkClosureCareful* cl) {
duke@435 743 assert_lock_strong(freelistLock());
duke@435 744 HeapWord *cur, *limit;
duke@435 745 for (cur = bottom(), limit = end(); cur < limit;
duke@435 746 cur += cl->do_blk_careful(cur));
duke@435 747 }
duke@435 748
duke@435 749 // Apply the given closure to each block in the space.
duke@435 750 void CompactibleFreeListSpace::blk_iterate(BlkClosure* cl) {
duke@435 751 assert_lock_strong(freelistLock());
duke@435 752 HeapWord *cur, *limit;
duke@435 753 for (cur = bottom(), limit = end(); cur < limit;
duke@435 754 cur += cl->do_blk(cur));
duke@435 755 }
duke@435 756
duke@435 757 // Apply the given closure to each oop in the space.
duke@435 758 void CompactibleFreeListSpace::oop_iterate(OopClosure* cl) {
duke@435 759 assert_lock_strong(freelistLock());
duke@435 760 HeapWord *cur, *limit;
duke@435 761 size_t curSize;
duke@435 762 for (cur = bottom(), limit = end(); cur < limit;
duke@435 763 cur += curSize) {
duke@435 764 curSize = block_size(cur);
duke@435 765 if (block_is_obj(cur)) {
duke@435 766 oop(cur)->oop_iterate(cl);
duke@435 767 }
duke@435 768 }
duke@435 769 }
duke@435 770
duke@435 771 // Apply the given closure to each oop in the intersection of the space and the memory region.
duke@435 772 void CompactibleFreeListSpace::oop_iterate(MemRegion mr, OopClosure* cl) {
duke@435 773 assert_lock_strong(freelistLock());
duke@435 774 if (is_empty()) {
duke@435 775 return;
duke@435 776 }
duke@435 777 MemRegion cur = MemRegion(bottom(), end());
duke@435 778 mr = mr.intersection(cur);
duke@435 779 if (mr.is_empty()) {
duke@435 780 return;
duke@435 781 }
duke@435 782 if (mr.equals(cur)) {
duke@435 783 oop_iterate(cl);
duke@435 784 return;
duke@435 785 }
duke@435 786 assert(mr.end() <= end(), "just took an intersection above");
duke@435 787 HeapWord* obj_addr = block_start(mr.start());
duke@435 788 HeapWord* t = mr.end();
duke@435 789
duke@435 790 SpaceMemRegionOopsIterClosure smr_blk(cl, mr);
duke@435 791 if (block_is_obj(obj_addr)) {
duke@435 792 // Handle first object specially.
duke@435 793 oop obj = oop(obj_addr);
duke@435 794 obj_addr += adjustObjectSize(obj->oop_iterate(&smr_blk));
duke@435 795 } else {
duke@435 796 FreeChunk* fc = (FreeChunk*)obj_addr;
duke@435 797 obj_addr += fc->size();
duke@435 798 }
duke@435 799 while (obj_addr < t) {
duke@435 800 HeapWord* obj = obj_addr;
duke@435 801 obj_addr += block_size(obj_addr);
duke@435 802 // If "obj_addr" is not greater than top, then the
duke@435 803 // entire object "obj" is within the region.
duke@435 804 if (obj_addr <= t) {
duke@435 805 if (block_is_obj(obj)) {
duke@435 806 oop(obj)->oop_iterate(cl);
duke@435 807 }
duke@435 808 } else {
duke@435 809 // "obj" extends beyond end of region
duke@435 810 if (block_is_obj(obj)) {
duke@435 811 oop(obj)->oop_iterate(&smr_blk);
duke@435 812 }
duke@435 813 break;
duke@435 814 }
duke@435 815 }
duke@435 816 }
duke@435 817
duke@435 818 // NOTE: In the following methods, in order to safely be able to
duke@435 819 // apply the closure to an object, we need to be sure that the
duke@435 820 // object has been initialized. We are guaranteed that an object
duke@435 821 // is initialized if we are holding the Heap_lock with the
duke@435 822 // world stopped.
duke@435 823 void CompactibleFreeListSpace::verify_objects_initialized() const {
duke@435 824 if (is_init_completed()) {
duke@435 825 assert_locked_or_safepoint(Heap_lock);
duke@435 826 if (Universe::is_fully_initialized()) {
duke@435 827 guarantee(SafepointSynchronize::is_at_safepoint(),
duke@435 828 "Required for objects to be initialized");
duke@435 829 }
duke@435 830 } // else make a concession at vm start-up
duke@435 831 }
duke@435 832
duke@435 833 // Apply the given closure to each object in the space
duke@435 834 void CompactibleFreeListSpace::object_iterate(ObjectClosure* blk) {
duke@435 835 assert_lock_strong(freelistLock());
duke@435 836 NOT_PRODUCT(verify_objects_initialized());
duke@435 837 HeapWord *cur, *limit;
duke@435 838 size_t curSize;
duke@435 839 for (cur = bottom(), limit = end(); cur < limit;
duke@435 840 cur += curSize) {
duke@435 841 curSize = block_size(cur);
duke@435 842 if (block_is_obj(cur)) {
duke@435 843 blk->do_object(oop(cur));
duke@435 844 }
duke@435 845 }
duke@435 846 }
duke@435 847
jmasa@952 848 // Apply the given closure to each live object in the space
jmasa@952 849 // The usage of CompactibleFreeListSpace
jmasa@952 850 // by the ConcurrentMarkSweepGeneration for concurrent GC's allows
jmasa@952 851 // objects in the space to hold references to objects that are no longer
jmasa@952 852 // valid. For example, an object may reference another object
jmasa@952 853 // that has already been swept up (collected). This method uses
jmasa@952 854 // obj_is_alive() to determine whether it is safe to apply the closure to
jmasa@952 855 // an object. See obj_is_alive() for details on how liveness of an
jmasa@952 856 // object is decided.
jmasa@952 857
jmasa@952 858 void CompactibleFreeListSpace::safe_object_iterate(ObjectClosure* blk) {
jmasa@952 859 assert_lock_strong(freelistLock());
jmasa@952 860 NOT_PRODUCT(verify_objects_initialized());
jmasa@952 861 HeapWord *cur, *limit;
jmasa@952 862 size_t curSize;
jmasa@952 863 for (cur = bottom(), limit = end(); cur < limit;
jmasa@952 864 cur += curSize) {
jmasa@952 865 curSize = block_size(cur);
jmasa@952 866 if (block_is_obj(cur) && obj_is_alive(cur)) {
jmasa@952 867 blk->do_object(oop(cur));
jmasa@952 868 }
jmasa@952 869 }
jmasa@952 870 }
jmasa@952 871
duke@435 872 void CompactibleFreeListSpace::object_iterate_mem(MemRegion mr,
duke@435 873 UpwardsObjectClosure* cl) {
ysr@1580 874 assert_locked(freelistLock());
duke@435 875 NOT_PRODUCT(verify_objects_initialized());
duke@435 876 Space::object_iterate_mem(mr, cl);
duke@435 877 }
duke@435 878
duke@435 879 // Callers of this iterator beware: The closure application should
duke@435 880 // be robust in the face of uninitialized objects and should (always)
duke@435 881 // return a correct size so that the next addr + size below gives us a
duke@435 882 // valid block boundary. [See for instance,
duke@435 883 // ScanMarkedObjectsAgainCarefullyClosure::do_object_careful()
duke@435 884 // in ConcurrentMarkSweepGeneration.cpp.]
duke@435 885 HeapWord*
duke@435 886 CompactibleFreeListSpace::object_iterate_careful(ObjectClosureCareful* cl) {
duke@435 887 assert_lock_strong(freelistLock());
duke@435 888 HeapWord *addr, *last;
duke@435 889 size_t size;
duke@435 890 for (addr = bottom(), last = end();
duke@435 891 addr < last; addr += size) {
duke@435 892 FreeChunk* fc = (FreeChunk*)addr;
duke@435 893 if (fc->isFree()) {
duke@435 894 // Since we hold the free list lock, which protects direct
duke@435 895 // allocation in this generation by mutators, a free object
duke@435 896 // will remain free throughout this iteration code.
duke@435 897 size = fc->size();
duke@435 898 } else {
duke@435 899 // Note that the object need not necessarily be initialized,
duke@435 900 // because (for instance) the free list lock does NOT protect
duke@435 901 // object initialization. The closure application below must
duke@435 902 // therefore be correct in the face of uninitialized objects.
duke@435 903 size = cl->do_object_careful(oop(addr));
duke@435 904 if (size == 0) {
duke@435 905 // An unparsable object found. Signal early termination.
duke@435 906 return addr;
duke@435 907 }
duke@435 908 }
duke@435 909 }
duke@435 910 return NULL;
duke@435 911 }
duke@435 912
duke@435 913 // Callers of this iterator beware: The closure application should
duke@435 914 // be robust in the face of uninitialized objects and should (always)
duke@435 915 // return a correct size so that the next addr + size below gives us a
duke@435 916 // valid block boundary. [See for instance,
duke@435 917 // ScanMarkedObjectsAgainCarefullyClosure::do_object_careful()
duke@435 918 // in ConcurrentMarkSweepGeneration.cpp.]
duke@435 919 HeapWord*
duke@435 920 CompactibleFreeListSpace::object_iterate_careful_m(MemRegion mr,
duke@435 921 ObjectClosureCareful* cl) {
duke@435 922 assert_lock_strong(freelistLock());
duke@435 923 // Can't use used_region() below because it may not necessarily
duke@435 924 // be the same as [bottom(),end()); although we could
duke@435 925 // use [used_region().start(),round_to(used_region().end(),CardSize)),
duke@435 926 // that appears too cumbersome, so we just do the simpler check
duke@435 927 // in the assertion below.
duke@435 928 assert(!mr.is_empty() && MemRegion(bottom(),end()).contains(mr),
duke@435 929 "mr should be non-empty and within used space");
duke@435 930 HeapWord *addr, *end;
duke@435 931 size_t size;
duke@435 932 for (addr = block_start_careful(mr.start()), end = mr.end();
duke@435 933 addr < end; addr += size) {
duke@435 934 FreeChunk* fc = (FreeChunk*)addr;
duke@435 935 if (fc->isFree()) {
duke@435 936 // Since we hold the free list lock, which protects direct
duke@435 937 // allocation in this generation by mutators, a free object
duke@435 938 // will remain free throughout this iteration code.
duke@435 939 size = fc->size();
duke@435 940 } else {
duke@435 941 // Note that the object need not necessarily be initialized,
duke@435 942 // because (for instance) the free list lock does NOT protect
duke@435 943 // object initialization. The closure application below must
duke@435 944 // therefore be correct in the face of uninitialized objects.
duke@435 945 size = cl->do_object_careful_m(oop(addr), mr);
duke@435 946 if (size == 0) {
duke@435 947 // An unparsable object found. Signal early termination.
duke@435 948 return addr;
duke@435 949 }
duke@435 950 }
duke@435 951 }
duke@435 952 return NULL;
duke@435 953 }
duke@435 954
duke@435 955
ysr@777 956 HeapWord* CompactibleFreeListSpace::block_start_const(const void* p) const {
duke@435 957 NOT_PRODUCT(verify_objects_initialized());
duke@435 958 return _bt.block_start(p);
duke@435 959 }
duke@435 960
duke@435 961 HeapWord* CompactibleFreeListSpace::block_start_careful(const void* p) const {
duke@435 962 return _bt.block_start_careful(p);
duke@435 963 }
duke@435 964
duke@435 965 size_t CompactibleFreeListSpace::block_size(const HeapWord* p) const {
duke@435 966 NOT_PRODUCT(verify_objects_initialized());
duke@435 967 // This must be volatile, or else there is a danger that the compiler
duke@435 968 // will compile the code below into a sometimes-infinite loop, by keeping
duke@435 969 // the value read the first time in a register.
duke@435 970 while (true) {
duke@435 971 // We must do this until we get a consistent view of the object.
coleenp@622 972 if (FreeChunk::indicatesFreeChunk(p)) {
coleenp@622 973 volatile FreeChunk* fc = (volatile FreeChunk*)p;
coleenp@622 974 size_t res = fc->size();
coleenp@622 975 // If the object is still a free chunk, return the size, else it
coleenp@622 976 // has been allocated so try again.
coleenp@622 977 if (FreeChunk::indicatesFreeChunk(p)) {
duke@435 978 assert(res != 0, "Block size should not be 0");
duke@435 979 return res;
duke@435 980 }
coleenp@622 981 } else {
coleenp@622 982 // must read from what 'p' points to in each loop.
coleenp@622 983 klassOop k = ((volatile oopDesc*)p)->klass_or_null();
coleenp@622 984 if (k != NULL) {
ysr@2071 985 assert(k->is_oop(true /* ignore mark word */), "Should be klass oop");
coleenp@622 986 oop o = (oop)p;
coleenp@622 987 assert(o->is_parsable(), "Should be parsable");
coleenp@622 988 assert(o->is_oop(true /* ignore mark word */), "Should be an oop.");
coleenp@622 989 size_t res = o->size_given_klass(k->klass_part());
coleenp@622 990 res = adjustObjectSize(res);
coleenp@622 991 assert(res != 0, "Block size should not be 0");
coleenp@622 992 return res;
coleenp@622 993 }
duke@435 994 }
duke@435 995 }
duke@435 996 }
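// Note on the pattern above: the size is read without a lock while a
// parallel allocator may concurrently turn the chunk at 'p' into an object.
// Re-testing FreeChunk::indicatesFreeChunk(p) after reading the size (and,
// on the other branch, reading klass_or_null() through a volatile pointer)
// means a size is returned only when it came from a consistent view of the
// block; otherwise the loop simply retries.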
duke@435 997
duke@435 998 // A variant of the above that uses the Printezis bits for
duke@435 999 // unparsable but allocated objects. This avoids any possible
duke@435 1000 // stalls waiting for mutators to initialize objects, and is
duke@435 1001 // thus potentially faster than the variant above. However,
duke@435 1002 // this variant may return a zero size for a block that is
duke@435 1003 // under mutation and for which a consistent size cannot be
duke@435 1004 // inferred without stalling; see CMSCollector::block_size_if_printezis_bits().
duke@435 1005 size_t CompactibleFreeListSpace::block_size_no_stall(HeapWord* p,
duke@435 1006 const CMSCollector* c)
duke@435 1007 const {
duke@435 1008 assert(MemRegion(bottom(), end()).contains(p), "p not in space");
duke@435 1009 // This must be volatile, or else there is a danger that the compiler
duke@435 1010 // will compile the code below into a sometimes-infinite loop, by keeping
duke@435 1011 // the value read the first time in a register.
duke@435 1012 DEBUG_ONLY(uint loops = 0;)
duke@435 1013 while (true) {
duke@435 1014 // We must do this until we get a consistent view of the object.
coleenp@622 1015 if (FreeChunk::indicatesFreeChunk(p)) {
coleenp@622 1016 volatile FreeChunk* fc = (volatile FreeChunk*)p;
coleenp@622 1017 size_t res = fc->size();
coleenp@622 1018 if (FreeChunk::indicatesFreeChunk(p)) {
duke@435 1019 assert(res != 0, "Block size should not be 0");
duke@435 1020 assert(loops == 0, "Should be 0");
duke@435 1021 return res;
duke@435 1022 }
duke@435 1023 } else {
coleenp@622 1024 // must read from what 'p' points to in each loop.
coleenp@622 1025 klassOop k = ((volatile oopDesc*)p)->klass_or_null();
jmasa@953 1026 if (k != NULL &&
jmasa@953 1027 ((oopDesc*)p)->is_parsable() &&
jmasa@953 1028 ((oopDesc*)p)->is_conc_safe()) {
coleenp@622 1029 assert(k->is_oop(), "Should really be klass oop.");
coleenp@622 1030 oop o = (oop)p;
coleenp@622 1031 assert(o->is_oop(), "Should be an oop");
coleenp@622 1032 size_t res = o->size_given_klass(k->klass_part());
coleenp@622 1033 res = adjustObjectSize(res);
coleenp@622 1034 assert(res != 0, "Block size should not be 0");
coleenp@622 1035 return res;
coleenp@622 1036 } else {
coleenp@622 1037 return c->block_size_if_printezis_bits(p);
coleenp@622 1038 }
duke@435 1039 }
duke@435 1040 assert(loops == 0, "Can loop at most once");
duke@435 1041 DEBUG_ONLY(loops++;)
duke@435 1042 }
duke@435 1043 }
duke@435 1044
duke@435 1045 size_t CompactibleFreeListSpace::block_size_nopar(const HeapWord* p) const {
duke@435 1046 NOT_PRODUCT(verify_objects_initialized());
duke@435 1047 assert(MemRegion(bottom(), end()).contains(p), "p not in space");
duke@435 1048 FreeChunk* fc = (FreeChunk*)p;
duke@435 1049 if (fc->isFree()) {
duke@435 1050 return fc->size();
duke@435 1051 } else {
duke@435 1052 // Ignore mark word because this may be a recently promoted
duke@435 1053 // object whose mark word is used to chain together grey
duke@435 1054 // objects (the last one would have a null value).
duke@435 1055 assert(oop(p)->is_oop(true), "Should be an oop");
duke@435 1056 return adjustObjectSize(oop(p)->size());
duke@435 1057 }
duke@435 1058 }
duke@435 1059
duke@435 1060 // This implementation assumes that the property of "being an object" is
duke@435 1061 // stable. But being a free chunk may not be (because of parallel
duke@435 1062 // promotion.)
duke@435 1063 bool CompactibleFreeListSpace::block_is_obj(const HeapWord* p) const {
duke@435 1064 FreeChunk* fc = (FreeChunk*)p;
duke@435 1065 assert(is_in_reserved(p), "Should be in space");
duke@435 1066 // When doing a mark-sweep-compact of the CMS generation, this
duke@435 1067 // assertion may fail because prepare_for_compaction() uses
duke@435 1068 // space that is garbage to maintain information on ranges of
duke@435 1069 // live objects so that these live ranges can be moved as a whole.
duke@435 1070 // Comment out this assertion until that problem can be solved
duke@435 1071 // (i.e., that the block start calculation may look at objects
duke@435 1072 // at addresses below "p" in finding the object that contains "p"
duke@435 1073 // and those objects (if garbage) may have been modified to hold
duke@435 1074 // live range information.)
jmasa@2188 1075 // assert(CollectedHeap::use_parallel_gc_threads() || _bt.block_start(p) == p,
jmasa@2188 1076 // "Should be a block boundary");
coleenp@622 1077 if (FreeChunk::indicatesFreeChunk(p)) return false;
coleenp@622 1078 klassOop k = oop(p)->klass_or_null();
duke@435 1079 if (k != NULL) {
duke@435 1080 // Ignore mark word because it may have been used to
duke@435 1081 // chain together promoted objects (the last one
duke@435 1082 // would have a null value).
duke@435 1083 assert(oop(p)->is_oop(true), "Should be an oop");
duke@435 1084 return true;
duke@435 1085 } else {
duke@435 1086 return false; // Was not an object at the start of collection.
duke@435 1087 }
duke@435 1088 }
duke@435 1089
duke@435 1090 // Check if the object is alive. This fact is checked either by consulting
duke@435 1091 // the main marking bitmap in the sweeping phase or, if it's a permanent
duke@435 1092 // generation and we're not in the sweeping phase, by checking the
duke@435 1093 // perm_gen_verify_bit_map where we store the "deadness" information if
duke@435 1094 // we did not sweep the perm gen in the most recent previous GC cycle.
duke@435 1095 bool CompactibleFreeListSpace::obj_is_alive(const HeapWord* p) const {
ysr@2293 1096 assert(block_is_obj(p), "The address should point to an object");
ysr@2293 1097 assert(SafepointSynchronize::is_at_safepoint(), "Else races are possible");
duke@435 1098
duke@435 1099 // If we're sweeping, we use object liveness information from the main bit map
duke@435 1100 // for both perm gen and old gen.
duke@435 1101 // We don't need to lock the bitmap (live_map or dead_map below), because
duke@435 1102 // EITHER we are in the middle of the sweeping phase, and the
duke@435 1103 // main marking bit map (live_map below) is locked,
duke@435 1104 // OR we're in other phases and perm_gen_verify_bit_map (dead_map below)
duke@435 1105 // is stable, because it's mutated only in the sweeping phase.
ysr@2293 1106 // NOTE: This method is also used by jmap where, if class unloading is
ysr@2293 1107 // off, the results can return "false" for legitimate perm objects,
ysr@2293 1108 // when we are not in the midst of a sweeping phase, which can result
ysr@2293 1109 // in jmap not reporting certain perm gen objects. This will be moot
ysr@2293 1110 // if/when the perm gen goes away in the future.
duke@435 1111 if (_collector->abstract_state() == CMSCollector::Sweeping) {
duke@435 1112 CMSBitMap* live_map = _collector->markBitMap();
ysr@2293 1113 return live_map->par_isMarked((HeapWord*) p);
duke@435 1114 } else {
duke@435 1115 // If we're not currently sweeping and we haven't swept the perm gen in
duke@435 1116 // the previous concurrent cycle then we may have dead but unswept objects
duke@435 1117 // in the perm gen. In this case, we use the "deadness" information
duke@435 1118 // that we had saved in perm_gen_verify_bit_map at the last sweep.
duke@435 1119 if (!CMSClassUnloadingEnabled && _collector->_permGen->reserved().contains(p)) {
duke@435 1120 if (_collector->verifying()) {
duke@435 1121 CMSBitMap* dead_map = _collector->perm_gen_verify_bit_map();
duke@435 1122 // Object is marked in the dead_map bitmap at the previous sweep
duke@435 1123 // when we know that it's dead; if the bitmap is not allocated then
duke@435 1124 // the object is alive.
duke@435 1125 return (dead_map->sizeInBits() == 0) // bit_map has been allocated
duke@435 1126 || !dead_map->par_isMarked((HeapWord*) p);
duke@435 1127 } else {
duke@435 1128 return false; // We can't say for sure if it's live, so we say that it's dead.
duke@435 1129 }
duke@435 1130 }
duke@435 1131 }
duke@435 1132 return true;
duke@435 1133 }
duke@435 1134
duke@435 1135 bool CompactibleFreeListSpace::block_is_obj_nopar(const HeapWord* p) const {
duke@435 1136 FreeChunk* fc = (FreeChunk*)p;
duke@435 1137 assert(is_in_reserved(p), "Should be in space");
duke@435 1138 assert(_bt.block_start(p) == p, "Should be a block boundary");
duke@435 1139 if (!fc->isFree()) {
duke@435 1140 // Ignore mark word because it may have been used to
duke@435 1141 // chain together promoted objects (the last one
duke@435 1142 // would have a null value).
duke@435 1143 assert(oop(p)->is_oop(true), "Should be an oop");
duke@435 1144 return true;
duke@435 1145 }
duke@435 1146 return false;
duke@435 1147 }
duke@435 1148
duke@435 1149 // "MT-safe but not guaranteed MT-precise" (TM); you may get an
duke@435 1150 // approximate answer if you don't hold the freelistLock when you call this.
duke@435 1151 size_t CompactibleFreeListSpace::totalSizeInIndexedFreeLists() const {
duke@435 1152 size_t size = 0;
duke@435 1153 for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
duke@435 1154 debug_only(
duke@435 1155 // We may be calling here without the lock in which case we
duke@435 1156 // won't do this modest sanity check.
duke@435 1157 if (freelistLock()->owned_by_self()) {
duke@435 1158 size_t total_list_size = 0;
duke@435 1159 for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
duke@435 1160 fc = fc->next()) {
duke@435 1161 total_list_size += i;
duke@435 1162 }
duke@435 1163 assert(total_list_size == i * _indexedFreeList[i].count(),
duke@435 1164 "Count in list is incorrect");
duke@435 1165 }
duke@435 1166 )
duke@435 1167 size += i * _indexedFreeList[i].count();
duke@435 1168 }
duke@435 1169 return size;
duke@435 1170 }
duke@435 1171
duke@435 1172 HeapWord* CompactibleFreeListSpace::par_allocate(size_t size) {
duke@435 1173 MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
duke@435 1174 return allocate(size);
duke@435 1175 }
duke@435 1176
duke@435 1177 HeapWord*
duke@435 1178 CompactibleFreeListSpace::getChunkFromSmallLinearAllocBlockRemainder(size_t size) {
duke@435 1179 return getChunkFromLinearAllocBlockRemainder(&_smallLinearAllocBlock, size);
duke@435 1180 }
duke@435 1181
duke@435 1182 HeapWord* CompactibleFreeListSpace::allocate(size_t size) {
duke@435 1183 assert_lock_strong(freelistLock());
duke@435 1184 HeapWord* res = NULL;
duke@435 1185 assert(size == adjustObjectSize(size),
duke@435 1186 "use adjustObjectSize() before calling into allocate()");
duke@435 1187
duke@435 1188 if (_adaptive_freelists) {
duke@435 1189 res = allocate_adaptive_freelists(size);
duke@435 1190 } else { // non-adaptive free lists
duke@435 1191 res = allocate_non_adaptive_freelists(size);
duke@435 1192 }
duke@435 1193
duke@435 1194 if (res != NULL) {
duke@435 1195 // check that res does lie in this space!
duke@435 1196 assert(is_in_reserved(res), "Not in this space!");
duke@435 1197 assert(is_aligned((void*)res), "alignment check");
duke@435 1198
duke@435 1199 FreeChunk* fc = (FreeChunk*)res;
duke@435 1200 fc->markNotFree();
duke@435 1201 assert(!fc->isFree(), "shouldn't be marked free");
coleenp@622 1202 assert(oop(fc)->klass_or_null() == NULL, "should look uninitialized");
duke@435 1203 // Verify that the block offset table shows this to
duke@435 1204 // be a single block, but not one which is unallocated.
duke@435 1205 _bt.verify_single_block(res, size);
duke@435 1206 _bt.verify_not_unallocated(res, size);
duke@435 1207 // mangle a just allocated object with a distinct pattern.
duke@435 1208 debug_only(fc->mangleAllocated(size));
duke@435 1209 }
duke@435 1210
duke@435 1211 return res;
duke@435 1212 }
duke@435 1213
duke@435 1214 HeapWord* CompactibleFreeListSpace::allocate_non_adaptive_freelists(size_t size) {
duke@435 1215 HeapWord* res = NULL;
duke@435 1216 // try and use linear allocation for smaller blocks
duke@435 1217 if (size < _smallLinearAllocBlock._allocation_size_limit) {
duke@435 1218 // if successful, the following also adjusts block offset table
duke@435 1219 res = getChunkFromSmallLinearAllocBlock(size);
duke@435 1220 }
duke@435 1221 // Else triage to indexed lists for smaller sizes
duke@435 1222 if (res == NULL) {
duke@435 1223 if (size < SmallForDictionary) {
duke@435 1224 res = (HeapWord*) getChunkFromIndexedFreeList(size);
duke@435 1225 } else {
duke@435 1226 // else get it from the big dictionary; if even this doesn't
duke@435 1227 // work we are out of luck.
duke@435 1228 res = (HeapWord*)getChunkFromDictionaryExact(size);
duke@435 1229 }
duke@435 1230 }
duke@435 1231
duke@435 1232 return res;
duke@435 1233 }
duke@435 1234
duke@435 1235 HeapWord* CompactibleFreeListSpace::allocate_adaptive_freelists(size_t size) {
duke@435 1236 assert_lock_strong(freelistLock());
duke@435 1237 HeapWord* res = NULL;
duke@435 1238 assert(size == adjustObjectSize(size),
duke@435 1239 "use adjustObjectSize() before calling into allocate()");
duke@435 1240
duke@435 1241 // Strategy
duke@435 1242 // if small
duke@435 1243 // exact size from small object indexed list if small
duke@435 1244 // small or large linear allocation block (linAB) as appropriate
duke@435 1245 // take from lists of greater sized chunks
duke@435 1246 // else
duke@435 1247 // dictionary
duke@435 1248 // small or large linear allocation block if it has the space
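// Illustrative walk-through of the strategy above: a request smaller than
// IndexSetSize first tries the exact-size indexed list, then the small
// linAB, then larger indexed lists via getChunkFromGreater(); a request of
// IndexSetSize words or more goes straight to the dictionary and falls back
// to the small linAB remainder only if the dictionary cannot satisfy it.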
duke@435 1249 // Try allocating exact size from indexTable first
duke@435 1250 if (size < IndexSetSize) {
duke@435 1251 res = (HeapWord*) getChunkFromIndexedFreeList(size);
duke@435 1252 if (res != NULL) {
duke@435 1253 assert(res != (HeapWord*)_indexedFreeList[size].head(),
duke@435 1254 "Not removed from free list");
duke@435 1255 // no block offset table adjustment is necessary on blocks in
duke@435 1256 // the indexed lists.
duke@435 1257
duke@435 1258 // Try allocating from the small LinAB
duke@435 1259 } else if (size < _smallLinearAllocBlock._allocation_size_limit &&
duke@435 1260 (res = getChunkFromSmallLinearAllocBlock(size)) != NULL) {
duke@435 1261 // if successful, the above also adjusts block offset table
duke@435 1262 // Note that this call will refill the LinAB to
duke@435 1263 // satisfy the request. This is different than
duke@435 1264 // evm.
duke@435 1265 // Don't record chunk off a LinAB? smallSplitBirth(size);
duke@435 1266 } else {
duke@435 1267 // Raid the exact free lists larger than size, even if they are not
duke@435 1268 // overpopulated.
duke@435 1269 res = (HeapWord*) getChunkFromGreater(size);
duke@435 1270 }
duke@435 1271 } else {
duke@435 1272 // Big objects get allocated directly from the dictionary.
duke@435 1273 res = (HeapWord*) getChunkFromDictionaryExact(size);
duke@435 1274 if (res == NULL) {
duke@435 1275 // Try hard not to fail since an allocation failure will likely
duke@435 1276 // trigger a synchronous GC. Try to get the space from the
duke@435 1277 // allocation blocks.
duke@435 1278 res = getChunkFromSmallLinearAllocBlockRemainder(size);
duke@435 1279 }
duke@435 1280 }
duke@435 1281
duke@435 1282 return res;
duke@435 1283 }
duke@435 1284
duke@435 1285 // A worst-case estimate of the space required (in HeapWords) to expand the heap
duke@435 1286 // when promoting obj.
duke@435 1287 size_t CompactibleFreeListSpace::expansionSpaceRequired(size_t obj_size) const {
duke@435 1288 // Depending on the object size, expansion may require refilling either a
duke@435 1289 // bigLAB or a smallLAB plus refilling a PromotionInfo object. MinChunkSize
duke@435 1290 // is added because the dictionary may over-allocate to avoid fragmentation.
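// For example (figures illustrative only): promoting a 100-word object with
// adaptive free lists and a 256-word PromotionInfo refill is estimated at
// 100 + 256 + 2*MinChunkSize words; with non-adaptive free lists the 100 is
// first raised to the small linAB refill size if that is larger.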
duke@435 1291 size_t space = obj_size;
duke@435 1292 if (!_adaptive_freelists) {
duke@435 1293 space = MAX2(space, _smallLinearAllocBlock._refillSize);
duke@435 1294 }
duke@435 1295 space += _promoInfo.refillSize() + 2 * MinChunkSize;
duke@435 1296 return space;
duke@435 1297 }
duke@435 1298
duke@435 1299 FreeChunk* CompactibleFreeListSpace::getChunkFromGreater(size_t numWords) {
duke@435 1300 FreeChunk* ret;
duke@435 1301
duke@435 1302 assert(numWords >= MinChunkSize, "Size is less than minimum");
duke@435 1303 assert(linearAllocationWouldFail() || bestFitFirst(),
duke@435 1304 "Should not be here");
duke@435 1305
duke@435 1306 size_t i;
duke@435 1307 size_t currSize = numWords + MinChunkSize;
duke@435 1308 assert(currSize % MinObjAlignment == 0, "currSize should be aligned");
duke@435 1309 for (i = currSize; i < IndexSetSize; i += IndexSetStride) {
duke@435 1310 FreeList* fl = &_indexedFreeList[i];
duke@435 1311 if (fl->head()) {
duke@435 1312 ret = getFromListGreater(fl, numWords);
duke@435 1313 assert(ret == NULL || ret->isFree(), "Should be returning a free chunk");
duke@435 1314 return ret;
duke@435 1315 }
duke@435 1316 }
duke@435 1317
duke@435 1318 currSize = MAX2((size_t)SmallForDictionary,
duke@435 1319 (size_t)(numWords + MinChunkSize));
duke@435 1320
duke@435 1321 /* Try to get a chunk that satisfies request, while avoiding
duke@435 1322 fragmentation that can't be handled. */
duke@435 1323 {
duke@435 1324 ret = dictionary()->getChunk(currSize);
duke@435 1325 if (ret != NULL) {
duke@435 1326 assert(ret->size() - numWords >= MinChunkSize,
duke@435 1327 "Chunk is too small");
duke@435 1328 _bt.allocated((HeapWord*)ret, ret->size());
duke@435 1329 /* Carve returned chunk. */
duke@435 1330 (void) splitChunkAndReturnRemainder(ret, numWords);
duke@435 1331 /* Label this as no longer a free chunk. */
duke@435 1332 assert(ret->isFree(), "This chunk should be free");
duke@435 1333 ret->linkPrev(NULL);
duke@435 1334 }
duke@435 1335 assert(ret == NULL || ret->isFree(), "Should be returning a free chunk");
duke@435 1336 return ret;
duke@435 1337 }
duke@435 1338 ShouldNotReachHere();
duke@435 1339 }
duke@435 1340
duke@435 1341 bool CompactibleFreeListSpace::verifyChunkInIndexedFreeLists(FreeChunk* fc)
duke@435 1342 const {
duke@435 1343 assert(fc->size() < IndexSetSize, "Size of chunk is too large");
duke@435 1344 return _indexedFreeList[fc->size()].verifyChunkInFreeLists(fc);
duke@435 1345 }
duke@435 1346
duke@435 1347 bool CompactibleFreeListSpace::verifyChunkInFreeLists(FreeChunk* fc) const {
duke@435 1348 if (fc->size() >= IndexSetSize) {
duke@435 1349 return dictionary()->verifyChunkInFreeLists(fc);
duke@435 1350 } else {
duke@435 1351 return verifyChunkInIndexedFreeLists(fc);
duke@435 1352 }
duke@435 1353 }
duke@435 1354
duke@435 1355 #ifndef PRODUCT
duke@435 1356 void CompactibleFreeListSpace::assert_locked() const {
duke@435 1357 CMSLockVerifier::assert_locked(freelistLock(), parDictionaryAllocLock());
duke@435 1358 }
ysr@1580 1359
ysr@1580 1360 void CompactibleFreeListSpace::assert_locked(const Mutex* lock) const {
ysr@1580 1361 CMSLockVerifier::assert_locked(lock);
ysr@1580 1362 }
duke@435 1363 #endif
duke@435 1364
duke@435 1365 FreeChunk* CompactibleFreeListSpace::allocateScratch(size_t size) {
duke@435 1366 // In the parallel case, the main thread holds the free list lock
duke@435 1367 // on behalf of the parallel threads.
duke@435 1368 FreeChunk* fc;
duke@435 1369 {
duke@435 1370 // If GC is parallel, this might be called by several threads.
duke@435 1371 // This should be rare enough that the locking overhead won't affect
duke@435 1372 // the sequential code.
duke@435 1373 MutexLockerEx x(parDictionaryAllocLock(),
duke@435 1374 Mutex::_no_safepoint_check_flag);
duke@435 1375 fc = getChunkFromDictionary(size);
duke@435 1376 }
duke@435 1377 if (fc != NULL) {
duke@435 1378 fc->dontCoalesce();
duke@435 1379 assert(fc->isFree(), "Should be free, but not coalescable");
duke@435 1380 // Verify that the block offset table shows this to
duke@435 1381 // be a single block, but not one which is unallocated.
duke@435 1382 _bt.verify_single_block((HeapWord*)fc, fc->size());
duke@435 1383 _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
duke@435 1384 }
duke@435 1385 return fc;
duke@435 1386 }
duke@435 1387
coleenp@548 1388 oop CompactibleFreeListSpace::promote(oop obj, size_t obj_size) {
duke@435 1389 assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
duke@435 1390 assert_locked();
duke@435 1391
duke@435 1392 // if we are tracking promotions, then first ensure space for
duke@435 1393 // promotion (including spooling space for saving header if necessary).
duke@435 1394 // then allocate and copy, then track promoted info if needed.
duke@435 1395 // When tracking (see PromotionInfo::track()), the mark word may
duke@435 1396 // be displaced and in this case restoration of the mark word
duke@435 1397 // occurs in the (oop_since_save_marks_)iterate phase.
duke@435 1398 if (_promoInfo.tracking() && !_promoInfo.ensure_spooling_space()) {
duke@435 1399 return NULL;
duke@435 1400 }
duke@435 1401 // Call the allocate(size_t, bool) form directly to avoid the
duke@435 1402 // additional call through the allocate(size_t) form. Having
duke@435 1403 // the compiler inline the call is problematic because allocate(size_t)
duke@435 1404 // is a virtual method.
duke@435 1405 HeapWord* res = allocate(adjustObjectSize(obj_size));
duke@435 1406 if (res != NULL) {
duke@435 1407 Copy::aligned_disjoint_words((HeapWord*)obj, res, obj_size);
duke@435 1408 // if we should be tracking promotions, do so.
duke@435 1409 if (_promoInfo.tracking()) {
duke@435 1410 _promoInfo.track((PromotedObject*)res);
duke@435 1411 }
duke@435 1412 }
duke@435 1413 return oop(res);
duke@435 1414 }
duke@435 1415
duke@435 1416 HeapWord*
duke@435 1417 CompactibleFreeListSpace::getChunkFromSmallLinearAllocBlock(size_t size) {
duke@435 1418 assert_locked();
duke@435 1419 assert(size >= MinChunkSize, "minimum chunk size");
duke@435 1420 assert(size < _smallLinearAllocBlock._allocation_size_limit,
duke@435 1421 "maximum from smallLinearAllocBlock");
duke@435 1422 return getChunkFromLinearAllocBlock(&_smallLinearAllocBlock, size);
duke@435 1423 }
duke@435 1424
duke@435 1425 HeapWord*
duke@435 1426 CompactibleFreeListSpace::getChunkFromLinearAllocBlock(LinearAllocBlock *blk,
duke@435 1427 size_t size) {
duke@435 1428 assert_locked();
duke@435 1429 assert(size >= MinChunkSize, "too small");
duke@435 1430 HeapWord* res = NULL;
duke@435 1431 // Try to do linear allocation from blk, making sure the block is non-empty first.
duke@435 1432 if (blk->_word_size == 0) {
duke@435 1433 // We have probably been unable to fill this either in the prologue or
duke@435 1434 // when it was exhausted at the last linear allocation. Bail out until
duke@435 1435 // next time.
duke@435 1436 assert(blk->_ptr == NULL, "consistency check");
duke@435 1437 return NULL;
duke@435 1438 }
duke@435 1439 assert(blk->_word_size != 0 && blk->_ptr != NULL, "consistency check");
duke@435 1440 res = getChunkFromLinearAllocBlockRemainder(blk, size);
duke@435 1441 if (res != NULL) return res;
duke@435 1442
duke@435 1443 // about to exhaust this linear allocation block
duke@435 1444 if (blk->_word_size == size) { // exactly satisfied
duke@435 1445 res = blk->_ptr;
duke@435 1446 _bt.allocated(res, blk->_word_size);
duke@435 1447 } else if (size + MinChunkSize <= blk->_refillSize) {
ysr@1580 1448 size_t sz = blk->_word_size;
duke@435 1449 // Update _unallocated_block if the size is such that chunk would be
duke@435 1450 // returned to the indexed free list. All other chunks in the indexed
duke@435 1451 // free lists are allocated from the dictionary so that _unallocated_block
duke@435 1452 // has already been adjusted for them. Do it here so that the cost
duke@435 1453 // is incurred for all chunks added back to the indexed free lists.
ysr@1580 1454 if (sz < SmallForDictionary) {
ysr@1580 1455 _bt.allocated(blk->_ptr, sz);
duke@435 1456 }
duke@435 1457 // Return the chunk that isn't big enough, and then refill below.
ysr@1580 1458 addChunkToFreeLists(blk->_ptr, sz);
ysr@1580 1459 splitBirth(sz);
duke@435 1460 // Don't keep statistics on adding back chunk from a LinAB.
duke@435 1461 } else {
duke@435 1462 // A refilled block would not satisfy the request.
duke@435 1463 return NULL;
duke@435 1464 }
duke@435 1465
duke@435 1466 blk->_ptr = NULL; blk->_word_size = 0;
duke@435 1467 refillLinearAllocBlock(blk);
duke@435 1468 assert(blk->_ptr == NULL || blk->_word_size >= size + MinChunkSize,
duke@435 1469 "block was replenished");
duke@435 1470 if (res != NULL) {
duke@435 1471 splitBirth(size);
duke@435 1472 repairLinearAllocBlock(blk);
duke@435 1473 } else if (blk->_ptr != NULL) {
duke@435 1474 res = blk->_ptr;
duke@435 1475 size_t blk_size = blk->_word_size;
duke@435 1476 blk->_word_size -= size;
duke@435 1477 blk->_ptr += size;
duke@435 1478 splitBirth(size);
duke@435 1479 repairLinearAllocBlock(blk);
duke@435 1480 // Update BOT last so that other (parallel) GC threads see a consistent
duke@435 1481 // view of the BOT and free blocks.
duke@435 1482 // Above must occur before BOT is updated below.
ysr@2071 1483 OrderAccess::storestore();
duke@435 1484 _bt.split_block(res, blk_size, size); // adjust block offset table
duke@435 1485 }
duke@435 1486 return res;
duke@435 1487 }
duke@435 1488
duke@435 1489 HeapWord* CompactibleFreeListSpace::getChunkFromLinearAllocBlockRemainder(
duke@435 1490 LinearAllocBlock* blk,
duke@435 1491 size_t size) {
duke@435 1492 assert_locked();
duke@435 1493 assert(size >= MinChunkSize, "too small");
duke@435 1494
duke@435 1495 HeapWord* res = NULL;
duke@435 1496 // This is the common case. Keep it simple.
duke@435 1497 if (blk->_word_size >= size + MinChunkSize) {
duke@435 1498 assert(blk->_ptr != NULL, "consistency check");
duke@435 1499 res = blk->_ptr;
duke@435 1500 // Note that the BOT is up-to-date for the linAB before allocation. It
duke@435 1501 // indicates the start of the linAB. The split_block() updates the
duke@435 1502 // BOT for the linAB after the allocation (indicates the start of the
duke@435 1503 // next chunk to be allocated).
duke@435 1504 size_t blk_size = blk->_word_size;
duke@435 1505 blk->_word_size -= size;
duke@435 1506 blk->_ptr += size;
duke@435 1507 splitBirth(size);
duke@435 1508 repairLinearAllocBlock(blk);
duke@435 1509 // Update BOT last so that other (parallel) GC threads see a consistent
duke@435 1510 // view of the BOT and free blocks.
duke@435 1511 // Above must occur before BOT is updated below.
ysr@2071 1512 OrderAccess::storestore();
duke@435 1513 _bt.split_block(res, blk_size, size); // adjust block offset table
duke@435 1514 _bt.allocated(res, size);
duke@435 1515 }
duke@435 1516 return res;
duke@435 1517 }
duke@435 1518
duke@435 1519 FreeChunk*
duke@435 1520 CompactibleFreeListSpace::getChunkFromIndexedFreeList(size_t size) {
duke@435 1521 assert_locked();
duke@435 1522 assert(size < SmallForDictionary, "just checking");
duke@435 1523 FreeChunk* res;
duke@435 1524 res = _indexedFreeList[size].getChunkAtHead();
duke@435 1525 if (res == NULL) {
duke@435 1526 res = getChunkFromIndexedFreeListHelper(size);
duke@435 1527 }
duke@435 1528 _bt.verify_not_unallocated((HeapWord*) res, size);
ysr@1580 1529 assert(res == NULL || res->size() == size, "Incorrect block size");
duke@435 1530 return res;
duke@435 1531 }
duke@435 1532
duke@435 1533 FreeChunk*
ysr@1580 1534 CompactibleFreeListSpace::getChunkFromIndexedFreeListHelper(size_t size,
ysr@1580 1535 bool replenish) {
duke@435 1536 assert_locked();
duke@435 1537 FreeChunk* fc = NULL;
duke@435 1538 if (size < SmallForDictionary) {
duke@435 1539 assert(_indexedFreeList[size].head() == NULL ||
duke@435 1540 _indexedFreeList[size].surplus() <= 0,
duke@435 1541 "List for this size should be empty or under populated");
duke@435 1542 // Try best fit in exact lists before replenishing the list
duke@435 1543 if (!bestFitFirst() || (fc = bestFitSmall(size)) == NULL) {
duke@435 1544 // Replenish list.
duke@435 1545 //
duke@435 1546 // Things tried that failed.
duke@435 1547 // Tried allocating out of the two LinAB's first before
duke@435 1548 // replenishing lists.
duke@435 1549 // Tried small linAB of size 256 (size in indexed list)
duke@435 1550 // and replenishing indexed lists from the small linAB.
duke@435 1551 //
duke@435 1552 FreeChunk* newFc = NULL;
ysr@1580 1553 const size_t replenish_size = CMSIndexedFreeListReplenish * size;
duke@435 1554 if (replenish_size < SmallForDictionary) {
duke@435 1555 // Do not replenish from an underpopulated size.
duke@435 1556 if (_indexedFreeList[replenish_size].surplus() > 0 &&
duke@435 1557 _indexedFreeList[replenish_size].head() != NULL) {
ysr@1580 1558 newFc = _indexedFreeList[replenish_size].getChunkAtHead();
ysr@1580 1559 } else if (bestFitFirst()) {
duke@435 1560 newFc = bestFitSmall(replenish_size);
duke@435 1561 }
duke@435 1562 }
ysr@1580 1563 if (newFc == NULL && replenish_size > size) {
ysr@1580 1564 assert(CMSIndexedFreeListReplenish > 1, "ctl pt invariant");
ysr@1580 1565 newFc = getChunkFromIndexedFreeListHelper(replenish_size, false);
ysr@1580 1566 }
ysr@1580 1567 // Note: The stats update regarding the split-death of the block obtained
ysr@1580 1568 // above will be recorded below precisely when we know we are actually
ysr@1580 1569 // going to split it into more than one piece.
duke@435 1570 if (newFc != NULL) {
ysr@1580 1571 if (replenish || CMSReplenishIntermediate) {
ysr@1580 1572 // Replenish this list and return one block to caller.
ysr@1580 1573 size_t i;
ysr@1580 1574 FreeChunk *curFc, *nextFc;
ysr@1580 1575 size_t num_blk = newFc->size() / size;
ysr@1580 1576 assert(num_blk >= 1, "Smaller than requested?");
ysr@1580 1577 assert(newFc->size() % size == 0, "Should be integral multiple of request");
ysr@1580 1578 if (num_blk > 1) {
ysr@1580 1579 // we are sure we will be splitting the block just obtained
ysr@1580 1580 // into multiple pieces; record the split-death of the original
ysr@1580 1581 splitDeath(replenish_size);
ysr@1580 1582 }
ysr@1580 1583 // carve up and link blocks 0, ..., num_blk - 2
ysr@1580 1584 // The last chunk is not added to the lists but is returned as the
ysr@1580 1585 // free chunk.
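// For example, if CMSIndexedFreeListReplenish is 4 and size is 8 words,
// a 32-word replenishing block yields num_blk == 4: three 8-word chunks
// are linked onto _indexedFreeList[8] and the fourth is handed back to
// the caller as fc.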
ysr@1580 1586 for (curFc = newFc, nextFc = (FreeChunk*)((HeapWord*)curFc + size),
ysr@1580 1587 i = 0;
ysr@1580 1588 i < (num_blk - 1);
ysr@1580 1589 curFc = nextFc, nextFc = (FreeChunk*)((HeapWord*)nextFc + size),
ysr@1580 1590 i++) {
ysr@1580 1591 curFc->setSize(size);
ysr@1580 1592 // Don't record this as a return in order to try and
ysr@1580 1593 // determine the "returns" from a GC.
ysr@1580 1594 _bt.verify_not_unallocated((HeapWord*) curFc, size);
ysr@1580 1595 _indexedFreeList[size].returnChunkAtTail(curFc, false);
ysr@1580 1596 _bt.mark_block((HeapWord*)curFc, size);
ysr@1580 1597 splitBirth(size);
ysr@1580 1598 // Don't record the initial population of the indexed list
ysr@1580 1599 // as a split birth.
ysr@1580 1600 }
ysr@1580 1601
ysr@1580 1602 // check that the arithmetic was OK above
ysr@1580 1603 assert((HeapWord*)nextFc == (HeapWord*)newFc + num_blk*size,
ysr@1580 1604 "inconsistency in carving newFc");
duke@435 1605 curFc->setSize(size);
duke@435 1606 _bt.mark_block((HeapWord*)curFc, size);
duke@435 1607 splitBirth(size);
ysr@1580 1608 fc = curFc;
ysr@1580 1609 } else {
ysr@1580 1610 // Return entire block to caller
ysr@1580 1611 fc = newFc;
duke@435 1612 }
duke@435 1613 }
duke@435 1614 }
duke@435 1615 } else {
duke@435 1616 // Get a free chunk from the free chunk dictionary to be returned to
duke@435 1617 // replenish the indexed free list.
duke@435 1618 fc = getChunkFromDictionaryExact(size);
duke@435 1619 }
ysr@1580 1620 // assert(fc == NULL || fc->isFree(), "Should be returning a free chunk");
duke@435 1621 return fc;
duke@435 1622 }
duke@435 1623
duke@435 1624 FreeChunk*
duke@435 1625 CompactibleFreeListSpace::getChunkFromDictionary(size_t size) {
duke@435 1626 assert_locked();
duke@435 1627 FreeChunk* fc = _dictionary->getChunk(size);
duke@435 1628 if (fc == NULL) {
duke@435 1629 return NULL;
duke@435 1630 }
duke@435 1631 _bt.allocated((HeapWord*)fc, fc->size());
duke@435 1632 if (fc->size() >= size + MinChunkSize) {
duke@435 1633 fc = splitChunkAndReturnRemainder(fc, size);
duke@435 1634 }
duke@435 1635 assert(fc->size() >= size, "chunk too small");
duke@435 1636 assert(fc->size() < size + MinChunkSize, "chunk too big");
duke@435 1637 _bt.verify_single_block((HeapWord*)fc, fc->size());
duke@435 1638 return fc;
duke@435 1639 }
duke@435 1640
duke@435 1641 FreeChunk*
duke@435 1642 CompactibleFreeListSpace::getChunkFromDictionaryExact(size_t size) {
duke@435 1643 assert_locked();
duke@435 1644 FreeChunk* fc = _dictionary->getChunk(size);
duke@435 1645 if (fc == NULL) {
duke@435 1646 return fc;
duke@435 1647 }
duke@435 1648 _bt.allocated((HeapWord*)fc, fc->size());
duke@435 1649 if (fc->size() == size) {
duke@435 1650 _bt.verify_single_block((HeapWord*)fc, size);
duke@435 1651 return fc;
duke@435 1652 }
duke@435 1653 assert(fc->size() > size, "getChunk() guarantee");
duke@435 1654 if (fc->size() < size + MinChunkSize) {
duke@435 1655 // Return the chunk to the dictionary and go get a bigger one.
duke@435 1656 returnChunkToDictionary(fc);
duke@435 1657 fc = _dictionary->getChunk(size + MinChunkSize);
duke@435 1658 if (fc == NULL) {
duke@435 1659 return NULL;
duke@435 1660 }
duke@435 1661 _bt.allocated((HeapWord*)fc, fc->size());
duke@435 1662 }
duke@435 1663 assert(fc->size() >= size + MinChunkSize, "tautology");
duke@435 1664 fc = splitChunkAndReturnRemainder(fc, size);
duke@435 1665 assert(fc->size() == size, "chunk is wrong size");
duke@435 1666 _bt.verify_single_block((HeapWord*)fc, size);
duke@435 1667 return fc;
duke@435 1668 }
duke@435 1669
duke@435 1670 void
duke@435 1671 CompactibleFreeListSpace::returnChunkToDictionary(FreeChunk* chunk) {
duke@435 1672 assert_locked();
duke@435 1673
duke@435 1674 size_t size = chunk->size();
duke@435 1675 _bt.verify_single_block((HeapWord*)chunk, size);
duke@435 1676 // adjust _unallocated_block downward, as necessary
duke@435 1677 _bt.freed((HeapWord*)chunk, size);
duke@435 1678 _dictionary->returnChunk(chunk);
ysr@1580 1679 #ifndef PRODUCT
ysr@1580 1680 if (CMSCollector::abstract_state() != CMSCollector::Sweeping) {
ysr@1580 1681 TreeChunk::as_TreeChunk(chunk)->list()->verify_stats();
ysr@1580 1682 }
ysr@1580 1683 #endif // PRODUCT
duke@435 1684 }
duke@435 1685
duke@435 1686 void
duke@435 1687 CompactibleFreeListSpace::returnChunkToFreeList(FreeChunk* fc) {
duke@435 1688 assert_locked();
duke@435 1689 size_t size = fc->size();
duke@435 1690 _bt.verify_single_block((HeapWord*) fc, size);
duke@435 1691 _bt.verify_not_unallocated((HeapWord*) fc, size);
duke@435 1692 if (_adaptive_freelists) {
duke@435 1693 _indexedFreeList[size].returnChunkAtTail(fc);
duke@435 1694 } else {
duke@435 1695 _indexedFreeList[size].returnChunkAtHead(fc);
duke@435 1696 }
ysr@1580 1697 #ifndef PRODUCT
ysr@1580 1698 if (CMSCollector::abstract_state() != CMSCollector::Sweeping) {
ysr@1580 1699 _indexedFreeList[size].verify_stats();
ysr@1580 1700 }
ysr@1580 1701 #endif // PRODUCT
duke@435 1702 }
duke@435 1703
duke@435 1704 // Add chunk to end of last block -- if it's the largest
duke@435 1705 // block -- and update BOT and census data. We would
duke@435 1706 // of course have preferred to coalesce it with the
duke@435 1707 // last block, but it's currently less expensive to find the
duke@435 1708 // largest block than it is to find the last.
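// For instance (addresses illustrative), if the largest dictionary chunk
// ends exactly where "chunk" begins, the two are coalesced into a single
// free block of the combined size before being added back; otherwise
// "chunk" is added on its own.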
duke@435 1709 void
duke@435 1710 CompactibleFreeListSpace::addChunkToFreeListsAtEndRecordingStats(
duke@435 1711 HeapWord* chunk, size_t size) {
duke@435 1712 // check that the chunk does lie in this space!
duke@435 1713 assert(chunk != NULL && is_in_reserved(chunk), "Not in this space!");
duke@435 1714 // One of the parallel gc task threads may be here
duke@435 1715 // whilst others are allocating.
duke@435 1716 Mutex* lock = NULL;
duke@435 1717 if (ParallelGCThreads != 0) {
duke@435 1718 lock = &_parDictionaryAllocLock;
duke@435 1719 }
duke@435 1720 FreeChunk* ec;
duke@435 1721 {
duke@435 1722 MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
duke@435 1723 ec = dictionary()->findLargestDict(); // get largest block
duke@435 1724 if (ec != NULL && ec->end() == chunk) {
duke@435 1725 // It's a coterminal block - we can coalesce.
duke@435 1726 size_t old_size = ec->size();
duke@435 1727 coalDeath(old_size);
duke@435 1728 removeChunkFromDictionary(ec);
duke@435 1729 size += old_size;
duke@435 1730 } else {
duke@435 1731 ec = (FreeChunk*)chunk;
duke@435 1732 }
duke@435 1733 }
duke@435 1734 ec->setSize(size);
duke@435 1735 debug_only(ec->mangleFreed(size));
duke@435 1736 if (size < SmallForDictionary) {
duke@435 1737 lock = _indexedFreeListParLocks[size];
duke@435 1738 }
duke@435 1739 MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
duke@435 1740 addChunkAndRepairOffsetTable((HeapWord*)ec, size, true);
duke@435 1741 // record the birth under the lock since the recording involves
duke@435 1742 // manipulation of the list on which the chunk lives and
duke@435 1743 // if the chunk is allocated and is the last on the list,
duke@435 1744 // the list can go away.
duke@435 1745 coalBirth(size);
duke@435 1746 }
duke@435 1747
duke@435 1748 void
duke@435 1749 CompactibleFreeListSpace::addChunkToFreeLists(HeapWord* chunk,
duke@435 1750 size_t size) {
duke@435 1751 // check that the chunk does lie in this space!
duke@435 1752 assert(chunk != NULL && is_in_reserved(chunk), "Not in this space!");
duke@435 1753 assert_locked();
duke@435 1754 _bt.verify_single_block(chunk, size);
duke@435 1755
duke@435 1756 FreeChunk* fc = (FreeChunk*) chunk;
duke@435 1757 fc->setSize(size);
duke@435 1758 debug_only(fc->mangleFreed(size));
duke@435 1759 if (size < SmallForDictionary) {
duke@435 1760 returnChunkToFreeList(fc);
duke@435 1761 } else {
duke@435 1762 returnChunkToDictionary(fc);
duke@435 1763 }
duke@435 1764 }
duke@435 1765
duke@435 1766 void
duke@435 1767 CompactibleFreeListSpace::addChunkAndRepairOffsetTable(HeapWord* chunk,
duke@435 1768 size_t size, bool coalesced) {
duke@435 1769 assert_locked();
duke@435 1770 assert(chunk != NULL, "null chunk");
duke@435 1771 if (coalesced) {
duke@435 1772 // repair BOT
duke@435 1773 _bt.single_block(chunk, size);
duke@435 1774 }
duke@435 1775 addChunkToFreeLists(chunk, size);
duke@435 1776 }
duke@435 1777
duke@435 1778 // We _must_ find the purported chunk on our free lists;
duke@435 1779 // we assert if we don't.
duke@435 1780 void
duke@435 1781 CompactibleFreeListSpace::removeFreeChunkFromFreeLists(FreeChunk* fc) {
duke@435 1782 size_t size = fc->size();
duke@435 1783 assert_locked();
duke@435 1784 debug_only(verifyFreeLists());
duke@435 1785 if (size < SmallForDictionary) {
duke@435 1786 removeChunkFromIndexedFreeList(fc);
duke@435 1787 } else {
duke@435 1788 removeChunkFromDictionary(fc);
duke@435 1789 }
duke@435 1790 _bt.verify_single_block((HeapWord*)fc, size);
duke@435 1791 debug_only(verifyFreeLists());
duke@435 1792 }
duke@435 1793
duke@435 1794 void
duke@435 1795 CompactibleFreeListSpace::removeChunkFromDictionary(FreeChunk* fc) {
duke@435 1796 size_t size = fc->size();
duke@435 1797 assert_locked();
duke@435 1798 assert(fc != NULL, "null chunk");
duke@435 1799 _bt.verify_single_block((HeapWord*)fc, size);
duke@435 1800 _dictionary->removeChunk(fc);
duke@435 1801 // adjust _unallocated_block upward, as necessary
duke@435 1802 _bt.allocated((HeapWord*)fc, size);
duke@435 1803 }
duke@435 1804
duke@435 1805 void
duke@435 1806 CompactibleFreeListSpace::removeChunkFromIndexedFreeList(FreeChunk* fc) {
duke@435 1807 assert_locked();
duke@435 1808 size_t size = fc->size();
duke@435 1809 _bt.verify_single_block((HeapWord*)fc, size);
duke@435 1810 NOT_PRODUCT(
duke@435 1811 if (FLSVerifyIndexTable) {
duke@435 1812 verifyIndexedFreeList(size);
duke@435 1813 }
duke@435 1814 )
duke@435 1815 _indexedFreeList[size].removeChunk(fc);
duke@435 1816 debug_only(fc->clearNext());
duke@435 1817 debug_only(fc->clearPrev());
duke@435 1818 NOT_PRODUCT(
duke@435 1819 if (FLSVerifyIndexTable) {
duke@435 1820 verifyIndexedFreeList(size);
duke@435 1821 }
duke@435 1822 )
duke@435 1823 }
duke@435 1824
duke@435 1825 FreeChunk* CompactibleFreeListSpace::bestFitSmall(size_t numWords) {
duke@435 1826 /* A hint is the next larger size that has a surplus.
duke@435 1827 Start search at a size large enough to guarantee that
duke@435 1828 the excess is >= MIN_CHUNK. */
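// Illustrative example: a request of numWords starts the search at
// align_object_size(numWords + MinChunkSize); if that list's hint names a
// larger size whose list has a surplus and is non-empty, a chunk is split
// off from it, otherwise we keep following hints up to IndexSetSize.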
duke@435 1829 size_t start = align_object_size(numWords + MinChunkSize);
duke@435 1830 if (start < IndexSetSize) {
duke@435 1831 FreeList* it = _indexedFreeList;
duke@435 1832 size_t hint = _indexedFreeList[start].hint();
duke@435 1833 while (hint < IndexSetSize) {
duke@435 1834 assert(hint % MinObjAlignment == 0, "hint should be aligned");
duke@435 1835 FreeList *fl = &_indexedFreeList[hint];
duke@435 1836 if (fl->surplus() > 0 && fl->head() != NULL) {
duke@435 1837 // Found a list with surplus, reset original hint
duke@435 1838 // and split out a free chunk which is returned.
duke@435 1839 _indexedFreeList[start].set_hint(hint);
duke@435 1840 FreeChunk* res = getFromListGreater(fl, numWords);
duke@435 1841 assert(res == NULL || res->isFree(),
duke@435 1842 "Should be returning a free chunk");
duke@435 1843 return res;
duke@435 1844 }
duke@435 1845 hint = fl->hint(); /* keep looking */
duke@435 1846 }
duke@435 1847 /* None found. */
duke@435 1848 it[start].set_hint(IndexSetSize);
duke@435 1849 }
duke@435 1850 return NULL;
duke@435 1851 }
duke@435 1852
duke@435 1853 /* Requires fl->size >= numWords + MinChunkSize */
duke@435 1854 FreeChunk* CompactibleFreeListSpace::getFromListGreater(FreeList* fl,
duke@435 1855 size_t numWords) {
duke@435 1856 FreeChunk *curr = fl->head();
duke@435 1857 size_t oldNumWords = curr->size();
duke@435 1858 assert(numWords >= MinChunkSize, "Word size is too small");
duke@435 1859 assert(curr != NULL, "List is empty");
duke@435 1860 assert(oldNumWords >= numWords + MinChunkSize,
duke@435 1861 "Size of chunks in the list is too small");
duke@435 1862
duke@435 1863 fl->removeChunk(curr);
duke@435 1864 // recorded indirectly by splitChunkAndReturnRemainder -
duke@435 1865 // smallSplit(oldNumWords, numWords);
duke@435 1866 FreeChunk* new_chunk = splitChunkAndReturnRemainder(curr, numWords);
duke@435 1867 // Does anything have to be done for the remainder in terms of
duke@435 1868 // fixing the card table?
duke@435 1869 assert(new_chunk == NULL || new_chunk->isFree(),
duke@435 1870 "Should be returning a free chunk");
duke@435 1871 return new_chunk;
duke@435 1872 }
duke@435 1873
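// Example of the split below (sizes illustrative): splitting a 100-word
// free chunk with new_size == 40 returns the leading 40 words to the
// caller and carves a 60-word free chunk at chunk + 40, which goes back
// to the indexed lists or the dictionary depending on its size.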
duke@435 1874 FreeChunk*
duke@435 1875 CompactibleFreeListSpace::splitChunkAndReturnRemainder(FreeChunk* chunk,
duke@435 1876 size_t new_size) {
duke@435 1877 assert_locked();
duke@435 1878 size_t size = chunk->size();
duke@435 1879 assert(size > new_size, "Split from a smaller block?");
duke@435 1880 assert(is_aligned(chunk), "alignment problem");
duke@435 1881 assert(size == adjustObjectSize(size), "alignment problem");
duke@435 1882 size_t rem_size = size - new_size;
duke@435 1883 assert(rem_size == adjustObjectSize(rem_size), "alignment problem");
duke@435 1884 assert(rem_size >= MinChunkSize, "Free chunk smaller than minimum");
duke@435 1885 FreeChunk* ffc = (FreeChunk*)((HeapWord*)chunk + new_size);
duke@435 1886 assert(is_aligned(ffc), "alignment problem");
duke@435 1887 ffc->setSize(rem_size);
duke@435 1888 ffc->linkNext(NULL);
duke@435 1889 ffc->linkPrev(NULL); // Mark as a free block for other (parallel) GC threads.
duke@435 1890 // Above must occur before BOT is updated below.
duke@435 1891 // adjust block offset table
ysr@2071 1892 OrderAccess::storestore();
ysr@2071 1893 assert(chunk->isFree() && ffc->isFree(), "Error");
duke@435 1894 _bt.split_block((HeapWord*)chunk, chunk->size(), new_size);
duke@435 1895 if (rem_size < SmallForDictionary) {
duke@435 1896 bool is_par = (SharedHeap::heap()->n_par_threads() > 0);
duke@435 1897 if (is_par) _indexedFreeListParLocks[rem_size]->lock();
duke@435 1898 returnChunkToFreeList(ffc);
duke@435 1899 split(size, rem_size);
duke@435 1900 if (is_par) _indexedFreeListParLocks[rem_size]->unlock();
duke@435 1901 } else {
duke@435 1902 returnChunkToDictionary(ffc);
duke@435 1903 split(size, rem_size);
duke@435 1904 }
duke@435 1905 chunk->setSize(new_size);
duke@435 1906 return chunk;
duke@435 1907 }
duke@435 1908
duke@435 1909 void
duke@435 1910 CompactibleFreeListSpace::sweep_completed() {
duke@435 1911 // Now that space is probably plentiful, refill linear
duke@435 1912 // allocation blocks as needed.
duke@435 1913 refillLinearAllocBlocksIfNeeded();
duke@435 1914 }
duke@435 1915
duke@435 1916 void
duke@435 1917 CompactibleFreeListSpace::gc_prologue() {
duke@435 1918 assert_locked();
duke@435 1919 if (PrintFLSStatistics != 0) {
duke@435 1920 gclog_or_tty->print("Before GC:\n");
duke@435 1921 reportFreeListStatistics();
duke@435 1922 }
duke@435 1923 refillLinearAllocBlocksIfNeeded();
duke@435 1924 }
duke@435 1925
duke@435 1926 void
duke@435 1927 CompactibleFreeListSpace::gc_epilogue() {
duke@435 1928 assert_locked();
duke@435 1929 if (PrintGCDetails && Verbose && !_adaptive_freelists) {
duke@435 1930 if (_smallLinearAllocBlock._word_size == 0)
duke@435 1931 warning("CompactibleFreeListSpace(epilogue):: Linear allocation failure");
duke@435 1932 }
duke@435 1933 assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");
duke@435 1934 _promoInfo.stopTrackingPromotions();
duke@435 1935 repairLinearAllocationBlocks();
duke@435 1936 // Print Space's stats
duke@435 1937 if (PrintFLSStatistics != 0) {
duke@435 1938 gclog_or_tty->print("After GC:\n");
duke@435 1939 reportFreeListStatistics();
duke@435 1940 }
duke@435 1941 }
duke@435 1942
duke@435 1943 // Iteration support, mostly delegated from a CMS generation
duke@435 1944
duke@435 1945 void CompactibleFreeListSpace::save_marks() {
duke@435 1946 // mark the "end" of the used space at the time of this call;
duke@435 1947 // note, however, that promoted objects from this point
duke@435 1948 // on are tracked in the _promoInfo below.
ysr@2071 1949 set_saved_mark_word(unallocated_block());
duke@435 1950 // inform allocator that promotions should be tracked.
duke@435 1951 assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");
duke@435 1952 _promoInfo.startTrackingPromotions();
duke@435 1953 }
duke@435 1954
duke@435 1955 bool CompactibleFreeListSpace::no_allocs_since_save_marks() {
duke@435 1956 assert(_promoInfo.tracking(), "No preceding save_marks?");
ysr@2132 1957 assert(SharedHeap::heap()->n_par_threads() == 0,
ysr@2132 1958 "Shouldn't be called if using parallel gc.");
duke@435 1959 return _promoInfo.noPromotions();
duke@435 1960 }
duke@435 1961
duke@435 1962 #define CFLS_OOP_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix) \
duke@435 1963 \
duke@435 1964 void CompactibleFreeListSpace:: \
duke@435 1965 oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk) { \
duke@435 1966 assert(SharedHeap::heap()->n_par_threads() == 0, \
duke@435 1967 "Shouldn't be called (yet) during parallel part of gc."); \
duke@435 1968 _promoInfo.promoted_oops_iterate##nv_suffix(blk); \
duke@435 1969 /* \
duke@435 1970 * This also restores any displaced headers and removes the elements from \
duke@435 1971 * the iteration set as they are processed, so that we have a clean slate \
duke@435 1972 * at the end of the iteration. Note, thus, that if new objects are \
duke@435 1973 * promoted as a result of the iteration they are iterated over as well. \
duke@435 1974 */ \
duke@435 1975 assert(_promoInfo.noPromotions(), "_promoInfo inconsistency"); \
duke@435 1976 }
duke@435 1977
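// The invocation below instantiates the definition above once per
// (closure type, suffix) pair declared by ALL_SINCE_SAVE_MARKS_CLOSURES,
// producing the concrete oop_since_save_marks_iterate_nv / _v methods.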
duke@435 1978 ALL_SINCE_SAVE_MARKS_CLOSURES(CFLS_OOP_SINCE_SAVE_MARKS_DEFN)
duke@435 1979
duke@435 1980
duke@435 1981 void CompactibleFreeListSpace::object_iterate_since_last_GC(ObjectClosure* cl) {
duke@435 1982 // ugghh... how would one do this efficiently for a non-contiguous space?
duke@435 1983 guarantee(false, "NYI");
duke@435 1984 }
duke@435 1985
ysr@447 1986 bool CompactibleFreeListSpace::linearAllocationWouldFail() const {
duke@435 1987 return _smallLinearAllocBlock._word_size == 0;
duke@435 1988 }
duke@435 1989
duke@435 1990 void CompactibleFreeListSpace::repairLinearAllocationBlocks() {
duke@435 1991 // Fix up linear allocation blocks to look like free blocks
duke@435 1992 repairLinearAllocBlock(&_smallLinearAllocBlock);
duke@435 1993 }
duke@435 1994
duke@435 1995 void CompactibleFreeListSpace::repairLinearAllocBlock(LinearAllocBlock* blk) {
duke@435 1996 assert_locked();
duke@435 1997 if (blk->_ptr != NULL) {
duke@435 1998 assert(blk->_word_size != 0 && blk->_word_size >= MinChunkSize,
duke@435 1999 "Minimum block size requirement");
duke@435 2000 FreeChunk* fc = (FreeChunk*)(blk->_ptr);
duke@435 2001 fc->setSize(blk->_word_size);
duke@435 2002 fc->linkPrev(NULL); // mark as free
duke@435 2003 fc->dontCoalesce();
duke@435 2004 assert(fc->isFree(), "just marked it free");
duke@435 2005 assert(fc->cantCoalesce(), "just marked it uncoalescable");
duke@435 2006 }
duke@435 2007 }
duke@435 2008
duke@435 2009 void CompactibleFreeListSpace::refillLinearAllocBlocksIfNeeded() {
duke@435 2010 assert_locked();
duke@435 2011 if (_smallLinearAllocBlock._ptr == NULL) {
duke@435 2012 assert(_smallLinearAllocBlock._word_size == 0,
duke@435 2013 "Size of linAB should be zero if the ptr is NULL");
duke@435 2014 // Reset the linAB refill and allocation size limit.
duke@435 2015 _smallLinearAllocBlock.set(0, 0, 1024*SmallForLinearAlloc, SmallForLinearAlloc);
duke@435 2016 }
duke@435 2017 refillLinearAllocBlockIfNeeded(&_smallLinearAllocBlock);
duke@435 2018 }
duke@435 2019
duke@435 2020 void
duke@435 2021 CompactibleFreeListSpace::refillLinearAllocBlockIfNeeded(LinearAllocBlock* blk) {
duke@435 2022 assert_locked();
duke@435 2023 assert((blk->_ptr == NULL && blk->_word_size == 0) ||
duke@435 2024 (blk->_ptr != NULL && blk->_word_size >= MinChunkSize),
duke@435 2025 "blk invariant");
duke@435 2026 if (blk->_ptr == NULL) {
duke@435 2027 refillLinearAllocBlock(blk);
duke@435 2028 }
duke@435 2029 if (PrintMiscellaneous && Verbose) {
duke@435 2030 if (blk->_word_size == 0) {
duke@435 2031 warning("CompactibleFreeListSpace(prologue):: Linear allocation failure");
duke@435 2032 }
duke@435 2033 }
duke@435 2034 }
duke@435 2035
duke@435 2036 void
duke@435 2037 CompactibleFreeListSpace::refillLinearAllocBlock(LinearAllocBlock* blk) {
duke@435 2038 assert_locked();
duke@435 2039 assert(blk->_word_size == 0 && blk->_ptr == NULL,
duke@435 2040 "linear allocation block should be empty");
duke@435 2041 FreeChunk* fc;
duke@435 2042 if (blk->_refillSize < SmallForDictionary &&
duke@435 2043 (fc = getChunkFromIndexedFreeList(blk->_refillSize)) != NULL) {
duke@435 2044 // A linAB's strategy might be to use small sizes to reduce
duke@435 2045 // fragmentation but still get the benefits of allocation from a
duke@435 2046 // linAB.
duke@435 2047 } else {
duke@435 2048 fc = getChunkFromDictionary(blk->_refillSize);
duke@435 2049 }
duke@435 2050 if (fc != NULL) {
duke@435 2051 blk->_ptr = (HeapWord*)fc;
duke@435 2052 blk->_word_size = fc->size();
duke@435 2053 fc->dontCoalesce(); // to prevent sweeper from sweeping us up
duke@435 2054 }
duke@435 2055 }
duke@435 2056
ysr@447 2057 // Support for concurrent collection policy decisions.
ysr@447 2058 bool CompactibleFreeListSpace::should_concurrent_collect() const {
ysr@447 2059 // In the future we might want to factor fragmentation stats --
ysr@447 2060 // including erosion of the "mountain" -- into this decision as well.
ysr@447 2061 return !adaptive_freelists() && linearAllocationWouldFail();
ysr@447 2062 }
ysr@447 2063
duke@435 2064 // Support for compaction
duke@435 2065
duke@435 2066 void CompactibleFreeListSpace::prepare_for_compaction(CompactPoint* cp) {
duke@435 2067 SCAN_AND_FORWARD(cp,end,block_is_obj,block_size);
duke@435 2068 // prepare_for_compaction() uses the space between live objects
duke@435 2069 // so that a later phase can skip dead space quickly. So verification
duke@435 2070 // of the free lists doesn't work afterwards.
duke@435 2071 }
duke@435 2072
duke@435 2073 #define obj_size(q) adjustObjectSize(oop(q)->size())
duke@435 2074 #define adjust_obj_size(s) adjustObjectSize(s)
duke@435 2075
duke@435 2076 void CompactibleFreeListSpace::adjust_pointers() {
duke@435 2077 // In other versions of adjust_pointers(), a bail out
duke@435 2078 // based on the amount of live data in the generation
duke@435 2079 // (i.e., if 0, bail out) may be used.
duke@435 2080 // Cannot test used() == 0 here because the free lists have already
duke@435 2081 // been mangled by the compaction.
duke@435 2082
duke@435 2083 SCAN_AND_ADJUST_POINTERS(adjust_obj_size);
duke@435 2084 // See note about verification in prepare_for_compaction().
duke@435 2085 }
duke@435 2086
duke@435 2087 void CompactibleFreeListSpace::compact() {
duke@435 2088 SCAN_AND_COMPACT(obj_size);
duke@435 2089 }
duke@435 2090
duke@435 2091 // fragmentation_metric = 1 - [sum of (fbs**2) / (sum of fbs)**2]
duke@435 2092 // where fbs is free block sizes
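// E.g., a single free block gives frag == 1 - 1 == 0 (no fragmentation),
// while N equally sized free blocks give frag == 1 - 1/N, which approaches
// 1 as the free space splinters into many small pieces.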
duke@435 2093 double CompactibleFreeListSpace::flsFrag() const {
duke@435 2094 size_t itabFree = totalSizeInIndexedFreeLists();
duke@435 2095 double frag = 0.0;
duke@435 2096 size_t i;
duke@435 2097
duke@435 2098 for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
duke@435 2099 double sz = i;
duke@435 2100 frag += _indexedFreeList[i].count() * (sz * sz);
duke@435 2101 }
duke@435 2102
duke@435 2103 double totFree = itabFree +
duke@435 2104 _dictionary->totalChunkSize(DEBUG_ONLY(freelistLock()));
duke@435 2105 if (totFree > 0) {
duke@435 2106 frag = ((frag + _dictionary->sum_of_squared_block_sizes()) /
duke@435 2107 (totFree * totFree));
duke@435 2108 frag = (double)1.0 - frag;
duke@435 2109 } else {
duke@435 2110 assert(frag == 0.0, "Follows from totFree == 0");
duke@435 2111 }
duke@435 2112 return frag;
duke@435 2113 }
duke@435 2114
duke@435 2115 void CompactibleFreeListSpace::beginSweepFLCensus(
duke@435 2116 float inter_sweep_current,
ysr@1580 2117 float inter_sweep_estimate,
ysr@1580 2118 float intra_sweep_estimate) {
duke@435 2119 assert_locked();
duke@435 2120 size_t i;
duke@435 2121 for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
duke@435 2122 FreeList* fl = &_indexedFreeList[i];
ysr@1580 2123 if (PrintFLSStatistics > 1) {
ysr@1580 2124 gclog_or_tty->print("size[" SIZE_FORMAT "] : ", i);
ysr@1580 2125 }
ysr@1580 2126 fl->compute_desired(inter_sweep_current, inter_sweep_estimate, intra_sweep_estimate);
ysr@1580 2127 fl->set_coalDesired((ssize_t)((double)fl->desired() * CMSSmallCoalSurplusPercent));
duke@435 2128 fl->set_beforeSweep(fl->count());
duke@435 2129 fl->set_bfrSurp(fl->surplus());
duke@435 2130 }
ysr@1580 2131 _dictionary->beginSweepDictCensus(CMSLargeCoalSurplusPercent,
duke@435 2132 inter_sweep_current,
ysr@1580 2133 inter_sweep_estimate,
ysr@1580 2134 intra_sweep_estimate);
duke@435 2135 }
duke@435 2136
duke@435 2137 void CompactibleFreeListSpace::setFLSurplus() {
duke@435 2138 assert_locked();
duke@435 2139 size_t i;
duke@435 2140 for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
duke@435 2141 FreeList *fl = &_indexedFreeList[i];
duke@435 2142 fl->set_surplus(fl->count() -
ysr@1580 2143 (ssize_t)((double)fl->desired() * CMSSmallSplitSurplusPercent));
duke@435 2144 }
duke@435 2145 }
duke@435 2146
duke@435 2147 void CompactibleFreeListSpace::setFLHints() {
duke@435 2148 assert_locked();
duke@435 2149 size_t i;
duke@435 2150 size_t h = IndexSetSize;
duke@435 2151 for (i = IndexSetSize - 1; i != 0; i -= IndexSetStride) {
duke@435 2152 FreeList *fl = &_indexedFreeList[i];
duke@435 2153 fl->set_hint(h);
duke@435 2154 if (fl->surplus() > 0) {
duke@435 2155 h = i;
duke@435 2156 }
duke@435 2157 }
duke@435 2158 }
duke@435 2159
duke@435 2160 void CompactibleFreeListSpace::clearFLCensus() {
duke@435 2161 assert_locked();
duke@435 2162 int i;
duke@435 2163 for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
duke@435 2164 FreeList *fl = &_indexedFreeList[i];
duke@435 2165 fl->set_prevSweep(fl->count());
duke@435 2166 fl->set_coalBirths(0);
duke@435 2167 fl->set_coalDeaths(0);
duke@435 2168 fl->set_splitBirths(0);
duke@435 2169 fl->set_splitDeaths(0);
duke@435 2170 }
duke@435 2171 }
duke@435 2172
ysr@447 2173 void CompactibleFreeListSpace::endSweepFLCensus(size_t sweep_count) {
ysr@1580 2174 if (PrintFLSStatistics > 0) {
ysr@1580 2175 HeapWord* largestAddr = (HeapWord*) dictionary()->findLargestDict();
ysr@1580 2176 gclog_or_tty->print_cr("CMS: Large block " PTR_FORMAT,
ysr@1580 2177 largestAddr);
ysr@1580 2178 }
duke@435 2179 setFLSurplus();
duke@435 2180 setFLHints();
duke@435 2181 if (PrintGC && PrintFLSCensus > 0) {
ysr@447 2182 printFLCensus(sweep_count);
duke@435 2183 }
duke@435 2184 clearFLCensus();
duke@435 2185 assert_locked();
ysr@1580 2186 _dictionary->endSweepDictCensus(CMSLargeSplitSurplusPercent);
duke@435 2187 }
duke@435 2188
duke@435 2189 bool CompactibleFreeListSpace::coalOverPopulated(size_t size) {
duke@435 2190 if (size < SmallForDictionary) {
duke@435 2191 FreeList *fl = &_indexedFreeList[size];
duke@435 2192 return (fl->coalDesired() < 0) ||
duke@435 2193 ((int)fl->count() > fl->coalDesired());
duke@435 2194 } else {
duke@435 2195 return dictionary()->coalDictOverPopulated(size);
duke@435 2196 }
duke@435 2197 }
duke@435 2198
duke@435 2199 void CompactibleFreeListSpace::smallCoalBirth(size_t size) {
duke@435 2200 assert(size < SmallForDictionary, "Size too large for indexed list");
duke@435 2201 FreeList *fl = &_indexedFreeList[size];
duke@435 2202 fl->increment_coalBirths();
duke@435 2203 fl->increment_surplus();
duke@435 2204 }
duke@435 2205
duke@435 2206 void CompactibleFreeListSpace::smallCoalDeath(size_t size) {
duke@435 2207 assert(size < SmallForDictionary, "Size too large for indexed list");
duke@435 2208 FreeList *fl = &_indexedFreeList[size];
duke@435 2209 fl->increment_coalDeaths();
duke@435 2210 fl->decrement_surplus();
duke@435 2211 }
duke@435 2212
duke@435 2213 void CompactibleFreeListSpace::coalBirth(size_t size) {
duke@435 2214 if (size < SmallForDictionary) {
duke@435 2215 smallCoalBirth(size);
duke@435 2216 } else {
duke@435 2217 dictionary()->dictCensusUpdate(size,
duke@435 2218 false /* split */,
duke@435 2219 true /* birth */);
duke@435 2220 }
duke@435 2221 }
duke@435 2222
duke@435 2223 void CompactibleFreeListSpace::coalDeath(size_t size) {
duke@435 2224 if (size < SmallForDictionary) {
duke@435 2225 smallCoalDeath(size);
duke@435 2226 } else {
duke@435 2227 dictionary()->dictCensusUpdate(size,
duke@435 2228 false /* split */,
duke@435 2229 false /* birth */);
duke@435 2230 }
duke@435 2231 }
duke@435 2232
duke@435 2233 void CompactibleFreeListSpace::smallSplitBirth(size_t size) {
duke@435 2234 assert(size < SmallForDictionary, "Size too large for indexed list");
duke@435 2235 FreeList *fl = &_indexedFreeList[size];
duke@435 2236 fl->increment_splitBirths();
duke@435 2237 fl->increment_surplus();
duke@435 2238 }
duke@435 2239
duke@435 2240 void CompactibleFreeListSpace::smallSplitDeath(size_t size) {
duke@435 2241 assert(size < SmallForDictionary, "Size too large for indexed list");
duke@435 2242 FreeList *fl = &_indexedFreeList[size];
duke@435 2243 fl->increment_splitDeaths();
duke@435 2244 fl->decrement_surplus();
duke@435 2245 }
duke@435 2246
duke@435 2247 void CompactibleFreeListSpace::splitBirth(size_t size) {
duke@435 2248 if (size < SmallForDictionary) {
duke@435 2249 smallSplitBirth(size);
duke@435 2250 } else {
duke@435 2251 dictionary()->dictCensusUpdate(size,
duke@435 2252 true /* split */,
duke@435 2253 true /* birth */);
duke@435 2254 }
duke@435 2255 }
duke@435 2256
duke@435 2257 void CompactibleFreeListSpace::splitDeath(size_t size) {
duke@435 2258 if (size < SmallForDictionary) {
duke@435 2259 smallSplitDeath(size);
duke@435 2260 } else {
duke@435 2261 dictionary()->dictCensusUpdate(size,
duke@435 2262 true /* split */,
duke@435 2263 false /* birth */);
duke@435 2264 }
duke@435 2265 }
duke@435 2266
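// Census bookkeeping for a split: e.g., splitting a 100-word chunk into
// 40- and 60-word pieces records splitDeath(100), splitBirth(40) and
// splitBirth(60).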
duke@435 2267 void CompactibleFreeListSpace::split(size_t from, size_t to1) {
duke@435 2268 size_t to2 = from - to1;
duke@435 2269 splitDeath(from);
duke@435 2270 splitBirth(to1);
duke@435 2271 splitBirth(to2);
duke@435 2272 }
duke@435 2273
duke@435 2274 void CompactibleFreeListSpace::print() const {
ysr@2294 2275 print_on(tty);
duke@435 2276 }
duke@435 2277
duke@435 2278 void CompactibleFreeListSpace::prepare_for_verify() {
duke@435 2279 assert_locked();
duke@435 2280 repairLinearAllocationBlocks();
duke@435 2281 // Verify that the SpoolBlocks look like free blocks of
duke@435 2282 // appropriate sizes... To be done ...
duke@435 2283 }
duke@435 2284
duke@435 2285 class VerifyAllBlksClosure: public BlkClosure {
coleenp@548 2286 private:
duke@435 2287 const CompactibleFreeListSpace* _sp;
duke@435 2288 const MemRegion _span;
ysr@2071 2289 HeapWord* _last_addr;
ysr@2071 2290 size_t _last_size;
ysr@2071 2291 bool _last_was_obj;
ysr@2071 2292 bool _last_was_live;
duke@435 2293
duke@435 2294 public:
duke@435 2295 VerifyAllBlksClosure(const CompactibleFreeListSpace* sp,
ysr@2071 2296 MemRegion span) : _sp(sp), _span(span),
ysr@2071 2297 _last_addr(NULL), _last_size(0),
ysr@2071 2298 _last_was_obj(false), _last_was_live(false) { }
duke@435 2299
coleenp@548 2300 virtual size_t do_blk(HeapWord* addr) {
duke@435 2301 size_t res;
ysr@2071 2302 bool was_obj = false;
ysr@2071 2303 bool was_live = false;
duke@435 2304 if (_sp->block_is_obj(addr)) {
ysr@2071 2305 was_obj = true;
duke@435 2306 oop p = oop(addr);
duke@435 2307 guarantee(p->is_oop(), "Should be an oop");
duke@435 2308 res = _sp->adjustObjectSize(p->size());
duke@435 2309 if (_sp->obj_is_alive(addr)) {
ysr@2071 2310 was_live = true;
duke@435 2311 p->verify();
duke@435 2312 }
duke@435 2313 } else {
duke@435 2314 FreeChunk* fc = (FreeChunk*)addr;
duke@435 2315 res = fc->size();
duke@435 2316 if (FLSVerifyLists && !fc->cantCoalesce()) {
duke@435 2317 guarantee(_sp->verifyChunkInFreeLists(fc),
duke@435 2318 "Chunk should be on a free list");
duke@435 2319 }
duke@435 2320 }
ysr@2071 2321 if (res == 0) {
ysr@2071 2322 gclog_or_tty->print_cr("Livelock: no rank reduction!");
ysr@2071 2323 gclog_or_tty->print_cr(
ysr@2071 2324 " Current: addr = " PTR_FORMAT ", size = " SIZE_FORMAT ", obj = %s, live = %s \n"
ysr@2071 2325 " Previous: addr = " PTR_FORMAT ", size = " SIZE_FORMAT ", obj = %s, live = %s \n",
ysr@2071 2326 addr, res, was_obj ?"true":"false", was_live ?"true":"false",
ysr@2071 2327 _last_addr, _last_size, _last_was_obj?"true":"false", _last_was_live?"true":"false");
ysr@2071 2328 _sp->print_on(gclog_or_tty);
ysr@2071 2329 guarantee(false, "Seppuku!");
ysr@2071 2330 }
ysr@2071 2331 _last_addr = addr;
ysr@2071 2332 _last_size = res;
ysr@2071 2333 _last_was_obj = was_obj;
ysr@2071 2334 _last_was_live = was_live;
duke@435 2335 return res;
duke@435 2336 }
duke@435 2337 };
duke@435 2338
duke@435 2339 class VerifyAllOopsClosure: public OopClosure {
coleenp@548 2340 private:
duke@435 2341 const CMSCollector* _collector;
duke@435 2342 const CompactibleFreeListSpace* _sp;
duke@435 2343 const MemRegion _span;
duke@435 2344 const bool _past_remark;
duke@435 2345 const CMSBitMap* _bit_map;
duke@435 2346
coleenp@548 2347 protected:
coleenp@548 2348 void do_oop(void* p, oop obj) {
coleenp@548 2349 if (_span.contains(obj)) { // the interior oop points into CMS heap
coleenp@548 2350 if (!_span.contains(p)) { // reference from outside CMS heap
coleenp@548 2351 // Should be a valid object; the first disjunct below allows
coleenp@548 2352 // us to sidestep an assertion in block_is_obj() that insists
coleenp@548 2353 // that p be in _sp. Note that several generations (and spaces)
coleenp@548 2354 // are spanned by _span (CMS heap) above.
coleenp@548 2355 guarantee(!_sp->is_in_reserved(obj) ||
coleenp@548 2356 _sp->block_is_obj((HeapWord*)obj),
coleenp@548 2357 "Should be an object");
coleenp@548 2358 guarantee(obj->is_oop(), "Should be an oop");
coleenp@548 2359 obj->verify();
coleenp@548 2360 if (_past_remark) {
coleenp@548 2361 // Remark has been completed, the object should be marked
coleenp@548 2362 _bit_map->isMarked((HeapWord*)obj);
coleenp@548 2363 }
coleenp@548 2364 } else { // reference within CMS heap
coleenp@548 2365 if (_past_remark) {
coleenp@548 2366 // Remark has been completed -- so the referent should have
coleenp@548 2367 // been marked, if referring object is.
coleenp@548 2368 if (_bit_map->isMarked(_collector->block_start(p))) {
coleenp@548 2369 guarantee(_bit_map->isMarked((HeapWord*)obj), "Marking error?");
coleenp@548 2370 }
coleenp@548 2371 }
coleenp@548 2372 }
coleenp@548 2373 } else if (_sp->is_in_reserved(p)) {
coleenp@548 2374 // the reference is from FLS, and points out of FLS
coleenp@548 2375 guarantee(obj->is_oop(), "Should be an oop");
coleenp@548 2376 obj->verify();
coleenp@548 2377 }
coleenp@548 2378 }
coleenp@548 2379
coleenp@548 2380 template <class T> void do_oop_work(T* p) {
coleenp@548 2381 T heap_oop = oopDesc::load_heap_oop(p);
coleenp@548 2382 if (!oopDesc::is_null(heap_oop)) {
coleenp@548 2383 oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
coleenp@548 2384 do_oop(p, obj);
coleenp@548 2385 }
coleenp@548 2386 }
coleenp@548 2387
duke@435 2388 public:
duke@435 2389 VerifyAllOopsClosure(const CMSCollector* collector,
duke@435 2390 const CompactibleFreeListSpace* sp, MemRegion span,
duke@435 2391 bool past_remark, CMSBitMap* bit_map) :
duke@435 2392 OopClosure(), _collector(collector), _sp(sp), _span(span),
duke@435 2393 _past_remark(past_remark), _bit_map(bit_map) { }
duke@435 2394
coleenp@548 2395 virtual void do_oop(oop* p) { VerifyAllOopsClosure::do_oop_work(p); }
coleenp@548 2396 virtual void do_oop(narrowOop* p) { VerifyAllOopsClosure::do_oop_work(p); }
duke@435 2397 };
duke@435 2398
duke@435 2399 void CompactibleFreeListSpace::verify(bool ignored) const {
duke@435 2400 assert_lock_strong(&_freelistLock);
duke@435 2401 verify_objects_initialized();
duke@435 2402 MemRegion span = _collector->_span;
duke@435 2403 bool past_remark = (_collector->abstract_state() ==
duke@435 2404 CMSCollector::Sweeping);
duke@435 2405
duke@435 2406 ResourceMark rm;
duke@435 2407 HandleMark hm;
duke@435 2408
duke@435 2409 // Check integrity of CFL data structures
duke@435 2410 _promoInfo.verify();
duke@435 2411 _dictionary->verify();
duke@435 2412 if (FLSVerifyIndexTable) {
duke@435 2413 verifyIndexedFreeLists();
duke@435 2414 }
duke@435 2415 // Check integrity of all objects and free blocks in space
duke@435 2416 {
duke@435 2417 VerifyAllBlksClosure cl(this, span);
duke@435 2418 ((CompactibleFreeListSpace*)this)->blk_iterate(&cl); // cast off const
duke@435 2419 }
duke@435 2420 // Check that all references in the heap to FLS
duke@435 2421 // are to valid objects in FLS or that references in
duke@435 2422 // FLS are to valid objects elsewhere in the heap
duke@435 2423   if (FLSVerifyAllHeapReferences) {
duke@435 2425 VerifyAllOopsClosure cl(_collector, this, span, past_remark,
duke@435 2426 _collector->markBitMap());
duke@435 2427 CollectedHeap* ch = Universe::heap();
duke@435 2428 ch->oop_iterate(&cl); // all oops in generations
duke@435 2429 ch->permanent_oop_iterate(&cl); // all oops in perm gen
duke@435 2430 }
duke@435 2431
duke@435 2432 if (VerifyObjectStartArray) {
duke@435 2433 // Verify the block offset table
duke@435 2434 _bt.verify();
duke@435 2435 }
duke@435 2436 }
duke@435 2437
duke@435 2438 #ifndef PRODUCT
duke@435 2439 void CompactibleFreeListSpace::verifyFreeLists() const {
duke@435 2440 if (FLSVerifyLists) {
duke@435 2441 _dictionary->verify();
duke@435 2442 verifyIndexedFreeLists();
duke@435 2443 } else {
duke@435 2444 if (FLSVerifyDictionary) {
duke@435 2445 _dictionary->verify();
duke@435 2446 }
duke@435 2447 if (FLSVerifyIndexTable) {
duke@435 2448 verifyIndexedFreeLists();
duke@435 2449 }
duke@435 2450 }
duke@435 2451 }
duke@435 2452 #endif
duke@435 2453
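// Slots for sizes below MinChunkSize can never hold a free chunk, so their
// heads must remain NULL; every slot from MinChunkSize up is checked in
// detail by verifyIndexedFreeList().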
duke@435 2454 void CompactibleFreeListSpace::verifyIndexedFreeLists() const {
duke@435 2455 size_t i = 0;
duke@435 2456 for (; i < MinChunkSize; i++) {
duke@435 2457 guarantee(_indexedFreeList[i].head() == NULL, "should be NULL");
duke@435 2458 }
duke@435 2459 for (; i < IndexSetSize; i++) {
duke@435 2460 verifyIndexedFreeList(i);
duke@435 2461 }
duke@435 2462 }
duke@435 2463
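// Walk the free list for "size", checking for each chunk that the size
// matches, the chunk is marked free, the doubly-linked list is intact and
// ends at the recorded tail, and that the recorded count matches the walk.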
duke@435 2464 void CompactibleFreeListSpace::verifyIndexedFreeList(size_t size) const {
ysr@1580 2465 FreeChunk* fc = _indexedFreeList[size].head();
ysr@1580 2466 FreeChunk* tail = _indexedFreeList[size].tail();
ysr@1580 2467 size_t num = _indexedFreeList[size].count();
ysr@1580 2468 size_t n = 0;
ysr@777 2469 guarantee((size % 2 == 0) || fc == NULL, "Odd slots should be empty");
ysr@1580 2470 for (; fc != NULL; fc = fc->next(), n++) {
duke@435 2471 guarantee(fc->size() == size, "Size inconsistency");
duke@435 2472 guarantee(fc->isFree(), "!free?");
duke@435 2473 guarantee(fc->next() == NULL || fc->next()->prev() == fc, "Broken list");
ysr@1580 2474 guarantee((fc->next() == NULL) == (fc == tail), "Incorrect tail");
duke@435 2475 }
ysr@1580 2476 guarantee(n == num, "Incorrect count");
duke@435 2477 }
duke@435 2478
duke@435 2479 #ifndef PRODUCT
duke@435 2480 void CompactibleFreeListSpace::checkFreeListConsistency() const {
duke@435 2481 assert(_dictionary->minSize() <= IndexSetSize,
duke@435 2482 "Some sizes can't be allocated without recourse to"
duke@435 2483 " linear allocation buffers");
duke@435 2484 assert(MIN_TREE_CHUNK_SIZE*HeapWordSize == sizeof(TreeChunk),
duke@435 2485 "else MIN_TREE_CHUNK_SIZE is wrong");
duke@435 2486 assert((IndexSetStride == 2 && IndexSetStart == 2) ||
duke@435 2487 (IndexSetStride == 1 && IndexSetStart == 1), "just checking");
duke@435 2488 assert((IndexSetStride != 2) || (MinChunkSize % 2 == 0),
duke@435 2489 "Some for-loops may be incorrectly initialized");
duke@435 2490 assert((IndexSetStride != 2) || (IndexSetSize % 2 == 1),
duke@435 2491 "For-loops that iterate over IndexSet with stride 2 may be wrong");
duke@435 2492 }
duke@435 2493 #endif
duke@435 2494
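// Print a per-size census of the indexed free lists followed by the
// dictionary census. "growth" is the net birth count (split and coalesce
// births minus deaths) relative to the count at the previous sweep;
// "deficit" is (desired - count)/desired.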
ysr@447 2495 void CompactibleFreeListSpace::printFLCensus(size_t sweep_count) const {
duke@435 2496 assert_lock_strong(&_freelistLock);
ysr@447 2497 FreeList total;
ysr@447 2498 gclog_or_tty->print("end sweep# " SIZE_FORMAT "\n", sweep_count);
ysr@447 2499 FreeList::print_labels_on(gclog_or_tty, "size");
duke@435 2500 size_t totalFree = 0;
duke@435 2501 for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
duke@435 2502 const FreeList *fl = &_indexedFreeList[i];
ysr@447 2503 totalFree += fl->count() * fl->size();
ysr@447 2504 if (i % (40*IndexSetStride) == 0) {
ysr@447 2505 FreeList::print_labels_on(gclog_or_tty, "size");
ysr@447 2506 }
ysr@447 2507 fl->print_on(gclog_or_tty);
ysr@447 2508 total.set_bfrSurp( total.bfrSurp() + fl->bfrSurp() );
ysr@447 2509 total.set_surplus( total.surplus() + fl->surplus() );
ysr@447 2510 total.set_desired( total.desired() + fl->desired() );
ysr@447 2511 total.set_prevSweep( total.prevSweep() + fl->prevSweep() );
ysr@447 2512 total.set_beforeSweep(total.beforeSweep() + fl->beforeSweep());
ysr@447 2513 total.set_count( total.count() + fl->count() );
ysr@447 2514 total.set_coalBirths( total.coalBirths() + fl->coalBirths() );
ysr@447 2515 total.set_coalDeaths( total.coalDeaths() + fl->coalDeaths() );
ysr@447 2516 total.set_splitBirths(total.splitBirths() + fl->splitBirths());
ysr@447 2517 total.set_splitDeaths(total.splitDeaths() + fl->splitDeaths());
duke@435 2518 }
ysr@447 2519 total.print_on(gclog_or_tty, "TOTAL");
ysr@447 2520 gclog_or_tty->print_cr("Total free in indexed lists "
ysr@447 2521 SIZE_FORMAT " words", totalFree);
duke@435 2522 gclog_or_tty->print("growth: %8.5f deficit: %8.5f\n",
ysr@447 2523 (double)(total.splitBirths()+total.coalBirths()-total.splitDeaths()-total.coalDeaths())/
ysr@447 2524 (total.prevSweep() != 0 ? (double)total.prevSweep() : 1.0),
ysr@447 2525 (double)(total.desired() - total.count())/(total.desired() != 0 ? (double)total.desired() : 1.0));
duke@435 2526 _dictionary->printDictCensus();
duke@435 2527 }
duke@435 2528
ysr@1580 2529 ///////////////////////////////////////////////////////////////////////////
ysr@1580 2530 // CFLS_LAB
ysr@1580 2531 ///////////////////////////////////////////////////////////////////////////
ysr@1580 2532
ysr@1580 2533 #define VECTOR_257(x) \
ysr@1580 2534 /* 1 2 3 4 5 6 7 8 9 1x 11 12 13 14 15 16 17 18 19 2x 21 22 23 24 25 26 27 28 29 3x 31 32 */ \
ysr@1580 2535 { x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, \
ysr@1580 2536 x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, \
ysr@1580 2537 x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, \
ysr@1580 2538 x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, \
ysr@1580 2539 x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, \
ysr@1580 2540 x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, \
ysr@1580 2541 x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, \
ysr@1580 2542 x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, \
ysr@1580 2543 x }
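// VECTOR_257 expands to an initializer list with 257 copies of its argument,
// one per slot of the per-thread indexed free list table; the CFLS_LAB
// constructor asserts that IndexSetSize is still 257 so the macro stays in
// sync with the table size.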
ysr@1580 2544
ysr@1580 2545 // Initialize with default setting of CMSParPromoteBlocksToClaim, _not_
ysr@1580 2546 // OldPLABSize, whose static default is different; if overridden at the
ysr@1580 2547 // command-line, this will get reinitialized via a call to
ysr@1580 2548 // modify_initialization() below.
ysr@1580 2549 AdaptiveWeightedAverage CFLS_LAB::_blocks_to_claim[] =
ysr@1580 2550 VECTOR_257(AdaptiveWeightedAverage(OldPLABWeight, (float)CMSParPromoteBlocksToClaim));
ysr@1580 2551 size_t CFLS_LAB::_global_num_blocks[] = VECTOR_257(0);
ysr@1580 2552 int CFLS_LAB::_global_num_workers[] = VECTOR_257(0);
duke@435 2553
duke@435 2554 CFLS_LAB::CFLS_LAB(CompactibleFreeListSpace* cfls) :
duke@435 2555 _cfls(cfls)
duke@435 2556 {
ysr@1580 2557 assert(CompactibleFreeListSpace::IndexSetSize == 257, "Modify VECTOR_257() macro above");
duke@435 2558 for (size_t i = CompactibleFreeListSpace::IndexSetStart;
duke@435 2559 i < CompactibleFreeListSpace::IndexSetSize;
duke@435 2560 i += CompactibleFreeListSpace::IndexSetStride) {
duke@435 2561 _indexedFreeList[i].set_size(i);
ysr@1580 2562 _num_blocks[i] = 0;
ysr@1580 2563 }
ysr@1580 2564 }
ysr@1580 2565
ysr@1580 2566 static bool _CFLS_LAB_modified = false;
ysr@1580 2567
ysr@1580 2568 void CFLS_LAB::modify_initialization(size_t n, unsigned wt) {
ysr@1580 2569 assert(!_CFLS_LAB_modified, "Call only once");
ysr@1580 2570 _CFLS_LAB_modified = true;
ysr@1580 2571 for (size_t i = CompactibleFreeListSpace::IndexSetStart;
ysr@1580 2572 i < CompactibleFreeListSpace::IndexSetSize;
ysr@1580 2573 i += CompactibleFreeListSpace::IndexSetStride) {
ysr@1580 2574 _blocks_to_claim[i].modify(n, wt, true /* force */);
duke@435 2575 }
duke@435 2576 }
duke@435 2577
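// Allocate word_sz words on behalf of this worker thread: requests of
// IndexSetSize words or more go to the shared dictionary under
// parDictionaryAllocLock(); smaller requests are served from the worker's
// local indexed free list, which is refilled from the global pool when it
// runs dry.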
duke@435 2578 HeapWord* CFLS_LAB::alloc(size_t word_sz) {
duke@435 2579 FreeChunk* res;
ysr@2132 2580 assert(word_sz == _cfls->adjustObjectSize(word_sz), "Error");
duke@435 2581 if (word_sz >= CompactibleFreeListSpace::IndexSetSize) {
duke@435 2582 // This locking manages sync with other large object allocations.
duke@435 2583 MutexLockerEx x(_cfls->parDictionaryAllocLock(),
duke@435 2584 Mutex::_no_safepoint_check_flag);
duke@435 2585 res = _cfls->getChunkFromDictionaryExact(word_sz);
duke@435 2586 if (res == NULL) return NULL;
duke@435 2587 } else {
duke@435 2588 FreeList* fl = &_indexedFreeList[word_sz];
duke@435 2589 if (fl->count() == 0) {
duke@435 2590 // Attempt to refill this local free list.
ysr@1580 2591 get_from_global_pool(word_sz, fl);
duke@435 2592 // If it didn't work, give up.
duke@435 2593 if (fl->count() == 0) return NULL;
duke@435 2594 }
duke@435 2595 res = fl->getChunkAtHead();
duke@435 2596 assert(res != NULL, "Why was count non-zero?");
duke@435 2597 }
duke@435 2598 res->markNotFree();
duke@435 2599 assert(!res->isFree(), "shouldn't be marked free");
coleenp@622 2600 assert(oop(res)->klass_or_null() == NULL, "should look uninitialized");
duke@435 2601 // mangle a just allocated object with a distinct pattern.
duke@435 2602 debug_only(res->mangleAllocated(word_sz));
duke@435 2603 return (HeapWord*)res;
duke@435 2604 }
duke@435 2605
ysr@1580 2606 // Get a chunk of blocks of the right size and update related
ysr@1580 2607 // book-keeping stats
ysr@1580 2608 void CFLS_LAB::get_from_global_pool(size_t word_sz, FreeList* fl) {
ysr@1580 2609 // Get the #blocks we want to claim
ysr@1580 2610 size_t n_blks = (size_t)_blocks_to_claim[word_sz].average();
ysr@1580 2611 assert(n_blks > 0, "Error");
ysr@1580 2612 assert(ResizePLAB || n_blks == OldPLABSize, "Error");
ysr@1580 2613 // In some cases, when the application has a phase change,
ysr@1580 2614 // there may be a sudden and sharp shift in the object survival
ysr@1580 2615 // profile, and updating the counts at the end of a scavenge
ysr@1580 2616 // may not be quick enough, giving rise to large scavenge pauses
ysr@1580 2617 // during these phase changes. It is beneficial to detect such
ysr@1580 2618 // changes on-the-fly during a scavenge and avoid such a phase-change
ysr@1580 2619 // pothole. The following code is a heuristic attempt to do that.
ysr@1580 2620 // It is protected by a product flag until we have gained
ysr@1580 2621 // enough experience with this heuristic and fine-tuned its behaviour.
ysr@1580 2622 // WARNING: This might increase fragmentation if we overreact to
ysr@1580 2623 // small spikes, so some kind of historical smoothing based on
ysr@1580 2624 // previous experience with the greater reactivity might be useful.
ysr@1580 2625 // Lacking sufficient experience, CMSOldPLABResizeQuicker is disabled by
ysr@1580 2626 // default.
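  // For illustration only (the numbers are made up, not the defaults): if
  // CMSOldPLABToleranceFactor*CMSOldPLABNumRefills*n_blks == 400 and this
  // worker has already consumed _num_blocks[word_sz] == 1200 blocks of this
  // size, then multiple == 3 and, with CMSOldPLABReactivityFactor == 2, the
  // claim grows from n_blks to 7*n_blks, subject to the CMSOldPLABMax cap.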
ysr@1580 2627 if (ResizeOldPLAB && CMSOldPLABResizeQuicker) {
ysr@1580 2628 size_t multiple = _num_blocks[word_sz]/(CMSOldPLABToleranceFactor*CMSOldPLABNumRefills*n_blks);
ysr@1580 2629 n_blks += CMSOldPLABReactivityFactor*multiple*n_blks;
ysr@1580 2630 n_blks = MIN2(n_blks, CMSOldPLABMax);
ysr@1580 2631 }
ysr@1580 2632 assert(n_blks > 0, "Error");
ysr@1580 2633 _cfls->par_get_chunk_of_blocks(word_sz, n_blks, fl);
ysr@1580 2634 // Update stats table entry for this block size
ysr@1580 2635 _num_blocks[word_sz] += fl->count();
ysr@1580 2636 }
ysr@1580 2637
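// Recompute the desired per-worker claim for each block size, typically at
// the end of a scavenge: the sample is the number of blocks consumed per
// worker per refill, clamped to [CMSOldPLABMin, CMSOldPLABMax] and smoothed
// through the AdaptiveWeightedAverage; the global counters are then reset
// for the next round.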
ysr@1580 2638 void CFLS_LAB::compute_desired_plab_size() {
ysr@1580 2639 for (size_t i = CompactibleFreeListSpace::IndexSetStart;
duke@435 2640 i < CompactibleFreeListSpace::IndexSetSize;
duke@435 2641 i += CompactibleFreeListSpace::IndexSetStride) {
ysr@1580 2642 assert((_global_num_workers[i] == 0) == (_global_num_blocks[i] == 0),
ysr@1580 2643 "Counter inconsistency");
ysr@1580 2644 if (_global_num_workers[i] > 0) {
ysr@1580 2645 // Need to smooth wrt historical average
ysr@1580 2646 if (ResizeOldPLAB) {
ysr@1580 2647 _blocks_to_claim[i].sample(
ysr@1580 2648 MAX2((size_t)CMSOldPLABMin,
ysr@1580 2649 MIN2((size_t)CMSOldPLABMax,
ysr@1580 2650 _global_num_blocks[i]/(_global_num_workers[i]*CMSOldPLABNumRefills))));
ysr@1580 2651 }
ysr@1580 2652 // Reset counters for next round
ysr@1580 2653 _global_num_workers[i] = 0;
ysr@1580 2654 _global_num_blocks[i] = 0;
ysr@1580 2655 if (PrintOldPLAB) {
ysr@1580 2656         gclog_or_tty->print_cr("[" SIZE_FORMAT "]: " SIZE_FORMAT, i, (size_t)_blocks_to_claim[i].average());
ysr@1580 2657 }
duke@435 2658 }
duke@435 2659 }
duke@435 2660 }
duke@435 2661
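// Return this worker's unused blocks of each size to the shared indexed
// free lists and fold its per-size usage into the global counters consumed
// by compute_desired_plab_size(). As noted below, this runs single-threaded
// with the world stopped; the CFLS_LAB_PARALLEL_ACCESS paths show the
// locking that parallel retirement would require.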
ysr@1580 2662 void CFLS_LAB::retire(int tid) {
ysr@1580 2663 // We run this single threaded with the world stopped;
ysr@1580 2664 // so no need for locks and such.
ysr@1580 2665 #define CFLS_LAB_PARALLEL_ACCESS 0
ysr@1580 2666 NOT_PRODUCT(Thread* t = Thread::current();)
ysr@1580 2667 assert(Thread::current()->is_VM_thread(), "Error");
ysr@1580 2668 assert(CompactibleFreeListSpace::IndexSetStart == CompactibleFreeListSpace::IndexSetStride,
ysr@1580 2669          "Will access uninitialized slot below");
ysr@1580 2670 #if CFLS_LAB_PARALLEL_ACCESS
ysr@1580 2671 for (size_t i = CompactibleFreeListSpace::IndexSetSize - 1;
ysr@1580 2672 i > 0;
ysr@1580 2673 i -= CompactibleFreeListSpace::IndexSetStride) {
ysr@1580 2674 #else // CFLS_LAB_PARALLEL_ACCESS
ysr@1580 2675 for (size_t i = CompactibleFreeListSpace::IndexSetStart;
ysr@1580 2676 i < CompactibleFreeListSpace::IndexSetSize;
ysr@1580 2677 i += CompactibleFreeListSpace::IndexSetStride) {
ysr@1580 2678 #endif // !CFLS_LAB_PARALLEL_ACCESS
ysr@1580 2679 assert(_num_blocks[i] >= (size_t)_indexedFreeList[i].count(),
ysr@1580 2680 "Can't retire more than what we obtained");
ysr@1580 2681 if (_num_blocks[i] > 0) {
ysr@1580 2682 size_t num_retire = _indexedFreeList[i].count();
ysr@1580 2683 assert(_num_blocks[i] > num_retire, "Should have used at least one");
ysr@1580 2684 {
ysr@1580 2685 #if CFLS_LAB_PARALLEL_ACCESS
ysr@1580 2686 MutexLockerEx x(_cfls->_indexedFreeListParLocks[i],
ysr@1580 2687 Mutex::_no_safepoint_check_flag);
ysr@1580 2688 #endif // CFLS_LAB_PARALLEL_ACCESS
ysr@1580 2689 // Update globals stats for num_blocks used
ysr@1580 2690 _global_num_blocks[i] += (_num_blocks[i] - num_retire);
ysr@1580 2691 _global_num_workers[i]++;
ysr@1580 2692 assert(_global_num_workers[i] <= (ssize_t)ParallelGCThreads, "Too big");
ysr@1580 2693 if (num_retire > 0) {
ysr@1580 2694 _cfls->_indexedFreeList[i].prepend(&_indexedFreeList[i]);
ysr@1580 2695 // Reset this list.
ysr@1580 2696 _indexedFreeList[i] = FreeList();
ysr@1580 2697 _indexedFreeList[i].set_size(i);
ysr@1580 2698 }
ysr@1580 2699 }
ysr@1580 2700 if (PrintOldPLAB) {
ysr@1580 2701       gclog_or_tty->print_cr("%d[" SIZE_FORMAT "]: " SIZE_FORMAT "/" SIZE_FORMAT "/" SIZE_FORMAT,
ysr@1580 2702                              tid, i, num_retire, _num_blocks[i], (size_t)_blocks_to_claim[i].average());
ysr@1580 2703 }
ysr@1580 2704 // Reset stats for next round
ysr@1580 2705 _num_blocks[i] = 0;
ysr@1580 2706 }
ysr@1580 2707 }
ysr@1580 2708 }
ysr@1580 2709
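// Get "n" blocks of size "word_sz" onto the free list "fl": first try the
// indexed free lists (word_sz itself and, under CMSSplitIndexedFreeListBlocks,
// larger multiples that are split k ways); failing that, carve the blocks out
// of a larger chunk obtained from the dictionary, returning any unusable
// remainder to the appropriate free list.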
ysr@1580 2710 void CompactibleFreeListSpace:: par_get_chunk_of_blocks(size_t word_sz, size_t n, FreeList* fl) {
duke@435 2711 assert(fl->count() == 0, "Precondition.");
duke@435 2712 assert(word_sz < CompactibleFreeListSpace::IndexSetSize,
duke@435 2713 "Precondition");
duke@435 2714
ysr@1580 2715   // We'll try the indexed set first: word_sz itself and, if
ysr@1580 2716   // CMSSplitIndexedFreeListBlocks, larger multiples of word_sz that we split
ysr@1580 2717   // k ways; failing that, we get a big chunk from the dictionary and split it.
ysr@1580 2718 {
ysr@1580 2719 bool found;
ysr@1580 2720 int k;
ysr@1580 2721 size_t cur_sz;
ysr@1580 2722 for (k = 1, cur_sz = k * word_sz, found = false;
ysr@1580 2723 (cur_sz < CompactibleFreeListSpace::IndexSetSize) &&
ysr@1580 2724 (CMSSplitIndexedFreeListBlocks || k <= 1);
ysr@1580 2725 k++, cur_sz = k * word_sz) {
ysr@1580 2726 FreeList fl_for_cur_sz; // Empty.
ysr@1580 2727 fl_for_cur_sz.set_size(cur_sz);
ysr@1580 2728 {
ysr@1580 2729 MutexLockerEx x(_indexedFreeListParLocks[cur_sz],
ysr@1580 2730 Mutex::_no_safepoint_check_flag);
ysr@2071 2731 FreeList* gfl = &_indexedFreeList[cur_sz];
ysr@1580 2732 if (gfl->count() != 0) {
ysr@1580 2733 // nn is the number of chunks of size cur_sz that
ysr@1580 2734 // we'd need to split k-ways each, in order to create
ysr@1580 2735 // "n" chunks of size word_sz each.
ysr@1580 2736 const size_t nn = MAX2(n/k, (size_t)1);
ysr@1580 2737 gfl->getFirstNChunksFromList(nn, &fl_for_cur_sz);
ysr@1580 2738 found = true;
ysr@1580 2739 if (k > 1) {
ysr@1580 2740 // Update split death stats for the cur_sz-size blocks list:
ysr@1580 2741 // we increment the split death count by the number of blocks
ysr@1580 2742 // we just took from the cur_sz-size blocks list and which
ysr@1580 2743 // we will be splitting below.
ysr@2071 2744 ssize_t deaths = gfl->splitDeaths() +
ysr@1580 2745 fl_for_cur_sz.count();
ysr@2071 2746 gfl->set_splitDeaths(deaths);
ysr@1580 2747 }
ysr@1580 2748 }
ysr@1580 2749 }
ysr@1580 2750 // Now transfer fl_for_cur_sz to fl. Common case, we hope, is k = 1.
ysr@1580 2751 if (found) {
ysr@1580 2752 if (k == 1) {
ysr@1580 2753 fl->prepend(&fl_for_cur_sz);
ysr@1580 2754 } else {
ysr@1580 2755 // Divide each block on fl_for_cur_sz up k ways.
ysr@1580 2756 FreeChunk* fc;
ysr@1580 2757 while ((fc = fl_for_cur_sz.getChunkAtHead()) != NULL) {
ysr@1580 2758 // Must do this in reverse order, so that anybody attempting to
ysr@1580 2759 // access the main chunk sees it as a single free block until we
ysr@1580 2760 // change it.
ysr@1580 2761 size_t fc_size = fc->size();
ysr@2071 2762 assert(fc->isFree(), "Error");
ysr@1580 2763 for (int i = k-1; i >= 0; i--) {
ysr@1580 2764 FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz);
ysr@2071 2765 assert((i != 0) ||
ysr@2071 2766 ((fc == ffc) && ffc->isFree() &&
ysr@2071 2767 (ffc->size() == k*word_sz) && (fc_size == word_sz)),
ysr@2071 2768 "Counting error");
ysr@1580 2769 ffc->setSize(word_sz);
ysr@2071 2770 ffc->linkPrev(NULL); // Mark as a free block for other (parallel) GC threads.
ysr@1580 2771 ffc->linkNext(NULL);
ysr@1580 2772 // Above must occur before BOT is updated below.
ysr@2071 2773 OrderAccess::storestore();
ysr@2071 2774 // splitting from the right, fc_size == i * word_sz
ysr@2071 2775 _bt.mark_block((HeapWord*)ffc, word_sz, true /* reducing */);
ysr@1580 2776 fc_size -= word_sz;
ysr@2071 2777 assert(fc_size == i*word_sz, "Error");
ysr@2071 2778 _bt.verify_not_unallocated((HeapWord*)ffc, word_sz);
ysr@1580 2779 _bt.verify_single_block((HeapWord*)fc, fc_size);
ysr@2071 2780 _bt.verify_single_block((HeapWord*)ffc, word_sz);
ysr@1580 2781 // Push this on "fl".
ysr@1580 2782 fl->returnChunkAtHead(ffc);
ysr@1580 2783 }
ysr@1580 2784 // TRAP
ysr@1580 2785 assert(fl->tail()->next() == NULL, "List invariant.");
ysr@1580 2786 }
ysr@1580 2787 }
ysr@1580 2788 // Update birth stats for this block size.
ysr@1580 2789 size_t num = fl->count();
ysr@1580 2790 MutexLockerEx x(_indexedFreeListParLocks[word_sz],
ysr@1580 2791 Mutex::_no_safepoint_check_flag);
ysr@1580 2792 ssize_t births = _indexedFreeList[word_sz].splitBirths() + num;
ysr@1580 2793 _indexedFreeList[word_sz].set_splitBirths(births);
ysr@1580 2794 return;
duke@435 2795 }
duke@435 2796 }
duke@435 2797 }
duke@435 2798 // Otherwise, we'll split a block from the dictionary.
duke@435 2799 FreeChunk* fc = NULL;
duke@435 2800 FreeChunk* rem_fc = NULL;
duke@435 2801 size_t rem;
duke@435 2802 {
duke@435 2803 MutexLockerEx x(parDictionaryAllocLock(),
duke@435 2804 Mutex::_no_safepoint_check_flag);
duke@435 2805 while (n > 0) {
duke@435 2806 fc = dictionary()->getChunk(MAX2(n * word_sz,
duke@435 2807 _dictionary->minSize()),
duke@435 2808 FreeBlockDictionary::atLeast);
duke@435 2809 if (fc != NULL) {
ysr@2071 2810 _bt.allocated((HeapWord*)fc, fc->size(), true /* reducing */); // update _unallocated_blk
duke@435 2811 dictionary()->dictCensusUpdate(fc->size(),
duke@435 2812 true /*split*/,
duke@435 2813 false /*birth*/);
duke@435 2814 break;
duke@435 2815 } else {
duke@435 2816 n--;
duke@435 2817 }
duke@435 2818 }
duke@435 2819 if (fc == NULL) return;
ysr@2071 2820 // Otherwise, split up that block.
ysr@1580 2821 assert((ssize_t)n >= 1, "Control point invariant");
ysr@2071 2822 assert(fc->isFree(), "Error: should be a free block");
ysr@2071 2823 _bt.verify_single_block((HeapWord*)fc, fc->size());
ysr@1580 2824 const size_t nn = fc->size() / word_sz;
duke@435 2825 n = MIN2(nn, n);
ysr@1580 2826 assert((ssize_t)n >= 1, "Control point invariant");
duke@435 2827 rem = fc->size() - n * word_sz;
duke@435 2828 // If there is a remainder, and it's too small, allocate one fewer.
duke@435 2829 if (rem > 0 && rem < MinChunkSize) {
duke@435 2830 n--; rem += word_sz;
duke@435 2831 }
jmasa@1583 2832 // Note that at this point we may have n == 0.
jmasa@1583 2833 assert((ssize_t)n >= 0, "Control point invariant");
jmasa@1583 2834
jmasa@1583 2835 // If n is 0, the chunk fc that was found is not large
jmasa@1583 2836 // enough to leave a viable remainder. We are unable to
jmasa@1583 2837 // allocate even one block. Return fc to the
jmasa@1583 2838 // dictionary and return, leaving "fl" empty.
jmasa@1583 2839 if (n == 0) {
jmasa@1583 2840 returnChunkToDictionary(fc);
ysr@2071 2841 assert(fl->count() == 0, "We never allocated any blocks");
jmasa@1583 2842 return;
jmasa@1583 2843 }
jmasa@1583 2844
duke@435 2845 // First return the remainder, if any.
duke@435 2846 // Note that we hold the lock until we decide if we're going to give
ysr@1580 2847 // back the remainder to the dictionary, since a concurrent allocation
duke@435 2848 // may otherwise see the heap as empty. (We're willing to take that
duke@435 2849 // hit if the block is a small block.)
duke@435 2850 if (rem > 0) {
duke@435 2851 size_t prefix_size = n * word_sz;
duke@435 2852 rem_fc = (FreeChunk*)((HeapWord*)fc + prefix_size);
duke@435 2853 rem_fc->setSize(rem);
ysr@2071 2854 rem_fc->linkPrev(NULL); // Mark as a free block for other (parallel) GC threads.
duke@435 2855 rem_fc->linkNext(NULL);
duke@435 2856 // Above must occur before BOT is updated below.
ysr@1580 2857 assert((ssize_t)n > 0 && prefix_size > 0 && rem_fc > fc, "Error");
ysr@2071 2858 OrderAccess::storestore();
duke@435 2859 _bt.split_block((HeapWord*)fc, fc->size(), prefix_size);
ysr@2071 2860 assert(fc->isFree(), "Error");
ysr@2071 2861 fc->setSize(prefix_size);
duke@435 2862 if (rem >= IndexSetSize) {
duke@435 2863 returnChunkToDictionary(rem_fc);
ysr@1580 2864 dictionary()->dictCensusUpdate(rem, true /*split*/, true /*birth*/);
duke@435 2865 rem_fc = NULL;
duke@435 2866 }
duke@435 2867 // Otherwise, return it to the small list below.
duke@435 2868 }
duke@435 2869 }
duke@435 2870 if (rem_fc != NULL) {
duke@435 2871 MutexLockerEx x(_indexedFreeListParLocks[rem],
duke@435 2872 Mutex::_no_safepoint_check_flag);
duke@435 2873 _bt.verify_not_unallocated((HeapWord*)rem_fc, rem_fc->size());
duke@435 2874 _indexedFreeList[rem].returnChunkAtHead(rem_fc);
duke@435 2875 smallSplitBirth(rem);
duke@435 2876 }
ysr@1580 2877 assert((ssize_t)n > 0 && fc != NULL, "Consistency");
duke@435 2878 // Now do the splitting up.
duke@435 2879 // Must do this in reverse order, so that anybody attempting to
duke@435 2880 // access the main chunk sees it as a single free block until we
duke@435 2881 // change it.
duke@435 2882 size_t fc_size = n * word_sz;
duke@435 2883 // All but first chunk in this loop
duke@435 2884 for (ssize_t i = n-1; i > 0; i--) {
duke@435 2885 FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz);
duke@435 2886 ffc->setSize(word_sz);
ysr@2071 2887 ffc->linkPrev(NULL); // Mark as a free block for other (parallel) GC threads.
duke@435 2888 ffc->linkNext(NULL);
duke@435 2889 // Above must occur before BOT is updated below.
ysr@2071 2890 OrderAccess::storestore();
duke@435 2891     // splitting from the right; after the decrement below, fc_size == i * word_sz
ysr@2071 2892 _bt.mark_block((HeapWord*)ffc, word_sz, true /* reducing */);
duke@435 2893 fc_size -= word_sz;
duke@435 2894 _bt.verify_not_unallocated((HeapWord*)ffc, ffc->size());
duke@435 2895 _bt.verify_single_block((HeapWord*)ffc, ffc->size());
duke@435 2896 _bt.verify_single_block((HeapWord*)fc, fc_size);
duke@435 2897 // Push this on "fl".
duke@435 2898 fl->returnChunkAtHead(ffc);
duke@435 2899 }
duke@435 2900 // First chunk
ysr@2071 2901 assert(fc->isFree() && fc->size() == n*word_sz, "Error: should still be a free block");
ysr@2071 2902   // The split-off blocks above must show their new sizes before the size of the first block (fc) is changed below
duke@435 2903 fc->setSize(word_sz);
ysr@2071 2904 fc->linkPrev(NULL); // idempotent wrt free-ness, see assert above
duke@435 2905 fc->linkNext(NULL);
duke@435 2906 _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
duke@435 2907 _bt.verify_single_block((HeapWord*)fc, fc->size());
duke@435 2908 fl->returnChunkAtHead(fc);
duke@435 2909
ysr@1580 2910 assert((ssize_t)n > 0 && (ssize_t)n == fl->count(), "Incorrect number of blocks");
duke@435 2911 {
ysr@1580 2912 // Update the stats for this block size.
duke@435 2913 MutexLockerEx x(_indexedFreeListParLocks[word_sz],
duke@435 2914 Mutex::_no_safepoint_check_flag);
ysr@1580 2915 const ssize_t births = _indexedFreeList[word_sz].splitBirths() + n;
ysr@1580 2916 _indexedFreeList[word_sz].set_splitBirths(births);
ysr@1580 2917 // ssize_t new_surplus = _indexedFreeList[word_sz].surplus() + n;
ysr@1580 2918 // _indexedFreeList[word_sz].set_surplus(new_surplus);
duke@435 2919 }
duke@435 2920
duke@435 2921 // TRAP
duke@435 2922 assert(fl->tail()->next() == NULL, "List invariant.");
duke@435 2923 }
duke@435 2924
duke@435 2925 // Set up the space's par_seq_tasks structure for work claiming
duke@435 2926 // for parallel rescan. See CMSParRemarkTask where this is currently used.
duke@435 2927 // XXX Need to suitably abstract and generalize this and the next
duke@435 2928 // method into one.
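// The task count is the ceiling of the used region size over
// rescan_task_size(); e.g., with illustrative numbers, a used region of
// 10000 words and a task size of 4096 words yield 3 tasks.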
duke@435 2929 void
duke@435 2930 CompactibleFreeListSpace::
duke@435 2931 initialize_sequential_subtasks_for_rescan(int n_threads) {
duke@435 2932 // The "size" of each task is fixed according to rescan_task_size.
duke@435 2933 assert(n_threads > 0, "Unexpected n_threads argument");
duke@435 2934 const size_t task_size = rescan_task_size();
duke@435 2935 size_t n_tasks = (used_region().word_size() + task_size - 1)/task_size;
ysr@775 2936 assert((n_tasks == 0) == used_region().is_empty(), "n_tasks incorrect");
ysr@775 2937 assert(n_tasks == 0 ||
ysr@775 2938 ((used_region().start() + (n_tasks - 1)*task_size < used_region().end()) &&
ysr@775 2939 (used_region().start() + n_tasks*task_size >= used_region().end())),
ysr@775 2940 "n_tasks calculation incorrect");
duke@435 2941 SequentialSubTasksDone* pst = conc_par_seq_tasks();
duke@435 2942 assert(!pst->valid(), "Clobbering existing data?");
jmasa@2188 2943 // Sets the condition for completion of the subtask (how many threads
jmasa@2188 2944 // need to finish in order to be done).
jmasa@2188 2945 pst->set_n_threads(n_threads);
duke@435 2946 pst->set_n_tasks((int)n_tasks);
duke@435 2947 }
duke@435 2948
duke@435 2949 // Set up the space's par_seq_tasks structure for work claiming
duke@435 2950 // for parallel concurrent marking. See CMSConcMarkTask where this is currently used.
duke@435 2951 void
duke@435 2952 CompactibleFreeListSpace::
duke@435 2953 initialize_sequential_subtasks_for_marking(int n_threads,
duke@435 2954 HeapWord* low) {
duke@435 2955 // The "size" of each task is fixed according to rescan_task_size.
duke@435 2956 assert(n_threads > 0, "Unexpected n_threads argument");
duke@435 2957 const size_t task_size = marking_task_size();
duke@435 2958 assert(task_size > CardTableModRefBS::card_size_in_words &&
duke@435 2959 (task_size % CardTableModRefBS::card_size_in_words == 0),
duke@435 2960 "Otherwise arithmetic below would be incorrect");
duke@435 2961 MemRegion span = _gen->reserved();
duke@435 2962 if (low != NULL) {
duke@435 2963 if (span.contains(low)) {
duke@435 2964 // Align low down to a card boundary so that
duke@435 2965 // we can use block_offset_careful() on span boundaries.
duke@435 2966 HeapWord* aligned_low = (HeapWord*)align_size_down((uintptr_t)low,
duke@435 2967 CardTableModRefBS::card_size);
duke@435 2968 // Clip span prefix at aligned_low
duke@435 2969 span = span.intersection(MemRegion(aligned_low, span.end()));
duke@435 2970 } else if (low > span.end()) {
duke@435 2971 span = MemRegion(low, low); // Null region
duke@435 2972 } // else use entire span
duke@435 2973 }
duke@435 2974 assert(span.is_empty() ||
duke@435 2975 ((uintptr_t)span.start() % CardTableModRefBS::card_size == 0),
duke@435 2976 "span should start at a card boundary");
duke@435 2977 size_t n_tasks = (span.word_size() + task_size - 1)/task_size;
duke@435 2978 assert((n_tasks == 0) == span.is_empty(), "Inconsistency");
duke@435 2979 assert(n_tasks == 0 ||
duke@435 2980 ((span.start() + (n_tasks - 1)*task_size < span.end()) &&
duke@435 2981 (span.start() + n_tasks*task_size >= span.end())),
ysr@775 2982 "n_tasks calculation incorrect");
duke@435 2983 SequentialSubTasksDone* pst = conc_par_seq_tasks();
duke@435 2984 assert(!pst->valid(), "Clobbering existing data?");
jmasa@2188 2985 // Sets the condition for completion of the subtask (how many threads
jmasa@2188 2986 // need to finish in order to be done).
jmasa@2188 2987 pst->set_n_threads(n_threads);
duke@435 2988 pst->set_n_tasks((int)n_tasks);
duke@435 2989 }
