src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp

author       xdono
date         Mon, 09 Mar 2009 13:28:46 -0700
changeset    1014:0fbdb4381b99
parent       953:0af8b0718fc9
child        1580:e018e6884bd8
permissions  -rw-r--r--

6814575: Update copyright year
Summary: Update copyright for files that have been modified in 2009, up to 03/09
Reviewed-by: katleman, tbell, ohair

duke@435 1 /*
xdono@1014 2 * Copyright 2001-2009 Sun Microsystems, Inc. All Rights Reserved.
duke@435 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
duke@435 4 *
duke@435 5 * This code is free software; you can redistribute it and/or modify it
duke@435 6 * under the terms of the GNU General Public License version 2 only, as
duke@435 7 * published by the Free Software Foundation.
duke@435 8 *
duke@435 9 * This code is distributed in the hope that it will be useful, but WITHOUT
duke@435 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
duke@435 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
duke@435 12 * version 2 for more details (a copy is included in the LICENSE file that
duke@435 13 * accompanied this code).
duke@435 14 *
duke@435 15 * You should have received a copy of the GNU General Public License version
duke@435 16 * 2 along with this work; if not, write to the Free Software Foundation,
duke@435 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
duke@435 18 *
duke@435 19 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
duke@435 20 * CA 95054 USA or visit www.sun.com if you need additional information or
duke@435 21 * have any questions.
duke@435 22 *
duke@435 23 */
duke@435 24
duke@435 25 # include "incls/_precompiled.incl"
duke@435 26 # include "incls/_compactibleFreeListSpace.cpp.incl"
duke@435 27
duke@435 28 /////////////////////////////////////////////////////////////////////////
duke@435 29 //// CompactibleFreeListSpace
duke@435 30 /////////////////////////////////////////////////////////////////////////
duke@435 31
duke@435 32 // highest ranked free list lock rank
duke@435 33 int CompactibleFreeListSpace::_lockRank = Mutex::leaf + 3;
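
// Note on how the rank range cited in the constructor comes about: each
// constructor takes the current value of _lockRank for its _freelistLock and then
// post-decrements it, so the first CompactibleFreeListSpace constructed gets rank
// Mutex::leaf + 3 and the second gets Mutex::leaf + 2 (presumably the CMS old gen
// and the CMS perm gen spaces), which is why the constructors must run serially,
// as noted below.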
duke@435 34
duke@435 35 // Constructor
duke@435 36 CompactibleFreeListSpace::CompactibleFreeListSpace(BlockOffsetSharedArray* bs,
duke@435 37 MemRegion mr, bool use_adaptive_freelists,
duke@435 38 FreeBlockDictionary::DictionaryChoice dictionaryChoice) :
duke@435 39 _dictionaryChoice(dictionaryChoice),
duke@435 40 _adaptive_freelists(use_adaptive_freelists),
duke@435 41 _bt(bs, mr),
duke@435 42 // free list locks are in the range of values taken by _lockRank
duke@435 43 // This range currently is [_leaf+2, _leaf+3]
duke@435 44 // Note: this requires that CFLspace c'tors
duke@435 45 // are called serially in the order in which the locks are
duke@435 46 // acquired in the program text. This is true today.
duke@435 47 _freelistLock(_lockRank--, "CompactibleFreeListSpace._lock", true),
duke@435 48 _parDictionaryAllocLock(Mutex::leaf - 1, // == rank(ExpandHeap_lock) - 1
duke@435 49 "CompactibleFreeListSpace._dict_par_lock", true),
duke@435 50 _rescan_task_size(CardTableModRefBS::card_size_in_words * BitsPerWord *
duke@435 51 CMSRescanMultiple),
duke@435 52 _marking_task_size(CardTableModRefBS::card_size_in_words * BitsPerWord *
duke@435 53 CMSConcMarkMultiple),
duke@435 54 _collector(NULL)
duke@435 55 {
duke@435 56 _bt.set_space(this);
jmasa@698 57 initialize(mr, SpaceDecorator::Clear, SpaceDecorator::Mangle);
duke@435 58 // We have all of "mr", all of which we place in the dictionary
duke@435 59 // as one big chunk. We'll need to decide here which of several
duke@435 60 // possible alternative dictionary implementations to use. For
duke@435 61 // now the choice is easy, since we have only one working
duke@435 62 // implementation, namely, the simple binary tree (splaying
duke@435 63 // temporarily disabled).
duke@435 64 switch (dictionaryChoice) {
duke@435 65 case FreeBlockDictionary::dictionaryBinaryTree:
duke@435 66 _dictionary = new BinaryTreeDictionary(mr);
duke@435 67 break;
duke@435 68 case FreeBlockDictionary::dictionarySplayTree:
duke@435 69 case FreeBlockDictionary::dictionarySkipList:
duke@435 70 default:
duke@435 71 warning("dictionaryChoice: selected option not understood; using"
duke@435 72 " default BinaryTreeDictionary implementation instead.");
duke@435 73 _dictionary = new BinaryTreeDictionary(mr);
duke@435 74 break;
duke@435 75 }
duke@435 76 splitBirth(mr.word_size());
duke@435 77 assert(_dictionary != NULL, "CMS dictionary initialization");
duke@435 78 // The indexed free lists are initially all empty and are lazily
duke@435 79 // filled in on demand. Initialize the array elements to NULL.
duke@435 80 initializeIndexedFreeListArray();
duke@435 81
duke@435 82 // Not using adaptive free lists assumes that allocation is first
duke@435 83 // from the linAB's. Also a cms perm gen which can be compacted
duke@435 84 // has to have the klass's klassKlass allocated at a lower
duke@435 85 // address in the heap than the klass so that the klassKlass is
duke@435 86 // moved to its new location before the klass is moved.
duke@435 87 // Set the _refillSize for the linear allocation blocks
duke@435 88 if (!use_adaptive_freelists) {
duke@435 89 FreeChunk* fc = _dictionary->getChunk(mr.word_size());
duke@435 90 // The small linAB initially has all the space and will allocate
duke@435 91 // a chunk of any size.
duke@435 92 HeapWord* addr = (HeapWord*) fc;
duke@435 93 _smallLinearAllocBlock.set(addr, fc->size() ,
duke@435 94 1024*SmallForLinearAlloc, fc->size());
duke@435 95 // Note that _unallocated_block is not updated here.
duke@435 96 // Allocations from the linear allocation block should
duke@435 97 // update it.
duke@435 98 } else {
duke@435 99 _smallLinearAllocBlock.set(0, 0, 1024*SmallForLinearAlloc,
duke@435 100 SmallForLinearAlloc);
duke@435 101 }
duke@435 102 // CMSIndexedFreeListReplenish should be at least 1
duke@435 103 CMSIndexedFreeListReplenish = MAX2((uintx)1, CMSIndexedFreeListReplenish);
duke@435 104 _promoInfo.setSpace(this);
duke@435 105 if (UseCMSBestFit) {
duke@435 106 _fitStrategy = FreeBlockBestFitFirst;
duke@435 107 } else {
duke@435 108 _fitStrategy = FreeBlockStrategyNone;
duke@435 109 }
duke@435 110 checkFreeListConsistency();
duke@435 111
duke@435 112 // Initialize locks for parallel case.
duke@435 113 if (ParallelGCThreads > 0) {
duke@435 114 for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
duke@435 115 _indexedFreeListParLocks[i] = new Mutex(Mutex::leaf - 1, // == ExpandHeap_lock - 1
duke@435 116 "a freelist par lock",
duke@435 117 true);
duke@435 118 if (_indexedFreeListParLocks[i] == NULL)
duke@435 119 vm_exit_during_initialization("Could not allocate a par lock");
duke@435 120 DEBUG_ONLY(
duke@435 121 _indexedFreeList[i].set_protecting_lock(_indexedFreeListParLocks[i]);
duke@435 122 )
duke@435 123 }
duke@435 124 _dictionary->set_par_lock(&_parDictionaryAllocLock);
duke@435 125 }
duke@435 126 }
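
// A construction sketch (illustrative only: the variable names are hypothetical
// and the argument values are assumptions; the real call sites are in the CMS
// generation setup code):
//
//   BlockOffsetSharedArray* bosa = ...;   // shared BOT covering the reserved region
//   MemRegion mr(space_bottom, space_end);
//   CompactibleFreeListSpace* cfls =
//       new CompactibleFreeListSpace(bosa, mr,
//                                    true /* use_adaptive_freelists */,
//                                    FreeBlockDictionary::dictionaryBinaryTree);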
duke@435 127
duke@435 128 // Like CompactibleSpace forward() but always calls cross_threshold() to
duke@435 129 // update the block offset table. Removed initialize_threshold call because
duke@435 130 // CFLS does not use a block offset array for contiguous spaces.
duke@435 131 HeapWord* CompactibleFreeListSpace::forward(oop q, size_t size,
duke@435 132 CompactPoint* cp, HeapWord* compact_top) {
duke@435 133 // q is alive
duke@435 134 // First check if we should switch compaction space
duke@435 135 assert(this == cp->space, "'this' should be current compaction space.");
duke@435 136 size_t compaction_max_size = pointer_delta(end(), compact_top);
duke@435 137 assert(adjustObjectSize(size) == cp->space->adjust_object_size_v(size),
duke@435 138 "virtual adjustObjectSize_v() method is not correct");
duke@435 139 size_t adjusted_size = adjustObjectSize(size);
duke@435 140 assert(compaction_max_size >= MinChunkSize || compaction_max_size == 0,
duke@435 141 "no small fragments allowed");
duke@435 142 assert(minimum_free_block_size() == MinChunkSize,
duke@435 143 "for de-virtualized reference below");
duke@435 144 // Can't leave a nonzero size, residual fragment smaller than MinChunkSize
duke@435 145 if (adjusted_size + MinChunkSize > compaction_max_size &&
duke@435 146 adjusted_size != compaction_max_size) {
duke@435 147 do {
duke@435 148 // switch to next compaction space
duke@435 149 cp->space->set_compaction_top(compact_top);
duke@435 150 cp->space = cp->space->next_compaction_space();
duke@435 151 if (cp->space == NULL) {
duke@435 152 cp->gen = GenCollectedHeap::heap()->prev_gen(cp->gen);
duke@435 153 assert(cp->gen != NULL, "compaction must succeed");
duke@435 154 cp->space = cp->gen->first_compaction_space();
duke@435 155 assert(cp->space != NULL, "generation must have a first compaction space");
duke@435 156 }
duke@435 157 compact_top = cp->space->bottom();
duke@435 158 cp->space->set_compaction_top(compact_top);
duke@435 159 // The correct adjusted_size may not be the same as that for this method
duke@435 160 // (i.e., cp->space may no longer be "this") so adjust the size again.
duke@435 161 // Use the virtual method which is not used above to save the virtual
duke@435 162 // dispatch.
duke@435 163 adjusted_size = cp->space->adjust_object_size_v(size);
duke@435 164 compaction_max_size = pointer_delta(cp->space->end(), compact_top);
duke@435 165 assert(cp->space->minimum_free_block_size() == 0, "just checking");
duke@435 166 } while (adjusted_size > compaction_max_size);
duke@435 167 }
duke@435 168
duke@435 169 // store the forwarding pointer into the mark word
duke@435 170 if ((HeapWord*)q != compact_top) {
duke@435 171 q->forward_to(oop(compact_top));
duke@435 172 assert(q->is_gc_marked(), "encoding the pointer should preserve the mark");
duke@435 173 } else {
duke@435 174 // if the object isn't moving we can just set the mark to the default
duke@435 175 // mark and handle it specially later on.
duke@435 176 q->init_mark();
duke@435 177 assert(q->forwardee() == NULL, "should be forwarded to NULL");
duke@435 178 }
duke@435 179
coleenp@548 180 VALIDATE_MARK_SWEEP_ONLY(MarkSweep::register_live_oop(q, adjusted_size));
duke@435 181 compact_top += adjusted_size;
duke@435 182
duke@435 183 // we need to update the offset table so that the beginnings of objects can be
duke@435 184 // found during scavenge. Note that we are updating the offset table based on
duke@435 185 // where the object will be once the compaction phase finishes.
duke@435 186
duke@435 187 // Always call cross_threshold(). A contiguous space can only call it when
duke@435 188 // the compaction_top exceeds the current threshold, but not for a
duke@435 189 // non-contiguous space.
duke@435 190 cp->threshold =
duke@435 191 cp->space->cross_threshold(compact_top - adjusted_size, compact_top);
duke@435 192 return compact_top;
duke@435 193 }
duke@435 194
duke@435 195 // A modified copy of OffsetTableContigSpace::cross_threshold() with _offsets -> _bt
duke@435 196 // and use of single_block instead of alloc_block. The name here is not really
duke@435 197 // appropriate - maybe a more general name could be invented for both the
duke@435 198 // contiguous and noncontiguous spaces.
duke@435 199
duke@435 200 HeapWord* CompactibleFreeListSpace::cross_threshold(HeapWord* start, HeapWord* the_end) {
duke@435 201 _bt.single_block(start, the_end);
duke@435 202 return end();
duke@435 203 }
duke@435 204
duke@435 205 // Initialize them to NULL.
duke@435 206 void CompactibleFreeListSpace::initializeIndexedFreeListArray() {
duke@435 207 for (size_t i = 0; i < IndexSetSize; i++) {
duke@435 208 // Note that on platforms where objects are double word aligned,
duke@435 209 // the odd array elements are not used. It is convenient, however,
duke@435 210 // to map directly from the object size to the array element.
duke@435 211 _indexedFreeList[i].reset(IndexSetSize);
duke@435 212 _indexedFreeList[i].set_size(i);
duke@435 213 assert(_indexedFreeList[i].count() == 0, "reset check failed");
duke@435 214 assert(_indexedFreeList[i].head() == NULL, "reset check failed");
duke@435 215 assert(_indexedFreeList[i].tail() == NULL, "reset check failed");
duke@435 216 assert(_indexedFreeList[i].hint() == IndexSetSize, "reset check failed");
duke@435 217 }
duke@435 218 }
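
// Layout sketch of the indexed free lists (sizes in HeapWords, shown here for
// IndexSetStride == 1; with double-word object alignment only every other index
// is populated):
//
//   _indexedFreeList[k]                -> free chunks of exactly k words
//   ...
//   _indexedFreeList[IndexSetSize - 1] -> largest size handled by the indexed lists
//
// Chunks of IndexSetSize words or more are kept in the dictionary instead (see
// reset() and the allocate_*_freelists() routines further down).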
duke@435 219
duke@435 220 void CompactibleFreeListSpace::resetIndexedFreeListArray() {
duke@435 221 for (int i = 1; i < IndexSetSize; i++) {
duke@435 222 assert(_indexedFreeList[i].size() == (size_t) i,
duke@435 223 "Indexed free list sizes are incorrect");
duke@435 224 _indexedFreeList[i].reset(IndexSetSize);
duke@435 225 assert(_indexedFreeList[i].count() == 0, "reset check failed");
duke@435 226 assert(_indexedFreeList[i].head() == NULL, "reset check failed");
duke@435 227 assert(_indexedFreeList[i].tail() == NULL, "reset check failed");
duke@435 228 assert(_indexedFreeList[i].hint() == IndexSetSize, "reset check failed");
duke@435 229 }
duke@435 230 }
duke@435 231
duke@435 232 void CompactibleFreeListSpace::reset(MemRegion mr) {
duke@435 233 resetIndexedFreeListArray();
duke@435 234 dictionary()->reset();
duke@435 235 if (BlockOffsetArrayUseUnallocatedBlock) {
duke@435 236 assert(end() == mr.end(), "We are compacting to the bottom of CMS gen");
duke@435 237 // Everything's allocated until proven otherwise.
duke@435 238 _bt.set_unallocated_block(end());
duke@435 239 }
duke@435 240 if (!mr.is_empty()) {
duke@435 241 assert(mr.word_size() >= MinChunkSize, "Chunk size is too small");
duke@435 242 _bt.single_block(mr.start(), mr.word_size());
duke@435 243 FreeChunk* fc = (FreeChunk*) mr.start();
duke@435 244 fc->setSize(mr.word_size());
duke@435 245 if (mr.word_size() >= IndexSetSize ) {
duke@435 246 returnChunkToDictionary(fc);
duke@435 247 } else {
duke@435 248 _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
duke@435 249 _indexedFreeList[mr.word_size()].returnChunkAtHead(fc);
duke@435 250 }
duke@435 251 }
duke@435 252 _promoInfo.reset();
duke@435 253 _smallLinearAllocBlock._ptr = NULL;
duke@435 254 _smallLinearAllocBlock._word_size = 0;
duke@435 255 }
duke@435 256
duke@435 257 void CompactibleFreeListSpace::reset_after_compaction() {
duke@435 258 // Reset the space to the new reality - one free chunk.
duke@435 259 MemRegion mr(compaction_top(), end());
duke@435 260 reset(mr);
duke@435 261 // Now refill the linear allocation block(s) if possible.
duke@435 262 if (_adaptive_freelists) {
duke@435 263 refillLinearAllocBlocksIfNeeded();
duke@435 264 } else {
duke@435 265 // Place as much of mr in the linAB as we can get,
duke@435 266 // provided it was big enough to go into the dictionary.
duke@435 267 FreeChunk* fc = dictionary()->findLargestDict();
duke@435 268 if (fc != NULL) {
duke@435 269 assert(fc->size() == mr.word_size(),
duke@435 270 "Why was the chunk broken up?");
duke@435 271 removeChunkFromDictionary(fc);
duke@435 272 HeapWord* addr = (HeapWord*) fc;
duke@435 273 _smallLinearAllocBlock.set(addr, fc->size() ,
duke@435 274 1024*SmallForLinearAlloc, fc->size());
duke@435 275 // Note that _unallocated_block is not updated here.
duke@435 276 }
duke@435 277 }
duke@435 278 }
duke@435 279
duke@435 280 // Walks the entire dictionary, returning a coterminal
duke@435 281 // chunk, if it exists. Use with caution since it involves
duke@435 282 // a potentially complete walk of a potentially large tree.
duke@435 283 FreeChunk* CompactibleFreeListSpace::find_chunk_at_end() {
duke@435 284
duke@435 285 assert_lock_strong(&_freelistLock);
duke@435 286
duke@435 287 return dictionary()->find_chunk_ends_at(end());
duke@435 288 }
duke@435 289
duke@435 290
duke@435 291 #ifndef PRODUCT
duke@435 292 void CompactibleFreeListSpace::initializeIndexedFreeListArrayReturnedBytes() {
duke@435 293 for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
duke@435 294 _indexedFreeList[i].allocation_stats()->set_returnedBytes(0);
duke@435 295 }
duke@435 296 }
duke@435 297
duke@435 298 size_t CompactibleFreeListSpace::sumIndexedFreeListArrayReturnedBytes() {
duke@435 299 size_t sum = 0;
duke@435 300 for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
duke@435 301 sum += _indexedFreeList[i].allocation_stats()->returnedBytes();
duke@435 302 }
duke@435 303 return sum;
duke@435 304 }
duke@435 305
duke@435 306 size_t CompactibleFreeListSpace::totalCountInIndexedFreeLists() const {
duke@435 307 size_t count = 0;
duke@435 308 for (int i = MinChunkSize; i < IndexSetSize; i++) {
duke@435 309 debug_only(
duke@435 310 ssize_t total_list_count = 0;
duke@435 311 for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
duke@435 312 fc = fc->next()) {
duke@435 313 total_list_count++;
duke@435 314 }
duke@435 315 assert(total_list_count == _indexedFreeList[i].count(),
duke@435 316 "Count in list is incorrect");
duke@435 317 )
duke@435 318 count += _indexedFreeList[i].count();
duke@435 319 }
duke@435 320 return count;
duke@435 321 }
duke@435 322
duke@435 323 size_t CompactibleFreeListSpace::totalCount() {
duke@435 324 size_t num = totalCountInIndexedFreeLists();
duke@435 325 num += dictionary()->totalCount();
duke@435 326 if (_smallLinearAllocBlock._word_size != 0) {
duke@435 327 num++;
duke@435 328 }
duke@435 329 return num;
duke@435 330 }
duke@435 331 #endif
duke@435 332
duke@435 333 bool CompactibleFreeListSpace::is_free_block(const HeapWord* p) const {
duke@435 334 FreeChunk* fc = (FreeChunk*) p;
duke@435 335 return fc->isFree();
duke@435 336 }
duke@435 337
duke@435 338 size_t CompactibleFreeListSpace::used() const {
duke@435 339 return capacity() - free();
duke@435 340 }
duke@435 341
duke@435 342 size_t CompactibleFreeListSpace::free() const {
duke@435 343 // "MT-safe, but not MT-precise"(TM), if you will: i.e.
duke@435 344 // if you do this while the structures are in flux you
duke@435 345 // may get an approximate answer only; for instance
duke@435 346 // because there is concurrent allocation either
duke@435 347 // directly by mutators or for promotion during a GC.
duke@435 348 // It's "MT-safe", however, in the sense that you are guaranteed
duke@435 349 // not to crash and burn, for instance, because of walking
duke@435 350 // pointers that could disappear as you were walking them.
duke@435 351 // The approximation is because the various components
duke@435 352 // that are read below are not read atomically (and
duke@435 353 // further the computation of totalSizeInIndexedFreeLists()
duke@435 354 // is itself a non-atomic computation). The normal use of
duke@435 355 // this is during a resize operation at the end of GC
duke@435 356 // and at that time you are guaranteed to get the
duke@435 357 // correct actual value. However, for instance, this is
duke@435 358 // also read completely asynchronously by the "perf-sampler"
duke@435 359 // that supports jvmstat, and you are apt to see the values
duke@435 360 // flicker in such cases.
duke@435 361 assert(_dictionary != NULL, "No _dictionary?");
duke@435 362 return (_dictionary->totalChunkSize(DEBUG_ONLY(freelistLock())) +
duke@435 363 totalSizeInIndexedFreeLists() +
duke@435 364 _smallLinearAllocBlock._word_size) * HeapWordSize;
duke@435 365 }
duke@435 366
duke@435 367 size_t CompactibleFreeListSpace::max_alloc_in_words() const {
duke@435 368 assert(_dictionary != NULL, "No _dictionary?");
duke@435 369 assert_locked();
duke@435 370 size_t res = _dictionary->maxChunkSize();
duke@435 371 res = MAX2(res, MIN2(_smallLinearAllocBlock._word_size,
duke@435 372 (size_t) SmallForLinearAlloc - 1));
duke@435 373 // XXX the following could potentially be pretty slow;
duke@435 374 // should one, pessimistically for the rare cases when res
duke@435 375 // calculated above is less than IndexSetSize,
duke@435 376 // just return res calculated above? My reasoning was that
duke@435 377 // those cases will be so rare that the extra time spent doesn't
duke@435 378 // really matter....
duke@435 379 // Note: do not change the loop test i >= res + IndexSetStride
duke@435 380 // to i > res below, because i is unsigned and res may be zero.
duke@435 381 for (size_t i = IndexSetSize - 1; i >= res + IndexSetStride;
duke@435 382 i -= IndexSetStride) {
duke@435 383 if (_indexedFreeList[i].head() != NULL) {
duke@435 384 assert(_indexedFreeList[i].count() != 0, "Inconsistent FreeList");
duke@435 385 return i;
duke@435 386 }
duke@435 387 }
duke@435 388 return res;
duke@435 389 }
duke@435 390
duke@435 391 void CompactibleFreeListSpace::reportFreeListStatistics() const {
duke@435 392 assert_lock_strong(&_freelistLock);
duke@435 393 assert(PrintFLSStatistics != 0, "Reporting error");
duke@435 394 _dictionary->reportStatistics();
duke@435 395 if (PrintFLSStatistics > 1) {
duke@435 396 reportIndexedFreeListStatistics();
duke@435 397 size_t totalSize = totalSizeInIndexedFreeLists() +
duke@435 398 _dictionary->totalChunkSize(DEBUG_ONLY(freelistLock()));
duke@435 399 gclog_or_tty->print(" free=" SIZE_FORMAT " frag=%1.4f\n", totalSize, flsFrag());
duke@435 400 }
duke@435 401 }
duke@435 402
duke@435 403 void CompactibleFreeListSpace::reportIndexedFreeListStatistics() const {
duke@435 404 assert_lock_strong(&_freelistLock);
duke@435 405 gclog_or_tty->print("Statistics for IndexedFreeLists:\n"
duke@435 406 "--------------------------------\n");
duke@435 407 size_t totalSize = totalSizeInIndexedFreeLists();
duke@435 408 size_t freeBlocks = numFreeBlocksInIndexedFreeLists();
duke@435 409 gclog_or_tty->print("Total Free Space: " SIZE_FORMAT "\n", totalSize);
duke@435 410 gclog_or_tty->print("Max Chunk Size: " SIZE_FORMAT "\n", maxChunkSizeInIndexedFreeLists());
duke@435 411 gclog_or_tty->print("Number of Blocks: " SIZE_FORMAT "\n", freeBlocks);
duke@435 412 if (freeBlocks != 0) {
duke@435 413 gclog_or_tty->print("Av. Block Size: " SIZE_FORMAT "\n", totalSize/freeBlocks);
duke@435 414 }
duke@435 415 }
duke@435 416
duke@435 417 size_t CompactibleFreeListSpace::numFreeBlocksInIndexedFreeLists() const {
duke@435 418 size_t res = 0;
duke@435 419 for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
duke@435 420 debug_only(
duke@435 421 ssize_t recount = 0;
duke@435 422 for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
duke@435 423 fc = fc->next()) {
duke@435 424 recount += 1;
duke@435 425 }
duke@435 426 assert(recount == _indexedFreeList[i].count(),
duke@435 427 "Incorrect count in list");
duke@435 428 )
duke@435 429 res += _indexedFreeList[i].count();
duke@435 430 }
duke@435 431 return res;
duke@435 432 }
duke@435 433
duke@435 434 size_t CompactibleFreeListSpace::maxChunkSizeInIndexedFreeLists() const {
duke@435 435 for (size_t i = IndexSetSize - 1; i != 0; i -= IndexSetStride) {
duke@435 436 if (_indexedFreeList[i].head() != NULL) {
duke@435 437 assert(_indexedFreeList[i].count() != 0, "Inconsistent FreeList");
duke@435 438 return (size_t)i;
duke@435 439 }
duke@435 440 }
duke@435 441 return 0;
duke@435 442 }
duke@435 443
duke@435 444 void CompactibleFreeListSpace::set_end(HeapWord* value) {
duke@435 445 HeapWord* prevEnd = end();
duke@435 446 assert(prevEnd != value, "unnecessary set_end call");
duke@435 447 assert(prevEnd == NULL || value >= unallocated_block(), "New end is below unallocated block");
duke@435 448 _end = value;
duke@435 449 if (prevEnd != NULL) {
duke@435 450 // Resize the underlying block offset table.
duke@435 451 _bt.resize(pointer_delta(value, bottom()));
duke@435 452 if (value <= prevEnd) {
duke@435 453 assert(value >= unallocated_block(), "New end is below unallocated block");
duke@435 454 } else {
duke@435 455 // Now, take this new chunk and add it to the free blocks.
duke@435 456 // Note that the BOT has not yet been updated for this block.
duke@435 457 size_t newFcSize = pointer_delta(value, prevEnd);
duke@435 458 // XXX This is REALLY UGLY and should be fixed up. XXX
duke@435 459 if (!_adaptive_freelists && _smallLinearAllocBlock._ptr == NULL) {
duke@435 460 // Mark the boundary of the new block in BOT
duke@435 461 _bt.mark_block(prevEnd, value);
duke@435 462 // put it all in the linAB
duke@435 463 if (ParallelGCThreads == 0) {
duke@435 464 _smallLinearAllocBlock._ptr = prevEnd;
duke@435 465 _smallLinearAllocBlock._word_size = newFcSize;
duke@435 466 repairLinearAllocBlock(&_smallLinearAllocBlock);
duke@435 467 } else { // ParallelGCThreads > 0
duke@435 468 MutexLockerEx x(parDictionaryAllocLock(),
duke@435 469 Mutex::_no_safepoint_check_flag);
duke@435 470 _smallLinearAllocBlock._ptr = prevEnd;
duke@435 471 _smallLinearAllocBlock._word_size = newFcSize;
duke@435 472 repairLinearAllocBlock(&_smallLinearAllocBlock);
duke@435 473 }
duke@435 474 // Births of chunks put into a LinAB are not recorded. Births
duke@435 475 // of chunks as they are allocated out of a LinAB are.
duke@435 476 } else {
duke@435 477 // Add the block to the free lists, if possible coalescing it
duke@435 478 // with the last free block, and update the BOT and census data.
duke@435 479 addChunkToFreeListsAtEndRecordingStats(prevEnd, newFcSize);
duke@435 480 }
duke@435 481 }
duke@435 482 }
duke@435 483 }
duke@435 484
duke@435 485 class FreeListSpace_DCTOC : public Filtering_DCTOC {
duke@435 486 CompactibleFreeListSpace* _cfls;
duke@435 487 CMSCollector* _collector;
duke@435 488 protected:
duke@435 489 // Override.
duke@435 490 #define walk_mem_region_with_cl_DECL(ClosureType) \
duke@435 491 virtual void walk_mem_region_with_cl(MemRegion mr, \
duke@435 492 HeapWord* bottom, HeapWord* top, \
duke@435 493 ClosureType* cl); \
duke@435 494 void walk_mem_region_with_cl_par(MemRegion mr, \
duke@435 495 HeapWord* bottom, HeapWord* top, \
duke@435 496 ClosureType* cl); \
duke@435 497 void walk_mem_region_with_cl_nopar(MemRegion mr, \
duke@435 498 HeapWord* bottom, HeapWord* top, \
duke@435 499 ClosureType* cl)
duke@435 500 walk_mem_region_with_cl_DECL(OopClosure);
duke@435 501 walk_mem_region_with_cl_DECL(FilteringClosure);
duke@435 502
duke@435 503 public:
duke@435 504 FreeListSpace_DCTOC(CompactibleFreeListSpace* sp,
duke@435 505 CMSCollector* collector,
duke@435 506 OopClosure* cl,
duke@435 507 CardTableModRefBS::PrecisionStyle precision,
duke@435 508 HeapWord* boundary) :
duke@435 509 Filtering_DCTOC(sp, cl, precision, boundary),
duke@435 510 _cfls(sp), _collector(collector) {}
duke@435 511 };
duke@435 512
duke@435 513 // We de-virtualize the block-related calls below, since we know that our
duke@435 514 // space is a CompactibleFreeListSpace.
duke@435 515 #define FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(ClosureType) \
duke@435 516 void FreeListSpace_DCTOC::walk_mem_region_with_cl(MemRegion mr, \
duke@435 517 HeapWord* bottom, \
duke@435 518 HeapWord* top, \
duke@435 519 ClosureType* cl) { \
duke@435 520 if (SharedHeap::heap()->n_par_threads() > 0) { \
duke@435 521 walk_mem_region_with_cl_par(mr, bottom, top, cl); \
duke@435 522 } else { \
duke@435 523 walk_mem_region_with_cl_nopar(mr, bottom, top, cl); \
duke@435 524 } \
duke@435 525 } \
duke@435 526 void FreeListSpace_DCTOC::walk_mem_region_with_cl_par(MemRegion mr, \
duke@435 527 HeapWord* bottom, \
duke@435 528 HeapWord* top, \
duke@435 529 ClosureType* cl) { \
duke@435 530 /* Skip parts that are before "mr", in case "block_start" sent us \
duke@435 531 back too far. */ \
duke@435 532 HeapWord* mr_start = mr.start(); \
duke@435 533 size_t bot_size = _cfls->CompactibleFreeListSpace::block_size(bottom); \
duke@435 534 HeapWord* next = bottom + bot_size; \
duke@435 535 while (next < mr_start) { \
duke@435 536 bottom = next; \
duke@435 537 bot_size = _cfls->CompactibleFreeListSpace::block_size(bottom); \
duke@435 538 next = bottom + bot_size; \
duke@435 539 } \
duke@435 540 \
duke@435 541 while (bottom < top) { \
duke@435 542 if (_cfls->CompactibleFreeListSpace::block_is_obj(bottom) && \
duke@435 543 !_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks( \
duke@435 544 oop(bottom)) && \
duke@435 545 !_collector->CMSCollector::is_dead_obj(oop(bottom))) { \
duke@435 546 size_t word_sz = oop(bottom)->oop_iterate(cl, mr); \
duke@435 547 bottom += _cfls->adjustObjectSize(word_sz); \
duke@435 548 } else { \
duke@435 549 bottom += _cfls->CompactibleFreeListSpace::block_size(bottom); \
duke@435 550 } \
duke@435 551 } \
duke@435 552 } \
duke@435 553 void FreeListSpace_DCTOC::walk_mem_region_with_cl_nopar(MemRegion mr, \
duke@435 554 HeapWord* bottom, \
duke@435 555 HeapWord* top, \
duke@435 556 ClosureType* cl) { \
duke@435 557 /* Skip parts that are before "mr", in case "block_start" sent us \
duke@435 558 back too far. */ \
duke@435 559 HeapWord* mr_start = mr.start(); \
duke@435 560 size_t bot_size = _cfls->CompactibleFreeListSpace::block_size_nopar(bottom); \
duke@435 561 HeapWord* next = bottom + bot_size; \
duke@435 562 while (next < mr_start) { \
duke@435 563 bottom = next; \
duke@435 564 bot_size = _cfls->CompactibleFreeListSpace::block_size_nopar(bottom); \
duke@435 565 next = bottom + bot_size; \
duke@435 566 } \
duke@435 567 \
duke@435 568 while (bottom < top) { \
duke@435 569 if (_cfls->CompactibleFreeListSpace::block_is_obj_nopar(bottom) && \
duke@435 570 !_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks( \
duke@435 571 oop(bottom)) && \
duke@435 572 !_collector->CMSCollector::is_dead_obj(oop(bottom))) { \
duke@435 573 size_t word_sz = oop(bottom)->oop_iterate(cl, mr); \
duke@435 574 bottom += _cfls->adjustObjectSize(word_sz); \
duke@435 575 } else { \
duke@435 576 bottom += _cfls->CompactibleFreeListSpace::block_size_nopar(bottom); \
duke@435 577 } \
duke@435 578 } \
duke@435 579 }
duke@435 580
duke@435 581 // (There are only two of these, rather than N, because the split is due
duke@435 582 // only to the introduction of the FilteringClosure, a local part of the
duke@435 583 // impl of this abstraction.)
duke@435 584 FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(OopClosure)
duke@435 585 FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(FilteringClosure)
duke@435 586
duke@435 587 DirtyCardToOopClosure*
duke@435 588 CompactibleFreeListSpace::new_dcto_cl(OopClosure* cl,
duke@435 589 CardTableModRefBS::PrecisionStyle precision,
duke@435 590 HeapWord* boundary) {
duke@435 591 return new FreeListSpace_DCTOC(this, _collector, cl, precision, boundary);
duke@435 592 }
duke@435 593
duke@435 594
duke@435 595 // Note on locking for the space iteration functions:
duke@435 596 // since the collector's iteration activities are concurrent with
duke@435 597 // allocation activities by mutators, absent a suitable mutual exclusion
duke@435 598 // mechanism the iterators may go awry. For instance, a block being iterated
duke@435 599 // may suddenly be allocated or divided up and part of it allocated and
duke@435 600 // so on.
duke@435 601
duke@435 602 // Apply the given closure to each block in the space.
duke@435 603 void CompactibleFreeListSpace::blk_iterate_careful(BlkClosureCareful* cl) {
duke@435 604 assert_lock_strong(freelistLock());
duke@435 605 HeapWord *cur, *limit;
duke@435 606 for (cur = bottom(), limit = end(); cur < limit;
duke@435 607 cur += cl->do_blk_careful(cur));
duke@435 608 }
duke@435 609
duke@435 610 // Apply the given closure to each block in the space.
duke@435 611 void CompactibleFreeListSpace::blk_iterate(BlkClosure* cl) {
duke@435 612 assert_lock_strong(freelistLock());
duke@435 613 HeapWord *cur, *limit;
duke@435 614 for (cur = bottom(), limit = end(); cur < limit;
duke@435 615 cur += cl->do_blk(cur));
duke@435 616 }
duke@435 617
duke@435 618 // Apply the given closure to each oop in the space.
duke@435 619 void CompactibleFreeListSpace::oop_iterate(OopClosure* cl) {
duke@435 620 assert_lock_strong(freelistLock());
duke@435 621 HeapWord *cur, *limit;
duke@435 622 size_t curSize;
duke@435 623 for (cur = bottom(), limit = end(); cur < limit;
duke@435 624 cur += curSize) {
duke@435 625 curSize = block_size(cur);
duke@435 626 if (block_is_obj(cur)) {
duke@435 627 oop(cur)->oop_iterate(cl);
duke@435 628 }
duke@435 629 }
duke@435 630 }
duke@435 631
duke@435 632 // Apply the given closure to each oop in the space intersected with the memory region.
duke@435 633 void CompactibleFreeListSpace::oop_iterate(MemRegion mr, OopClosure* cl) {
duke@435 634 assert_lock_strong(freelistLock());
duke@435 635 if (is_empty()) {
duke@435 636 return;
duke@435 637 }
duke@435 638 MemRegion cur = MemRegion(bottom(), end());
duke@435 639 mr = mr.intersection(cur);
duke@435 640 if (mr.is_empty()) {
duke@435 641 return;
duke@435 642 }
duke@435 643 if (mr.equals(cur)) {
duke@435 644 oop_iterate(cl);
duke@435 645 return;
duke@435 646 }
duke@435 647 assert(mr.end() <= end(), "just took an intersection above");
duke@435 648 HeapWord* obj_addr = block_start(mr.start());
duke@435 649 HeapWord* t = mr.end();
duke@435 650
duke@435 651 SpaceMemRegionOopsIterClosure smr_blk(cl, mr);
duke@435 652 if (block_is_obj(obj_addr)) {
duke@435 653 // Handle first object specially.
duke@435 654 oop obj = oop(obj_addr);
duke@435 655 obj_addr += adjustObjectSize(obj->oop_iterate(&smr_blk));
duke@435 656 } else {
duke@435 657 FreeChunk* fc = (FreeChunk*)obj_addr;
duke@435 658 obj_addr += fc->size();
duke@435 659 }
duke@435 660 while (obj_addr < t) {
duke@435 661 HeapWord* obj = obj_addr;
duke@435 662 obj_addr += block_size(obj_addr);
duke@435 663 // If "obj_addr" is not greater than t (the region end), then the
duke@435 664 // entire object "obj" is within the region.
duke@435 665 if (obj_addr <= t) {
duke@435 666 if (block_is_obj(obj)) {
duke@435 667 oop(obj)->oop_iterate(cl);
duke@435 668 }
duke@435 669 } else {
duke@435 670 // "obj" extends beyond end of region
duke@435 671 if (block_is_obj(obj)) {
duke@435 672 oop(obj)->oop_iterate(&smr_blk);
duke@435 673 }
duke@435 674 break;
duke@435 675 }
duke@435 676 }
duke@435 677 }
duke@435 678
duke@435 679 // NOTE: In the following methods, in order to safely be able to
duke@435 680 // apply the closure to an object, we need to be sure that the
duke@435 681 // object has been initialized. We are guaranteed that an object
duke@435 682 // is initialized if we are holding the Heap_lock with the
duke@435 683 // world stopped.
duke@435 684 void CompactibleFreeListSpace::verify_objects_initialized() const {
duke@435 685 if (is_init_completed()) {
duke@435 686 assert_locked_or_safepoint(Heap_lock);
duke@435 687 if (Universe::is_fully_initialized()) {
duke@435 688 guarantee(SafepointSynchronize::is_at_safepoint(),
duke@435 689 "Required for objects to be initialized");
duke@435 690 }
duke@435 691 } // else make a concession at vm start-up
duke@435 692 }
duke@435 693
duke@435 694 // Apply the given closure to each object in the space
duke@435 695 void CompactibleFreeListSpace::object_iterate(ObjectClosure* blk) {
duke@435 696 assert_lock_strong(freelistLock());
duke@435 697 NOT_PRODUCT(verify_objects_initialized());
duke@435 698 HeapWord *cur, *limit;
duke@435 699 size_t curSize;
duke@435 700 for (cur = bottom(), limit = end(); cur < limit;
duke@435 701 cur += curSize) {
duke@435 702 curSize = block_size(cur);
duke@435 703 if (block_is_obj(cur)) {
duke@435 704 blk->do_object(oop(cur));
duke@435 705 }
duke@435 706 }
duke@435 707 }
duke@435 708
jmasa@952 709 // Apply the given closure to each live object in the space
jmasa@952 710 // The usage of CompactibleFreeListSpace
jmasa@952 711 // by the ConcurrentMarkSweepGeneration for concurrent GCs allows
jmasa@952 712 // objects in the space to contain references to objects that are no longer
jmasa@952 713 // valid. For example, an object may reference another object
jmasa@952 714 // that has already been swept up (collected). This method uses
jmasa@952 715 // obj_is_alive() to determine whether it is safe to apply the closure to
jmasa@952 716 // an object. See obj_is_alive() for details on how liveness of an
jmasa@952 717 // object is decided.
jmasa@952 718
jmasa@952 719 void CompactibleFreeListSpace::safe_object_iterate(ObjectClosure* blk) {
jmasa@952 720 assert_lock_strong(freelistLock());
jmasa@952 721 NOT_PRODUCT(verify_objects_initialized());
jmasa@952 722 HeapWord *cur, *limit;
jmasa@952 723 size_t curSize;
jmasa@952 724 for (cur = bottom(), limit = end(); cur < limit;
jmasa@952 725 cur += curSize) {
jmasa@952 726 curSize = block_size(cur);
jmasa@952 727 if (block_is_obj(cur) && obj_is_alive(cur)) {
jmasa@952 728 blk->do_object(oop(cur));
jmasa@952 729 }
jmasa@952 730 }
jmasa@952 731 }
jmasa@952 732
duke@435 733 void CompactibleFreeListSpace::object_iterate_mem(MemRegion mr,
duke@435 734 UpwardsObjectClosure* cl) {
duke@435 735 assert_locked();
duke@435 736 NOT_PRODUCT(verify_objects_initialized());
duke@435 737 Space::object_iterate_mem(mr, cl);
duke@435 738 }
duke@435 739
duke@435 740 // Callers of this iterator beware: The closure application should
duke@435 741 // be robust in the face of uninitialized objects and should (always)
duke@435 742 // return a correct size so that the next addr + size below gives us a
duke@435 743 // valid block boundary. [See for instance,
duke@435 744 // ScanMarkedObjectsAgainCarefullyClosure::do_object_careful()
duke@435 745 // in ConcurrentMarkSweepGeneration.cpp.]
duke@435 746 HeapWord*
duke@435 747 CompactibleFreeListSpace::object_iterate_careful(ObjectClosureCareful* cl) {
duke@435 748 assert_lock_strong(freelistLock());
duke@435 749 HeapWord *addr, *last;
duke@435 750 size_t size;
duke@435 751 for (addr = bottom(), last = end();
duke@435 752 addr < last; addr += size) {
duke@435 753 FreeChunk* fc = (FreeChunk*)addr;
duke@435 754 if (fc->isFree()) {
duke@435 755 // Since we hold the free list lock, which protects direct
duke@435 756 // allocation in this generation by mutators, a free object
duke@435 757 // will remain free throughout this iteration code.
duke@435 758 size = fc->size();
duke@435 759 } else {
duke@435 760 // Note that the object need not necessarily be initialized,
duke@435 761 // because (for instance) the free list lock does NOT protect
duke@435 762 // object initialization. The closure application below must
duke@435 763 // therefore be correct in the face of uninitialized objects.
duke@435 764 size = cl->do_object_careful(oop(addr));
duke@435 765 if (size == 0) {
duke@435 766 // An unparsable object found. Signal early termination.
duke@435 767 return addr;
duke@435 768 }
duke@435 769 }
duke@435 770 }
duke@435 771 return NULL;
duke@435 772 }
duke@435 773
duke@435 774 // Callers of this iterator beware: The closure application should
duke@435 775 // be robust in the face of uninitialized objects and should (always)
duke@435 776 // return a correct size so that the next addr + size below gives us a
duke@435 777 // valid block boundary. [See for instance,
duke@435 778 // ScanMarkedObjectsAgainCarefullyClosure::do_object_careful()
duke@435 779 // in ConcurrentMarkSweepGeneration.cpp.]
duke@435 780 HeapWord*
duke@435 781 CompactibleFreeListSpace::object_iterate_careful_m(MemRegion mr,
duke@435 782 ObjectClosureCareful* cl) {
duke@435 783 assert_lock_strong(freelistLock());
duke@435 784 // Can't use used_region() below because it may not necessarily
duke@435 785 // be the same as [bottom(),end()); although we could
duke@435 786 // use [used_region().start(),round_to(used_region().end(),CardSize)),
duke@435 787 // that appears too cumbersome, so we just do the simpler check
duke@435 788 // in the assertion below.
duke@435 789 assert(!mr.is_empty() && MemRegion(bottom(),end()).contains(mr),
duke@435 790 "mr should be non-empty and within used space");
duke@435 791 HeapWord *addr, *end;
duke@435 792 size_t size;
duke@435 793 for (addr = block_start_careful(mr.start()), end = mr.end();
duke@435 794 addr < end; addr += size) {
duke@435 795 FreeChunk* fc = (FreeChunk*)addr;
duke@435 796 if (fc->isFree()) {
duke@435 797 // Since we hold the free list lock, which protects direct
duke@435 798 // allocation in this generation by mutators, a free object
duke@435 799 // will remain free throughout this iteration code.
duke@435 800 size = fc->size();
duke@435 801 } else {
duke@435 802 // Note that the object need not necessarily be initialized,
duke@435 803 // because (for instance) the free list lock does NOT protect
duke@435 804 // object initialization. The closure application below must
duke@435 805 // therefore be correct in the face of uninitialized objects.
duke@435 806 size = cl->do_object_careful_m(oop(addr), mr);
duke@435 807 if (size == 0) {
duke@435 808 // An unparsable object found. Signal early termination.
duke@435 809 return addr;
duke@435 810 }
duke@435 811 }
duke@435 812 }
duke@435 813 return NULL;
duke@435 814 }
duke@435 815
duke@435 816
ysr@777 817 HeapWord* CompactibleFreeListSpace::block_start_const(const void* p) const {
duke@435 818 NOT_PRODUCT(verify_objects_initialized());
duke@435 819 return _bt.block_start(p);
duke@435 820 }
duke@435 821
duke@435 822 HeapWord* CompactibleFreeListSpace::block_start_careful(const void* p) const {
duke@435 823 return _bt.block_start_careful(p);
duke@435 824 }
duke@435 825
duke@435 826 size_t CompactibleFreeListSpace::block_size(const HeapWord* p) const {
duke@435 827 NOT_PRODUCT(verify_objects_initialized());
duke@435 828 assert(MemRegion(bottom(), end()).contains(p), "p not in space");
duke@435 829 // This must be volatile, or else there is a danger that the compiler
duke@435 830 // will compile the code below into a sometimes-infinite loop, by keeping
duke@435 831 // the value read the first time in a register.
duke@435 832 while (true) {
duke@435 833 // We must do this until we get a consistent view of the object.
coleenp@622 834 if (FreeChunk::indicatesFreeChunk(p)) {
coleenp@622 835 volatile FreeChunk* fc = (volatile FreeChunk*)p;
coleenp@622 836 size_t res = fc->size();
coleenp@622 837 // If the object is still a free chunk, return the size, else it
coleenp@622 838 // has been allocated so try again.
coleenp@622 839 if (FreeChunk::indicatesFreeChunk(p)) {
duke@435 840 assert(res != 0, "Block size should not be 0");
duke@435 841 return res;
duke@435 842 }
coleenp@622 843 } else {
coleenp@622 844 // must read from what 'p' points to in each loop.
coleenp@622 845 klassOop k = ((volatile oopDesc*)p)->klass_or_null();
coleenp@622 846 if (k != NULL) {
coleenp@622 847 assert(k->is_oop(true /* ignore mark word */), "Should really be klass oop.");
coleenp@622 848 oop o = (oop)p;
coleenp@622 849 assert(o->is_parsable(), "Should be parsable");
coleenp@622 850 assert(o->is_oop(true /* ignore mark word */), "Should be an oop.");
coleenp@622 851 size_t res = o->size_given_klass(k->klass_part());
coleenp@622 852 res = adjustObjectSize(res);
coleenp@622 853 assert(res != 0, "Block size should not be 0");
coleenp@622 854 return res;
coleenp@622 855 }
duke@435 856 }
duke@435 857 }
duke@435 858 }
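
// Summary of the race the loop above defends against (inferred from the code
// rather than from any external specification): a parallel allocator may claim a
// free chunk and install the object's klass pointer only later, so a reader can
// observe three transient states at 'p':
//   1. still a free chunk          - indicatesFreeChunk(p) holds; use fc->size(),
//                                    re-checking the flag after the read;
//   2. claimed, klass not yet set  - klass_or_null() returns NULL; retry the loop;
//   3. initialized object          - compute the size from the klass and adjust it.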
duke@435 859
duke@435 860 // A variant of the above that uses the Printezis bits for
duke@435 861 // unparsable but allocated objects. This avoids any possible
duke@435 862 // stalls waiting for mutators to initialize objects, and is
duke@435 863 // thus potentially faster than the variant above. However,
duke@435 864 // this variant may return a zero size for a block that is
duke@435 865 // under mutation and for which a consistent size cannot be
duke@435 866 // inferred without stalling; see CMSCollector::block_size_if_printezis_bits().
duke@435 867 size_t CompactibleFreeListSpace::block_size_no_stall(HeapWord* p,
duke@435 868 const CMSCollector* c)
duke@435 869 const {
duke@435 870 assert(MemRegion(bottom(), end()).contains(p), "p not in space");
duke@435 871 // This must be volatile, or else there is a danger that the compiler
duke@435 872 // will compile the code below into a sometimes-infinite loop, by keeping
duke@435 873 // the value read the first time in a register.
duke@435 874 DEBUG_ONLY(uint loops = 0;)
duke@435 875 while (true) {
duke@435 876 // We must do this until we get a consistent view of the object.
coleenp@622 877 if (FreeChunk::indicatesFreeChunk(p)) {
coleenp@622 878 volatile FreeChunk* fc = (volatile FreeChunk*)p;
coleenp@622 879 size_t res = fc->size();
coleenp@622 880 if (FreeChunk::indicatesFreeChunk(p)) {
duke@435 881 assert(res != 0, "Block size should not be 0");
duke@435 882 assert(loops == 0, "Should be 0");
duke@435 883 return res;
duke@435 884 }
duke@435 885 } else {
coleenp@622 886 // must read from what 'p' points to in each loop.
coleenp@622 887 klassOop k = ((volatile oopDesc*)p)->klass_or_null();
jmasa@953 888 if (k != NULL &&
jmasa@953 889 ((oopDesc*)p)->is_parsable() &&
jmasa@953 890 ((oopDesc*)p)->is_conc_safe()) {
coleenp@622 891 assert(k->is_oop(), "Should really be klass oop.");
coleenp@622 892 oop o = (oop)p;
coleenp@622 893 assert(o->is_oop(), "Should be an oop");
coleenp@622 894 size_t res = o->size_given_klass(k->klass_part());
coleenp@622 895 res = adjustObjectSize(res);
coleenp@622 896 assert(res != 0, "Block size should not be 0");
coleenp@622 897 return res;
coleenp@622 898 } else {
coleenp@622 899 return c->block_size_if_printezis_bits(p);
coleenp@622 900 }
duke@435 901 }
duke@435 902 assert(loops == 0, "Can loop at most once");
duke@435 903 DEBUG_ONLY(loops++;)
duke@435 904 }
duke@435 905 }
duke@435 906
duke@435 907 size_t CompactibleFreeListSpace::block_size_nopar(const HeapWord* p) const {
duke@435 908 NOT_PRODUCT(verify_objects_initialized());
duke@435 909 assert(MemRegion(bottom(), end()).contains(p), "p not in space");
duke@435 910 FreeChunk* fc = (FreeChunk*)p;
duke@435 911 if (fc->isFree()) {
duke@435 912 return fc->size();
duke@435 913 } else {
duke@435 914 // Ignore mark word because this may be a recently promoted
duke@435 915 // object whose mark word is used to chain together grey
duke@435 916 // objects (the last one would have a null value).
duke@435 917 assert(oop(p)->is_oop(true), "Should be an oop");
duke@435 918 return adjustObjectSize(oop(p)->size());
duke@435 919 }
duke@435 920 }
duke@435 921
duke@435 922 // This implementation assumes that the property of "being an object" is
duke@435 923 // stable. But being a free chunk may not be (because of parallel
duke@435 924 // promotion.)
duke@435 925 bool CompactibleFreeListSpace::block_is_obj(const HeapWord* p) const {
duke@435 926 FreeChunk* fc = (FreeChunk*)p;
duke@435 927 assert(is_in_reserved(p), "Should be in space");
duke@435 928 // When doing a mark-sweep-compact of the CMS generation, this
duke@435 929 // assertion may fail because prepare_for_compaction() uses
duke@435 930 // space that is garbage to maintain information on ranges of
duke@435 931 // live objects so that these live ranges can be moved as a whole.
duke@435 932 // Comment out this assertion until that problem can be solved
duke@435 933 // (i.e., that the block start calculation may look at objects
duke@435 934 // at addresses below "p" in finding the object that contains "p",
duke@435 935 // and those objects (if garbage) may have been modified to hold
duke@435 936 // live range information).
duke@435 937 // assert(ParallelGCThreads > 0 || _bt.block_start(p) == p, "Should be a block boundary");
coleenp@622 938 if (FreeChunk::indicatesFreeChunk(p)) return false;
coleenp@622 939 klassOop k = oop(p)->klass_or_null();
duke@435 940 if (k != NULL) {
duke@435 941 // Ignore mark word because it may have been used to
duke@435 942 // chain together promoted objects (the last one
duke@435 943 // would have a null value).
duke@435 944 assert(oop(p)->is_oop(true), "Should be an oop");
duke@435 945 return true;
duke@435 946 } else {
duke@435 947 return false; // Was not an object at the start of collection.
duke@435 948 }
duke@435 949 }
duke@435 950
duke@435 951 // Check if the object is alive. This fact is checked either by consulting
duke@435 952 // the main marking bitmap in the sweeping phase or, if it's a permanent
duke@435 953 // generation and we're not in the sweeping phase, by checking the
duke@435 954 // perm_gen_verify_bit_map where we store the "deadness" information if
duke@435 955 // we did not sweep the perm gen in the most recent previous GC cycle.
duke@435 956 bool CompactibleFreeListSpace::obj_is_alive(const HeapWord* p) const {
duke@435 957 assert (block_is_obj(p), "The address should point to an object");
duke@435 958
duke@435 959 // If we're sweeping, we use object liveness information from the main bit map
duke@435 960 // for both perm gen and old gen.
duke@435 961 // We don't need to lock the bitmap (live_map or dead_map below), because
duke@435 962 // EITHER we are in the middle of the sweeping phase, and the
duke@435 963 // main marking bit map (live_map below) is locked,
duke@435 964 // OR we're in other phases and perm_gen_verify_bit_map (dead_map below)
duke@435 965 // is stable, because it's mutated only in the sweeping phase.
duke@435 966 if (_collector->abstract_state() == CMSCollector::Sweeping) {
duke@435 967 CMSBitMap* live_map = _collector->markBitMap();
duke@435 968 return live_map->isMarked((HeapWord*) p);
duke@435 969 } else {
duke@435 970 // If we're not currently sweeping and we haven't swept the perm gen in
duke@435 971 // the previous concurrent cycle then we may have dead but unswept objects
duke@435 972 // in the perm gen. In this case, we use the "deadness" information
duke@435 973 // that we had saved in perm_gen_verify_bit_map at the last sweep.
duke@435 974 if (!CMSClassUnloadingEnabled && _collector->_permGen->reserved().contains(p)) {
duke@435 975 if (_collector->verifying()) {
duke@435 976 CMSBitMap* dead_map = _collector->perm_gen_verify_bit_map();
duke@435 977 // Object is marked in the dead_map bitmap at the previous sweep
duke@435 978 // when we know that it's dead; if the bitmap is not allocated then
duke@435 979 // the object is alive.
duke@435 980 return (dead_map->sizeInBits() == 0) // dead_map not yet allocated, hence object alive
duke@435 981 || !dead_map->par_isMarked((HeapWord*) p);
duke@435 982 } else {
duke@435 983 return false; // We can't say for sure if it's live, so we say that it's dead.
duke@435 984 }
duke@435 985 }
duke@435 986 }
duke@435 987 return true;
duke@435 988 }
duke@435 989
duke@435 990 bool CompactibleFreeListSpace::block_is_obj_nopar(const HeapWord* p) const {
duke@435 991 FreeChunk* fc = (FreeChunk*)p;
duke@435 992 assert(is_in_reserved(p), "Should be in space");
duke@435 993 assert(_bt.block_start(p) == p, "Should be a block boundary");
duke@435 994 if (!fc->isFree()) {
duke@435 995 // Ignore mark word because it may have been used to
duke@435 996 // chain together promoted objects (the last one
duke@435 997 // would have a null value).
duke@435 998 assert(oop(p)->is_oop(true), "Should be an oop");
duke@435 999 return true;
duke@435 1000 }
duke@435 1001 return false;
duke@435 1002 }
duke@435 1003
duke@435 1004 // "MT-safe but not guaranteed MT-precise" (TM); you may get an
duke@435 1005 // approximate answer if you don't hold the freelistLock when you call this.
duke@435 1006 size_t CompactibleFreeListSpace::totalSizeInIndexedFreeLists() const {
duke@435 1007 size_t size = 0;
duke@435 1008 for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
duke@435 1009 debug_only(
duke@435 1010 // We may be calling here without the lock in which case we
duke@435 1011 // won't do this modest sanity check.
duke@435 1012 if (freelistLock()->owned_by_self()) {
duke@435 1013 size_t total_list_size = 0;
duke@435 1014 for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
duke@435 1015 fc = fc->next()) {
duke@435 1016 total_list_size += i;
duke@435 1017 }
duke@435 1018 assert(total_list_size == i * _indexedFreeList[i].count(),
duke@435 1019 "Count in list is incorrect");
duke@435 1020 }
duke@435 1021 )
duke@435 1022 size += i * _indexedFreeList[i].count();
duke@435 1023 }
duke@435 1024 return size;
duke@435 1025 }
duke@435 1026
duke@435 1027 HeapWord* CompactibleFreeListSpace::par_allocate(size_t size) {
duke@435 1028 MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
duke@435 1029 return allocate(size);
duke@435 1030 }
duke@435 1031
duke@435 1032 HeapWord*
duke@435 1033 CompactibleFreeListSpace::getChunkFromSmallLinearAllocBlockRemainder(size_t size) {
duke@435 1034 return getChunkFromLinearAllocBlockRemainder(&_smallLinearAllocBlock, size);
duke@435 1035 }
duke@435 1036
duke@435 1037 HeapWord* CompactibleFreeListSpace::allocate(size_t size) {
duke@435 1038 assert_lock_strong(freelistLock());
duke@435 1039 HeapWord* res = NULL;
duke@435 1040 assert(size == adjustObjectSize(size),
duke@435 1041 "use adjustObjectSize() before calling into allocate()");
duke@435 1042
duke@435 1043 if (_adaptive_freelists) {
duke@435 1044 res = allocate_adaptive_freelists(size);
duke@435 1045 } else { // non-adaptive free lists
duke@435 1046 res = allocate_non_adaptive_freelists(size);
duke@435 1047 }
duke@435 1048
duke@435 1049 if (res != NULL) {
duke@435 1050 // check that res does lie in this space!
duke@435 1051 assert(is_in_reserved(res), "Not in this space!");
duke@435 1052 assert(is_aligned((void*)res), "alignment check");
duke@435 1053
duke@435 1054 FreeChunk* fc = (FreeChunk*)res;
duke@435 1055 fc->markNotFree();
duke@435 1056 assert(!fc->isFree(), "shouldn't be marked free");
coleenp@622 1057 assert(oop(fc)->klass_or_null() == NULL, "should look uninitialized");
duke@435 1058 // Verify that the block offset table shows this to
duke@435 1059 // be a single block, but not one which is unallocated.
duke@435 1060 _bt.verify_single_block(res, size);
duke@435 1061 _bt.verify_not_unallocated(res, size);
duke@435 1062 // mangle a just allocated object with a distinct pattern.
duke@435 1063 debug_only(fc->mangleAllocated(size));
duke@435 1064 }
duke@435 1065
duke@435 1066 return res;
duke@435 1067 }
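
// Usage sketch (hypothetical caller; assumes the caller can reach the space's
// freelistLock(), mirroring par_allocate() above):
//
//   size_t sz = cfls->adjustObjectSize(word_size);
//   MutexLockerEx x(cfls->freelistLock(), Mutex::_no_safepoint_check_flag);
//   HeapWord* res = cfls->allocate(sz);   // NULL if no suitable chunk was found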
duke@435 1068
duke@435 1069 HeapWord* CompactibleFreeListSpace::allocate_non_adaptive_freelists(size_t size) {
duke@435 1070 HeapWord* res = NULL;
duke@435 1071 // try and use linear allocation for smaller blocks
duke@435 1072 if (size < _smallLinearAllocBlock._allocation_size_limit) {
duke@435 1073 // if successful, the following also adjusts block offset table
duke@435 1074 res = getChunkFromSmallLinearAllocBlock(size);
duke@435 1075 }
duke@435 1076 // Else triage to indexed lists for smaller sizes
duke@435 1077 if (res == NULL) {
duke@435 1078 if (size < SmallForDictionary) {
duke@435 1079 res = (HeapWord*) getChunkFromIndexedFreeList(size);
duke@435 1080 } else {
duke@435 1081 // else get it from the big dictionary; if even this doesn't
duke@435 1082 // work we are out of luck.
duke@435 1083 res = (HeapWord*)getChunkFromDictionaryExact(size);
duke@435 1084 }
duke@435 1085 }
duke@435 1086
duke@435 1087 return res;
duke@435 1088 }
duke@435 1089
duke@435 1090 HeapWord* CompactibleFreeListSpace::allocate_adaptive_freelists(size_t size) {
duke@435 1091 assert_lock_strong(freelistLock());
duke@435 1092 HeapWord* res = NULL;
duke@435 1093 assert(size == adjustObjectSize(size),
duke@435 1094 "use adjustObjectSize() before calling into allocate()");
duke@435 1095
duke@435 1096 // Strategy
duke@435 1097 // if small
duke@435 1098 // exact size from small object indexed list if small
duke@435 1099 // small or large linear allocation block (linAB) as appropriate
duke@435 1100 // take from lists of greater sized chunks
duke@435 1101 // else
duke@435 1102 // dictionary
duke@435 1103 // small or large linear allocation block if it has the space
duke@435 1104 // Try allocating exact size from indexTable first
duke@435 1105 if (size < IndexSetSize) {
duke@435 1106 res = (HeapWord*) getChunkFromIndexedFreeList(size);
duke@435 1107 if(res != NULL) {
duke@435 1108 assert(res != (HeapWord*)_indexedFreeList[size].head(),
duke@435 1109 "Not removed from free list");
duke@435 1110 // no block offset table adjustment is necessary on blocks in
duke@435 1111 // the indexed lists.
duke@435 1112
duke@435 1113 // Try allocating from the small LinAB
duke@435 1114 } else if (size < _smallLinearAllocBlock._allocation_size_limit &&
duke@435 1115 (res = getChunkFromSmallLinearAllocBlock(size)) != NULL) {
duke@435 1116 // if successful, the above also adjusts block offset table
duke@435 1117 // Note that this call will refill the LinAB to
duke@435 1118 // satisfy the request. This is different from
duke@435 1119 // evm.
duke@435 1120 // Don't record chunk off a LinAB? smallSplitBirth(size);
duke@435 1121
duke@435 1122 } else {
duke@435 1123 // Raid the exact free lists larger than size, even if they are not
duke@435 1124 // overpopulated.
duke@435 1125 res = (HeapWord*) getChunkFromGreater(size);
duke@435 1126 }
duke@435 1127 } else {
duke@435 1128 // Big objects get allocated directly from the dictionary.
duke@435 1129 res = (HeapWord*) getChunkFromDictionaryExact(size);
duke@435 1130 if (res == NULL) {
duke@435 1131 // Try hard not to fail since an allocation failure will likely
duke@435 1132 // trigger a synchronous GC. Try to get the space from the
duke@435 1133 // allocation blocks.
duke@435 1134 res = getChunkFromSmallLinearAllocBlockRemainder(size);
duke@435 1135 }
duke@435 1136 }
duke@435 1137
duke@435 1138 return res;
duke@435 1139 }
duke@435 1140
duke@435 1141 // A worst-case estimate of the space required (in HeapWords) to expand the heap
duke@435 1142 // when promoting obj.
duke@435 1143 size_t CompactibleFreeListSpace::expansionSpaceRequired(size_t obj_size) const {
duke@435 1144 // Depending on the object size, expansion may require refilling either a
duke@435 1145 // bigLAB or a smallLAB plus refilling a PromotionInfo object. MinChunkSize
duke@435 1146 // is added because the dictionary may over-allocate to avoid fragmentation.
duke@435 1147 size_t space = obj_size;
duke@435 1148 if (!_adaptive_freelists) {
duke@435 1149 space = MAX2(space, _smallLinearAllocBlock._refillSize);
duke@435 1150 }
duke@435 1151 space += _promoInfo.refillSize() + 2 * MinChunkSize;
duke@435 1152 return space;
duke@435 1153 }
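// Worked example with hypothetical values (for illustration only): with
// obj_size == 100 words, a non-adaptive linAB refill size of 1024 words,
// a PromotionInfo refill of 256 words and MinChunkSize == 4 words, the
// estimate is MAX2(100, 1024) + 256 + 2*4 == 1288 words; with adaptive
// free lists the MAX2 term is skipped, giving 100 + 256 + 8 == 364 words.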
duke@435 1154
duke@435 1155 FreeChunk* CompactibleFreeListSpace::getChunkFromGreater(size_t numWords) {
duke@435 1156 FreeChunk* ret;
duke@435 1157
duke@435 1158 assert(numWords >= MinChunkSize, "Size is less than minimum");
duke@435 1159 assert(linearAllocationWouldFail() || bestFitFirst(),
duke@435 1160 "Should not be here");
duke@435 1161
duke@435 1162 size_t i;
duke@435 1163 size_t currSize = numWords + MinChunkSize;
duke@435 1164 assert(currSize % MinObjAlignment == 0, "currSize should be aligned");
duke@435 1165 for (i = currSize; i < IndexSetSize; i += IndexSetStride) {
duke@435 1166 FreeList* fl = &_indexedFreeList[i];
duke@435 1167 if (fl->head()) {
duke@435 1168 ret = getFromListGreater(fl, numWords);
duke@435 1169 assert(ret == NULL || ret->isFree(), "Should be returning a free chunk");
duke@435 1170 return ret;
duke@435 1171 }
duke@435 1172 }
duke@435 1173
duke@435 1174 currSize = MAX2((size_t)SmallForDictionary,
duke@435 1175 (size_t)(numWords + MinChunkSize));
duke@435 1176
duke@435 1177 /* Try to get a chunk that satisfies request, while avoiding
duke@435 1178 fragmentation that can't be handled. */
duke@435 1179 {
duke@435 1180 ret = dictionary()->getChunk(currSize);
duke@435 1181 if (ret != NULL) {
duke@435 1182 assert(ret->size() - numWords >= MinChunkSize,
duke@435 1183 "Chunk is too small");
duke@435 1184 _bt.allocated((HeapWord*)ret, ret->size());
duke@435 1185 /* Carve returned chunk. */
duke@435 1186 (void) splitChunkAndReturnRemainder(ret, numWords);
duke@435 1187 /* Label this as no longer a free chunk. */
duke@435 1188 assert(ret->isFree(), "This chunk should be free");
duke@435 1189 ret->linkPrev(NULL);
duke@435 1190 }
duke@435 1191 assert(ret == NULL || ret->isFree(), "Should be returning a free chunk");
duke@435 1192 return ret;
duke@435 1193 }
duke@435 1194 ShouldNotReachHere();
duke@435 1195 }
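// Note that the search starts at numWords + MinChunkSize: any chunk taken
// from a list (or from the dictionary) then leaves a remainder of at least
// MinChunkSize words after carving out numWords, which is the smallest
// piece splitChunkAndReturnRemainder() can return to a free list.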
duke@435 1196
duke@435 1197 bool CompactibleFreeListSpace::verifyChunkInIndexedFreeLists(FreeChunk* fc)
duke@435 1198 const {
duke@435 1199 assert(fc->size() < IndexSetSize, "Size of chunk is too large");
duke@435 1200 return _indexedFreeList[fc->size()].verifyChunkInFreeLists(fc);
duke@435 1201 }
duke@435 1202
duke@435 1203 bool CompactibleFreeListSpace::verifyChunkInFreeLists(FreeChunk* fc) const {
duke@435 1204 if (fc->size() >= IndexSetSize) {
duke@435 1205 return dictionary()->verifyChunkInFreeLists(fc);
duke@435 1206 } else {
duke@435 1207 return verifyChunkInIndexedFreeLists(fc);
duke@435 1208 }
duke@435 1209 }
duke@435 1210
duke@435 1211 #ifndef PRODUCT
duke@435 1212 void CompactibleFreeListSpace::assert_locked() const {
duke@435 1213 CMSLockVerifier::assert_locked(freelistLock(), parDictionaryAllocLock());
duke@435 1214 }
duke@435 1215 #endif
duke@435 1216
duke@435 1217 FreeChunk* CompactibleFreeListSpace::allocateScratch(size_t size) {
duke@435 1218 // In the parallel case, the main thread holds the free list lock
duke@435 1219 // on behalf of the parallel threads.
duke@435 1220 assert_locked();
duke@435 1221 FreeChunk* fc;
duke@435 1222 {
duke@435 1223 // If GC is parallel, this might be called by several threads.
duke@435 1224 // This should be rare enough that the locking overhead won't affect
duke@435 1225 // the sequential code.
duke@435 1226 MutexLockerEx x(parDictionaryAllocLock(),
duke@435 1227 Mutex::_no_safepoint_check_flag);
duke@435 1228 fc = getChunkFromDictionary(size);
duke@435 1229 }
duke@435 1230 if (fc != NULL) {
duke@435 1231 fc->dontCoalesce();
duke@435 1232 assert(fc->isFree(), "Should be free, but not coalescable");
duke@435 1233 // Verify that the block offset table shows this to
duke@435 1234 // be a single block, but not one which is unallocated.
duke@435 1235 _bt.verify_single_block((HeapWord*)fc, fc->size());
duke@435 1236 _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
duke@435 1237 }
duke@435 1238 return fc;
duke@435 1239 }
duke@435 1240
coleenp@548 1241 oop CompactibleFreeListSpace::promote(oop obj, size_t obj_size) {
duke@435 1242 assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
duke@435 1243 assert_locked();
duke@435 1244
duke@435 1245 // if we are tracking promotions, then first ensure space for
duke@435 1246 // promotion (including spooling space for saving header if necessary).
duke@435 1247 // then allocate and copy, then track promoted info if needed.
duke@435 1248 // When tracking (see PromotionInfo::track()), the mark word may
duke@435 1249 // be displaced and in this case restoration of the mark word
duke@435 1250 // occurs in the (oop_since_save_marks_)iterate phase.
duke@435 1251 if (_promoInfo.tracking() && !_promoInfo.ensure_spooling_space()) {
duke@435 1252 return NULL;
duke@435 1253 }
duke@435 1254 // Call the allocate(size_t, bool) form directly to avoid the
duke@435 1255 // additional call through the allocate(size_t) form. Having
duke@435 1256 // the compiler inline the call is problematic because allocate(size_t)
duke@435 1257 // is a virtual method.
duke@435 1258 HeapWord* res = allocate(adjustObjectSize(obj_size));
duke@435 1259 if (res != NULL) {
duke@435 1260 Copy::aligned_disjoint_words((HeapWord*)obj, res, obj_size);
duke@435 1261 // if we should be tracking promotions, do so.
duke@435 1262 if (_promoInfo.tracking()) {
duke@435 1263 _promoInfo.track((PromotedObject*)res);
duke@435 1264 }
duke@435 1265 }
duke@435 1266 return oop(res);
duke@435 1267 }
duke@435 1268
duke@435 1269 HeapWord*
duke@435 1270 CompactibleFreeListSpace::getChunkFromSmallLinearAllocBlock(size_t size) {
duke@435 1271 assert_locked();
duke@435 1272 assert(size >= MinChunkSize, "minimum chunk size");
duke@435 1273 assert(size < _smallLinearAllocBlock._allocation_size_limit,
duke@435 1274 "maximum from smallLinearAllocBlock");
duke@435 1275 return getChunkFromLinearAllocBlock(&_smallLinearAllocBlock, size);
duke@435 1276 }
duke@435 1277
duke@435 1278 HeapWord*
duke@435 1279 CompactibleFreeListSpace::getChunkFromLinearAllocBlock(LinearAllocBlock *blk,
duke@435 1280 size_t size) {
duke@435 1281 assert_locked();
duke@435 1282 assert(size >= MinChunkSize, "too small");
duke@435 1283 HeapWord* res = NULL;
duke@435 1284 // Try to do linear allocation from blk, making sure that the block
duke@435 1284 // has not already been exhausted.
duke@435 1285 if (blk->_word_size == 0) {
duke@435 1286 // We have probably been unable to fill this either in the prologue or
duke@435 1287 // when it was exhausted at the last linear allocation. Bail out until
duke@435 1288 // next time.
duke@435 1289 assert(blk->_ptr == NULL, "consistency check");
duke@435 1290 return NULL;
duke@435 1291 }
duke@435 1292 assert(blk->_word_size != 0 && blk->_ptr != NULL, "consistency check");
duke@435 1293 res = getChunkFromLinearAllocBlockRemainder(blk, size);
duke@435 1294 if (res != NULL) return res;
duke@435 1295
duke@435 1296 // about to exhaust this linear allocation block
duke@435 1297 if (blk->_word_size == size) { // exactly satisfied
duke@435 1298 res = blk->_ptr;
duke@435 1299 _bt.allocated(res, blk->_word_size);
duke@435 1300 } else if (size + MinChunkSize <= blk->_refillSize) {
duke@435 1301 // Update _unallocated_block if the size is such that chunk would be
duke@435 1302 // returned to the indexed free list. All other chunks in the indexed
duke@435 1303 // free lists are allocated from the dictionary so that _unallocated_block
duke@435 1304 // has already been adjusted for them. Do it here so that the cost
duke@435 1305 // is incurred for all chunks added back to the indexed free lists.
duke@435 1306 if (blk->_word_size < SmallForDictionary) {
duke@435 1307 _bt.allocated(blk->_ptr, blk->_word_size);
duke@435 1308 }
duke@435 1309 // Return the chunk that isn't big enough, and then refill below.
duke@435 1310 addChunkToFreeLists(blk->_ptr, blk->_word_size);
duke@435 1311 _bt.verify_single_block(blk->_ptr, (blk->_ptr + blk->_word_size));
duke@435 1312 // Don't keep statistics on adding back chunk from a LinAB.
duke@435 1313 } else {
duke@435 1314 // A refilled block would not satisfy the request.
duke@435 1315 return NULL;
duke@435 1316 }
duke@435 1317
duke@435 1318 blk->_ptr = NULL; blk->_word_size = 0;
duke@435 1319 refillLinearAllocBlock(blk);
duke@435 1320 assert(blk->_ptr == NULL || blk->_word_size >= size + MinChunkSize,
duke@435 1321 "block was replenished");
duke@435 1322 if (res != NULL) {
duke@435 1323 splitBirth(size);
duke@435 1324 repairLinearAllocBlock(blk);
duke@435 1325 } else if (blk->_ptr != NULL) {
duke@435 1326 res = blk->_ptr;
duke@435 1327 size_t blk_size = blk->_word_size;
duke@435 1328 blk->_word_size -= size;
duke@435 1329 blk->_ptr += size;
duke@435 1330 splitBirth(size);
duke@435 1331 repairLinearAllocBlock(blk);
duke@435 1332 // Update BOT last so that other (parallel) GC threads see a consistent
duke@435 1333 // view of the BOT and free blocks.
duke@435 1334 // Above must occur before BOT is updated below.
duke@435 1335 _bt.split_block(res, blk_size, size); // adjust block offset table
duke@435 1336 }
duke@435 1337 return res;
duke@435 1338 }
duke@435 1339
duke@435 1340 HeapWord* CompactibleFreeListSpace::getChunkFromLinearAllocBlockRemainder(
duke@435 1341 LinearAllocBlock* blk,
duke@435 1342 size_t size) {
duke@435 1343 assert_locked();
duke@435 1344 assert(size >= MinChunkSize, "too small");
duke@435 1345
duke@435 1346 HeapWord* res = NULL;
duke@435 1347 // This is the common case. Keep it simple.
duke@435 1348 if (blk->_word_size >= size + MinChunkSize) {
duke@435 1349 assert(blk->_ptr != NULL, "consistency check");
duke@435 1350 res = blk->_ptr;
duke@435 1351 // Note that the BOT is up-to-date for the linAB before allocation. It
duke@435 1352 // indicates the start of the linAB. The split_block() updates the
duke@435 1353 // BOT for the linAB after the allocation (indicates the start of the
duke@435 1354 // next chunk to be allocated).
duke@435 1355 size_t blk_size = blk->_word_size;
duke@435 1356 blk->_word_size -= size;
duke@435 1357 blk->_ptr += size;
duke@435 1358 splitBirth(size);
duke@435 1359 repairLinearAllocBlock(blk);
duke@435 1360 // Update BOT last so that other (parallel) GC threads see a consistent
duke@435 1361 // view of the BOT and free blocks.
duke@435 1362 // Above must occur before BOT is updated below.
duke@435 1363 _bt.split_block(res, blk_size, size); // adjust block offset table
duke@435 1364 _bt.allocated(res, size);
duke@435 1365 }
duke@435 1366 return res;
duke@435 1367 }
duke@435 1368
duke@435 1369 FreeChunk*
duke@435 1370 CompactibleFreeListSpace::getChunkFromIndexedFreeList(size_t size) {
duke@435 1371 assert_locked();
duke@435 1372 assert(size < SmallForDictionary, "just checking");
duke@435 1373 FreeChunk* res;
duke@435 1374 res = _indexedFreeList[size].getChunkAtHead();
duke@435 1375 if (res == NULL) {
duke@435 1376 res = getChunkFromIndexedFreeListHelper(size);
duke@435 1377 }
duke@435 1378 _bt.verify_not_unallocated((HeapWord*) res, size);
duke@435 1379 return res;
duke@435 1380 }
duke@435 1381
duke@435 1382 FreeChunk*
duke@435 1383 CompactibleFreeListSpace::getChunkFromIndexedFreeListHelper(size_t size) {
duke@435 1384 assert_locked();
duke@435 1385 FreeChunk* fc = NULL;
duke@435 1386 if (size < SmallForDictionary) {
duke@435 1387 assert(_indexedFreeList[size].head() == NULL ||
duke@435 1388 _indexedFreeList[size].surplus() <= 0,
duke@435 1389 "List for this size should be empty or underpopulated");
duke@435 1390 // Try best fit in exact lists before replenishing the list
duke@435 1391 if (!bestFitFirst() || (fc = bestFitSmall(size)) == NULL) {
duke@435 1392 // Replenish list.
duke@435 1393 //
duke@435 1394 // Things tried that failed.
duke@435 1395 // Tried allocating out of the two LinAB's first before
duke@435 1396 // replenishing lists.
duke@435 1397 // Tried small linAB of size 256 (size in indexed list)
duke@435 1398 // and replenishing indexed lists from the small linAB.
duke@435 1399 //
duke@435 1400 FreeChunk* newFc = NULL;
duke@435 1401 size_t replenish_size = CMSIndexedFreeListReplenish * size;
duke@435 1402 if (replenish_size < SmallForDictionary) {
duke@435 1403 // Do not replenish from an underpopulated size.
duke@435 1404 if (_indexedFreeList[replenish_size].surplus() > 0 &&
duke@435 1405 _indexedFreeList[replenish_size].head() != NULL) {
duke@435 1406 newFc =
duke@435 1407 _indexedFreeList[replenish_size].getChunkAtHead();
duke@435 1408 } else {
duke@435 1409 newFc = bestFitSmall(replenish_size);
duke@435 1410 }
duke@435 1411 }
duke@435 1412 if (newFc != NULL) {
duke@435 1413 splitDeath(replenish_size);
duke@435 1414 } else if (replenish_size > size) {
duke@435 1415 assert(CMSIndexedFreeListReplenish > 1, "ctl pt invariant");
duke@435 1416 newFc =
duke@435 1417 getChunkFromIndexedFreeListHelper(replenish_size);
duke@435 1418 }
duke@435 1419 if (newFc != NULL) {
duke@435 1420 assert(newFc->size() == replenish_size, "Got wrong size");
duke@435 1421 size_t i;
duke@435 1422 FreeChunk *curFc, *nextFc;
duke@435 1423 // carve up and link blocks 0, ..., CMSIndexedFreeListReplenish - 2
duke@435 1424 // The last chunk is not added to the lists but is returned as the
duke@435 1425 // free chunk.
duke@435 1426 for (curFc = newFc, nextFc = (FreeChunk*)((HeapWord*)curFc + size),
duke@435 1427 i = 0;
duke@435 1428 i < (CMSIndexedFreeListReplenish - 1);
duke@435 1429 curFc = nextFc, nextFc = (FreeChunk*)((HeapWord*)nextFc + size),
duke@435 1430 i++) {
duke@435 1431 curFc->setSize(size);
duke@435 1432 // Don't record this as a return in order to try and
duke@435 1433 // determine the "returns" from a GC.
duke@435 1434 _bt.verify_not_unallocated((HeapWord*) fc, size);
duke@435 1435 _indexedFreeList[size].returnChunkAtTail(curFc, false);
duke@435 1436 _bt.mark_block((HeapWord*)curFc, size);
duke@435 1437 splitBirth(size);
duke@435 1438 // Don't record the initial population of the indexed list
duke@435 1439 // as a split birth.
duke@435 1440 }
duke@435 1441
duke@435 1442 // check that the arithmetic was OK above
duke@435 1443 assert((HeapWord*)nextFc == (HeapWord*)newFc + replenish_size,
duke@435 1444 "inconsistency in carving newFc");
duke@435 1445 curFc->setSize(size);
duke@435 1446 _bt.mark_block((HeapWord*)curFc, size);
duke@435 1447 splitBirth(size);
duke@435 1448 return curFc;
duke@435 1449 }
duke@435 1450 }
duke@435 1451 } else {
duke@435 1452 // Get a free chunk from the free chunk dictionary to be returned to
duke@435 1453 // replenish the indexed free list.
duke@435 1454 fc = getChunkFromDictionaryExact(size);
duke@435 1455 }
duke@435 1456 assert(fc == NULL || fc->isFree(), "Should be returning a free chunk");
duke@435 1457 return fc;
duke@435 1458 }
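// Replenishment sketch: a single chunk of CMSIndexedFreeListReplenish * size
// words is carved into CMSIndexedFreeListReplenish pieces of size words
// each; the first CMSIndexedFreeListReplenish - 1 pieces are appended to
// _indexedFreeList[size] and the last piece is returned to the caller.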
duke@435 1459
duke@435 1460 FreeChunk*
duke@435 1461 CompactibleFreeListSpace::getChunkFromDictionary(size_t size) {
duke@435 1462 assert_locked();
duke@435 1463 FreeChunk* fc = _dictionary->getChunk(size);
duke@435 1464 if (fc == NULL) {
duke@435 1465 return NULL;
duke@435 1466 }
duke@435 1467 _bt.allocated((HeapWord*)fc, fc->size());
duke@435 1468 if (fc->size() >= size + MinChunkSize) {
duke@435 1469 fc = splitChunkAndReturnRemainder(fc, size);
duke@435 1470 }
duke@435 1471 assert(fc->size() >= size, "chunk too small");
duke@435 1472 assert(fc->size() < size + MinChunkSize, "chunk too big");
duke@435 1473 _bt.verify_single_block((HeapWord*)fc, fc->size());
duke@435 1474 return fc;
duke@435 1475 }
duke@435 1476
duke@435 1477 FreeChunk*
duke@435 1478 CompactibleFreeListSpace::getChunkFromDictionaryExact(size_t size) {
duke@435 1479 assert_locked();
duke@435 1480 FreeChunk* fc = _dictionary->getChunk(size);
duke@435 1481 if (fc == NULL) {
duke@435 1482 return fc;
duke@435 1483 }
duke@435 1484 _bt.allocated((HeapWord*)fc, fc->size());
duke@435 1485 if (fc->size() == size) {
duke@435 1486 _bt.verify_single_block((HeapWord*)fc, size);
duke@435 1487 return fc;
duke@435 1488 }
duke@435 1489 assert(fc->size() > size, "getChunk() guarantee");
duke@435 1490 if (fc->size() < size + MinChunkSize) {
duke@435 1491 // Return the chunk to the dictionary and go get a bigger one.
duke@435 1492 returnChunkToDictionary(fc);
duke@435 1493 fc = _dictionary->getChunk(size + MinChunkSize);
duke@435 1494 if (fc == NULL) {
duke@435 1495 return NULL;
duke@435 1496 }
duke@435 1497 _bt.allocated((HeapWord*)fc, fc->size());
duke@435 1498 }
duke@435 1499 assert(fc->size() >= size + MinChunkSize, "tautology");
duke@435 1500 fc = splitChunkAndReturnRemainder(fc, size);
duke@435 1501 assert(fc->size() == size, "chunk is wrong size");
duke@435 1502 _bt.verify_single_block((HeapWord*)fc, size);
duke@435 1503 return fc;
duke@435 1504 }
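// If the dictionary returns a chunk that is larger than the request by
// less than MinChunkSize, it cannot be split (the remainder would be too
// small to stand alone as a free chunk), so it is returned and a chunk of
// at least size + MinChunkSize is requested instead, which can always be
// split down to an exact fit.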
duke@435 1505
duke@435 1506 void
duke@435 1507 CompactibleFreeListSpace::returnChunkToDictionary(FreeChunk* chunk) {
duke@435 1508 assert_locked();
duke@435 1509
duke@435 1510 size_t size = chunk->size();
duke@435 1511 _bt.verify_single_block((HeapWord*)chunk, size);
duke@435 1512 // adjust _unallocated_block downward, as necessary
duke@435 1513 _bt.freed((HeapWord*)chunk, size);
duke@435 1514 _dictionary->returnChunk(chunk);
duke@435 1515 }
duke@435 1516
duke@435 1517 void
duke@435 1518 CompactibleFreeListSpace::returnChunkToFreeList(FreeChunk* fc) {
duke@435 1519 assert_locked();
duke@435 1520 size_t size = fc->size();
duke@435 1521 _bt.verify_single_block((HeapWord*) fc, size);
duke@435 1522 _bt.verify_not_unallocated((HeapWord*) fc, size);
duke@435 1523 if (_adaptive_freelists) {
duke@435 1524 _indexedFreeList[size].returnChunkAtTail(fc);
duke@435 1525 } else {
duke@435 1526 _indexedFreeList[size].returnChunkAtHead(fc);
duke@435 1527 }
duke@435 1528 }
duke@435 1529
duke@435 1530 // Add chunk to the end of the last block -- but only if that block
duke@435 1531 // happens to be the largest block -- and update BOT and census data.
duke@435 1532 // We would of course have preferred to coalesce it with the
duke@435 1533 // last block unconditionally, but it's currently less expensive to
duke@435 1534 // find the largest block than it is to find the last.
duke@435 1535 void
duke@435 1536 CompactibleFreeListSpace::addChunkToFreeListsAtEndRecordingStats(
duke@435 1537 HeapWord* chunk, size_t size) {
duke@435 1538 // check that the chunk does lie in this space!
duke@435 1539 assert(chunk != NULL && is_in_reserved(chunk), "Not in this space!");
duke@435 1540 assert_locked();
duke@435 1541 // One of the parallel gc task threads may be here
duke@435 1542 // whilst others are allocating.
duke@435 1543 Mutex* lock = NULL;
duke@435 1544 if (ParallelGCThreads != 0) {
duke@435 1545 lock = &_parDictionaryAllocLock;
duke@435 1546 }
duke@435 1547 FreeChunk* ec;
duke@435 1548 {
duke@435 1549 MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
duke@435 1550 ec = dictionary()->findLargestDict(); // get largest block
duke@435 1551 if (ec != NULL && ec->end() == chunk) {
duke@435 1552 // It's a coterminal block - we can coalesce.
duke@435 1553 size_t old_size = ec->size();
duke@435 1554 coalDeath(old_size);
duke@435 1555 removeChunkFromDictionary(ec);
duke@435 1556 size += old_size;
duke@435 1557 } else {
duke@435 1558 ec = (FreeChunk*)chunk;
duke@435 1559 }
duke@435 1560 }
duke@435 1561 ec->setSize(size);
duke@435 1562 debug_only(ec->mangleFreed(size));
duke@435 1563 if (size < SmallForDictionary) {
duke@435 1564 lock = _indexedFreeListParLocks[size];
duke@435 1565 }
duke@435 1566 MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
duke@435 1567 addChunkAndRepairOffsetTable((HeapWord*)ec, size, true);
duke@435 1568 // record the birth under the lock since the recording involves
duke@435 1569 // manipulation of the list on which the chunk lives and
duke@435 1570 // if the chunk is allocated and is the last on the list,
duke@435 1571 // the list can go away.
duke@435 1572 coalBirth(size);
duke@435 1573 }
duke@435 1574
duke@435 1575 void
duke@435 1576 CompactibleFreeListSpace::addChunkToFreeLists(HeapWord* chunk,
duke@435 1577 size_t size) {
duke@435 1578 // check that the chunk does lie in this space!
duke@435 1579 assert(chunk != NULL && is_in_reserved(chunk), "Not in this space!");
duke@435 1580 assert_locked();
duke@435 1581 _bt.verify_single_block(chunk, size);
duke@435 1582
duke@435 1583 FreeChunk* fc = (FreeChunk*) chunk;
duke@435 1584 fc->setSize(size);
duke@435 1585 debug_only(fc->mangleFreed(size));
duke@435 1586 if (size < SmallForDictionary) {
duke@435 1587 returnChunkToFreeList(fc);
duke@435 1588 } else {
duke@435 1589 returnChunkToDictionary(fc);
duke@435 1590 }
duke@435 1591 }
duke@435 1592
duke@435 1593 void
duke@435 1594 CompactibleFreeListSpace::addChunkAndRepairOffsetTable(HeapWord* chunk,
duke@435 1595 size_t size, bool coalesced) {
duke@435 1596 assert_locked();
duke@435 1597 assert(chunk != NULL, "null chunk");
duke@435 1598 if (coalesced) {
duke@435 1599 // repair BOT
duke@435 1600 _bt.single_block(chunk, size);
duke@435 1601 }
duke@435 1602 addChunkToFreeLists(chunk, size);
duke@435 1603 }
duke@435 1604
duke@435 1605 // We _must_ find the purported chunk on our free lists;
duke@435 1606 // we assert if we don't.
duke@435 1607 void
duke@435 1608 CompactibleFreeListSpace::removeFreeChunkFromFreeLists(FreeChunk* fc) {
duke@435 1609 size_t size = fc->size();
duke@435 1610 assert_locked();
duke@435 1611 debug_only(verifyFreeLists());
duke@435 1612 if (size < SmallForDictionary) {
duke@435 1613 removeChunkFromIndexedFreeList(fc);
duke@435 1614 } else {
duke@435 1615 removeChunkFromDictionary(fc);
duke@435 1616 }
duke@435 1617 _bt.verify_single_block((HeapWord*)fc, size);
duke@435 1618 debug_only(verifyFreeLists());
duke@435 1619 }
duke@435 1620
duke@435 1621 void
duke@435 1622 CompactibleFreeListSpace::removeChunkFromDictionary(FreeChunk* fc) {
duke@435 1623 size_t size = fc->size();
duke@435 1624 assert_locked();
duke@435 1625 assert(fc != NULL, "null chunk");
duke@435 1626 _bt.verify_single_block((HeapWord*)fc, size);
duke@435 1627 _dictionary->removeChunk(fc);
duke@435 1628 // adjust _unallocated_block upward, as necessary
duke@435 1629 _bt.allocated((HeapWord*)fc, size);
duke@435 1630 }
duke@435 1631
duke@435 1632 void
duke@435 1633 CompactibleFreeListSpace::removeChunkFromIndexedFreeList(FreeChunk* fc) {
duke@435 1634 assert_locked();
duke@435 1635 size_t size = fc->size();
duke@435 1636 _bt.verify_single_block((HeapWord*)fc, size);
duke@435 1637 NOT_PRODUCT(
duke@435 1638 if (FLSVerifyIndexTable) {
duke@435 1639 verifyIndexedFreeList(size);
duke@435 1640 }
duke@435 1641 )
duke@435 1642 _indexedFreeList[size].removeChunk(fc);
duke@435 1643 debug_only(fc->clearNext());
duke@435 1644 debug_only(fc->clearPrev());
duke@435 1645 NOT_PRODUCT(
duke@435 1646 if (FLSVerifyIndexTable) {
duke@435 1647 verifyIndexedFreeList(size);
duke@435 1648 }
duke@435 1649 )
duke@435 1650 }
duke@435 1651
duke@435 1652 FreeChunk* CompactibleFreeListSpace::bestFitSmall(size_t numWords) {
duke@435 1653 /* A hint is the next larger size that has a surplus.
duke@435 1654 Start search at a size large enough to guarantee that
duke@435 1655 the excess is >= MIN_CHUNK. */
duke@435 1656 size_t start = align_object_size(numWords + MinChunkSize);
duke@435 1657 if (start < IndexSetSize) {
duke@435 1658 FreeList* it = _indexedFreeList;
duke@435 1659 size_t hint = _indexedFreeList[start].hint();
duke@435 1660 while (hint < IndexSetSize) {
duke@435 1661 assert(hint % MinObjAlignment == 0, "hint should be aligned");
duke@435 1662 FreeList *fl = &_indexedFreeList[hint];
duke@435 1663 if (fl->surplus() > 0 && fl->head() != NULL) {
duke@435 1664 // Found a list with surplus, reset original hint
duke@435 1665 // and split out a free chunk which is returned.
duke@435 1666 _indexedFreeList[start].set_hint(hint);
duke@435 1667 FreeChunk* res = getFromListGreater(fl, numWords);
duke@435 1668 assert(res == NULL || res->isFree(),
duke@435 1669 "Should be returning a free chunk");
duke@435 1670 return res;
duke@435 1671 }
duke@435 1672 hint = fl->hint(); /* keep looking */
duke@435 1673 }
duke@435 1674 /* None found. */
duke@435 1675 it[start].set_hint(IndexSetSize);
duke@435 1676 }
duke@435 1677 return NULL;
duke@435 1678 }
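// The hints form a chain through the index table: _indexedFreeList[start]
// names the next larger size believed (at census time) to have a surplus,
// that list names the next, and so on (see setFLHints()). A successful
// search caches the size that worked back into the starting slot; an
// exhausted search parks the hint at IndexSetSize so the next lookup from
// that slot fails fast.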
duke@435 1679
duke@435 1680 /* Requires fl->size >= numWords + MinChunkSize */
duke@435 1681 FreeChunk* CompactibleFreeListSpace::getFromListGreater(FreeList* fl,
duke@435 1682 size_t numWords) {
duke@435 1683 FreeChunk *curr = fl->head();
duke@435 1684 size_t oldNumWords = curr->size();
duke@435 1685 assert(numWords >= MinChunkSize, "Word size is too small");
duke@435 1686 assert(curr != NULL, "List is empty");
duke@435 1687 assert(oldNumWords >= numWords + MinChunkSize,
duke@435 1688 "Size of chunks in the list is too small");
duke@435 1689
duke@435 1690 fl->removeChunk(curr);
duke@435 1691 // recorded indirectly by splitChunkAndReturnRemainder -
duke@435 1692 // smallSplit(oldNumWords, numWords);
duke@435 1693 FreeChunk* new_chunk = splitChunkAndReturnRemainder(curr, numWords);
duke@435 1694 // Does anything have to be done for the remainder in terms of
duke@435 1695 // fixing the card table?
duke@435 1696 assert(new_chunk == NULL || new_chunk->isFree(),
duke@435 1697 "Should be returning a free chunk");
duke@435 1698 return new_chunk;
duke@435 1699 }
duke@435 1700
duke@435 1701 FreeChunk*
duke@435 1702 CompactibleFreeListSpace::splitChunkAndReturnRemainder(FreeChunk* chunk,
duke@435 1703 size_t new_size) {
duke@435 1704 assert_locked();
duke@435 1705 size_t size = chunk->size();
duke@435 1706 assert(size > new_size, "Split from a smaller block?");
duke@435 1707 assert(is_aligned(chunk), "alignment problem");
duke@435 1708 assert(size == adjustObjectSize(size), "alignment problem");
duke@435 1709 size_t rem_size = size - new_size;
duke@435 1710 assert(rem_size == adjustObjectSize(rem_size), "alignment problem");
duke@435 1711 assert(rem_size >= MinChunkSize, "Free chunk smaller than minimum");
duke@435 1712 FreeChunk* ffc = (FreeChunk*)((HeapWord*)chunk + new_size);
duke@435 1713 assert(is_aligned(ffc), "alignment problem");
duke@435 1714 ffc->setSize(rem_size);
duke@435 1715 ffc->linkNext(NULL);
duke@435 1716 ffc->linkPrev(NULL); // Mark as a free block for other (parallel) GC threads.
duke@435 1717 // Above must occur before BOT is updated below.
duke@435 1718 // adjust block offset table
duke@435 1719 _bt.split_block((HeapWord*)chunk, chunk->size(), new_size);
duke@435 1720 if (rem_size < SmallForDictionary) {
duke@435 1721 bool is_par = (SharedHeap::heap()->n_par_threads() > 0);
duke@435 1722 if (is_par) _indexedFreeListParLocks[rem_size]->lock();
duke@435 1723 returnChunkToFreeList(ffc);
duke@435 1724 split(size, rem_size);
duke@435 1725 if (is_par) _indexedFreeListParLocks[rem_size]->unlock();
duke@435 1726 } else {
duke@435 1727 returnChunkToDictionary(ffc);
duke@435 1728 split(size, rem_size);
duke@435 1729 }
duke@435 1730 chunk->setSize(new_size);
duke@435 1731 return chunk;
duke@435 1732 }
duke@435 1733
duke@435 1734 void
duke@435 1735 CompactibleFreeListSpace::sweep_completed() {
duke@435 1736 // Now that space is probably plentiful, refill linear
duke@435 1737 // allocation blocks as needed.
duke@435 1738 refillLinearAllocBlocksIfNeeded();
duke@435 1739 }
duke@435 1740
duke@435 1741 void
duke@435 1742 CompactibleFreeListSpace::gc_prologue() {
duke@435 1743 assert_locked();
duke@435 1744 if (PrintFLSStatistics != 0) {
duke@435 1745 gclog_or_tty->print("Before GC:\n");
duke@435 1746 reportFreeListStatistics();
duke@435 1747 }
duke@435 1748 refillLinearAllocBlocksIfNeeded();
duke@435 1749 }
duke@435 1750
duke@435 1751 void
duke@435 1752 CompactibleFreeListSpace::gc_epilogue() {
duke@435 1753 assert_locked();
duke@435 1754 if (PrintGCDetails && Verbose && !_adaptive_freelists) {
duke@435 1755 if (_smallLinearAllocBlock._word_size == 0)
duke@435 1756 warning("CompactibleFreeListSpace(epilogue):: Linear allocation failure");
duke@435 1757 }
duke@435 1758 assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");
duke@435 1759 _promoInfo.stopTrackingPromotions();
duke@435 1760 repairLinearAllocationBlocks();
duke@435 1761 // Print Space's stats
duke@435 1762 if (PrintFLSStatistics != 0) {
duke@435 1763 gclog_or_tty->print("After GC:\n");
duke@435 1764 reportFreeListStatistics();
duke@435 1765 }
duke@435 1766 }
duke@435 1767
duke@435 1768 // Iteration support, mostly delegated from a CMS generation
duke@435 1769
duke@435 1770 void CompactibleFreeListSpace::save_marks() {
duke@435 1771 // mark the "end" of the used space at the time of this call;
duke@435 1772 // note, however, that promoted objects from this point
duke@435 1773 // on are tracked in the _promoInfo below.
duke@435 1774 set_saved_mark_word(BlockOffsetArrayUseUnallocatedBlock ?
duke@435 1775 unallocated_block() : end());
duke@435 1776 // inform allocator that promotions should be tracked.
duke@435 1777 assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");
duke@435 1778 _promoInfo.startTrackingPromotions();
duke@435 1779 }
duke@435 1780
duke@435 1781 bool CompactibleFreeListSpace::no_allocs_since_save_marks() {
duke@435 1782 assert(_promoInfo.tracking(), "No preceding save_marks?");
duke@435 1783 guarantee(SharedHeap::heap()->n_par_threads() == 0,
duke@435 1784 "Shouldn't be called (yet) during parallel part of gc.");
duke@435 1785 return _promoInfo.noPromotions();
duke@435 1786 }
duke@435 1787
duke@435 1788 #define CFLS_OOP_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix) \
duke@435 1789 \
duke@435 1790 void CompactibleFreeListSpace:: \
duke@435 1791 oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk) { \
duke@435 1792 assert(SharedHeap::heap()->n_par_threads() == 0, \
duke@435 1793 "Shouldn't be called (yet) during parallel part of gc."); \
duke@435 1794 _promoInfo.promoted_oops_iterate##nv_suffix(blk); \
duke@435 1795 /* \
duke@435 1796 * This also restores any displaced headers and removes the elements from \
duke@435 1797 * the iteration set as they are processed, so that we have a clean slate \
duke@435 1798 * at the end of the iteration. Note, thus, that if new objects are \
duke@435 1799 * promoted as a result of the iteration they are iterated over as well. \
duke@435 1800 */ \
duke@435 1801 assert(_promoInfo.noPromotions(), "_promoInfo inconsistency"); \
duke@435 1802 }
duke@435 1803
duke@435 1804 ALL_SINCE_SAVE_MARKS_CLOSURES(CFLS_OOP_SINCE_SAVE_MARKS_DEFN)
duke@435 1805
duke@435 1806 //////////////////////////////////////////////////////////////////////////////
duke@435 1807 // We go over the list of promoted objects, removing each from the list,
duke@435 1808 // and applying the closure (this may, in turn, add more elements to
duke@435 1809 // the tail of the promoted list, and these newly added objects will
duke@435 1810 // also be processed) until the list is empty.
duke@435 1811 // To aid verification and debugging, in the non-product builds
duke@435 1812 // we actually forward _promoHead each time we process a promoted oop.
duke@435 1813 // Note that this is not necessary in general (i.e. when we don't need to
duke@435 1814 // call PromotionInfo::verify()) because oop_iterate can only add to the
duke@435 1815 // end of _promoTail, and never needs to look at _promoHead.
duke@435 1816
duke@435 1817 #define PROMOTED_OOPS_ITERATE_DEFN(OopClosureType, nv_suffix) \
duke@435 1818 \
duke@435 1819 void PromotionInfo::promoted_oops_iterate##nv_suffix(OopClosureType* cl) { \
duke@435 1820 NOT_PRODUCT(verify()); \
duke@435 1821 PromotedObject *curObj, *nextObj; \
duke@435 1822 for (curObj = _promoHead; curObj != NULL; curObj = nextObj) { \
duke@435 1823 if ((nextObj = curObj->next()) == NULL) { \
duke@435 1824 /* protect ourselves against additions due to closure application \
duke@435 1825 below by resetting the list. */ \
duke@435 1826 assert(_promoTail == curObj, "Should have been the tail"); \
duke@435 1827 _promoHead = _promoTail = NULL; \
duke@435 1828 } \
duke@435 1829 if (curObj->hasDisplacedMark()) { \
duke@435 1830 /* restore displaced header */ \
duke@435 1831 oop(curObj)->set_mark(nextDisplacedHeader()); \
duke@435 1832 } else { \
duke@435 1833 /* restore prototypical header */ \
duke@435 1834 oop(curObj)->init_mark(); \
duke@435 1835 } \
duke@435 1836 /* The "promoted_mark" should now not be set */ \
duke@435 1837 assert(!curObj->hasPromotedMark(), \
duke@435 1838 "Should have been cleared by restoring displaced mark-word"); \
duke@435 1839 NOT_PRODUCT(_promoHead = nextObj); \
duke@435 1840 if (cl != NULL) oop(curObj)->oop_iterate(cl); \
duke@435 1841 if (nextObj == NULL) { /* start at head of list reset above */ \
duke@435 1842 nextObj = _promoHead; \
duke@435 1843 } \
duke@435 1844 } \
duke@435 1845 assert(noPromotions(), "post-condition violation"); \
duke@435 1846 assert(_promoHead == NULL && _promoTail == NULL, "emptied promoted list");\
duke@435 1847 assert(_spoolHead == _spoolTail, "emptied spooling buffers"); \
duke@435 1848 assert(_firstIndex == _nextIndex, "empty buffer"); \
duke@435 1849 }
duke@435 1850
duke@435 1851 // This should have been ALL_SINCE_...() just like the others,
duke@435 1852 // but, because the body of the method above is somewhat longer,
duke@435 1853 // the MSVC compiler cannot cope; as a workaround, we split the
duke@435 1854 // macro into its 3 constituent parts below (see original macro
duke@435 1855 // definition in specializedOopClosures.hpp).
duke@435 1856 SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES_YOUNG(PROMOTED_OOPS_ITERATE_DEFN)
duke@435 1857 PROMOTED_OOPS_ITERATE_DEFN(OopsInGenClosure,_v)
duke@435 1858
duke@435 1859
duke@435 1860 void CompactibleFreeListSpace::object_iterate_since_last_GC(ObjectClosure* cl) {
duke@435 1861 // ugghh... how would one do this efficiently for a non-contiguous space?
duke@435 1862 guarantee(false, "NYI");
duke@435 1863 }
duke@435 1864
ysr@447 1865 bool CompactibleFreeListSpace::linearAllocationWouldFail() const {
duke@435 1866 return _smallLinearAllocBlock._word_size == 0;
duke@435 1867 }
duke@435 1868
duke@435 1869 void CompactibleFreeListSpace::repairLinearAllocationBlocks() {
duke@435 1870 // Fix up linear allocation blocks to look like free blocks
duke@435 1871 repairLinearAllocBlock(&_smallLinearAllocBlock);
duke@435 1872 }
duke@435 1873
duke@435 1874 void CompactibleFreeListSpace::repairLinearAllocBlock(LinearAllocBlock* blk) {
duke@435 1875 assert_locked();
duke@435 1876 if (blk->_ptr != NULL) {
duke@435 1877 assert(blk->_word_size != 0 && blk->_word_size >= MinChunkSize,
duke@435 1878 "Minimum block size requirement");
duke@435 1879 FreeChunk* fc = (FreeChunk*)(blk->_ptr);
duke@435 1880 fc->setSize(blk->_word_size);
duke@435 1881 fc->linkPrev(NULL); // mark as free
duke@435 1882 fc->dontCoalesce();
duke@435 1883 assert(fc->isFree(), "just marked it free");
duke@435 1884 assert(fc->cantCoalesce(), "just marked it uncoalescable");
duke@435 1885 }
duke@435 1886 }
duke@435 1887
duke@435 1888 void CompactibleFreeListSpace::refillLinearAllocBlocksIfNeeded() {
duke@435 1889 assert_locked();
duke@435 1890 if (_smallLinearAllocBlock._ptr == NULL) {
duke@435 1891 assert(_smallLinearAllocBlock._word_size == 0,
duke@435 1892 "Size of linAB should be zero if the ptr is NULL");
duke@435 1893 // Reset the linAB refill and allocation size limit.
duke@435 1894 _smallLinearAllocBlock.set(0, 0, 1024*SmallForLinearAlloc, SmallForLinearAlloc);
duke@435 1895 }
duke@435 1896 refillLinearAllocBlockIfNeeded(&_smallLinearAllocBlock);
duke@435 1897 }
duke@435 1898
duke@435 1899 void
duke@435 1900 CompactibleFreeListSpace::refillLinearAllocBlockIfNeeded(LinearAllocBlock* blk) {
duke@435 1901 assert_locked();
duke@435 1902 assert((blk->_ptr == NULL && blk->_word_size == 0) ||
duke@435 1903 (blk->_ptr != NULL && blk->_word_size >= MinChunkSize),
duke@435 1904 "blk invariant");
duke@435 1905 if (blk->_ptr == NULL) {
duke@435 1906 refillLinearAllocBlock(blk);
duke@435 1907 }
duke@435 1908 if (PrintMiscellaneous && Verbose) {
duke@435 1909 if (blk->_word_size == 0) {
duke@435 1910 warning("CompactibleFreeListSpace(prologue):: Linear allocation failure");
duke@435 1911 }
duke@435 1912 }
duke@435 1913 }
duke@435 1914
duke@435 1915 void
duke@435 1916 CompactibleFreeListSpace::refillLinearAllocBlock(LinearAllocBlock* blk) {
duke@435 1917 assert_locked();
duke@435 1918 assert(blk->_word_size == 0 && blk->_ptr == NULL,
duke@435 1919 "linear allocation block should be empty");
duke@435 1920 FreeChunk* fc;
duke@435 1921 if (blk->_refillSize < SmallForDictionary &&
duke@435 1922 (fc = getChunkFromIndexedFreeList(blk->_refillSize)) != NULL) {
duke@435 1923 // A linAB's strategy might be to use small sizes to reduce
duke@435 1924 // fragmentation but still get the benefits of allocation from a
duke@435 1925 // linAB.
duke@435 1926 } else {
duke@435 1927 fc = getChunkFromDictionary(blk->_refillSize);
duke@435 1928 }
duke@435 1929 if (fc != NULL) {
duke@435 1930 blk->_ptr = (HeapWord*)fc;
duke@435 1931 blk->_word_size = fc->size();
duke@435 1932 fc->dontCoalesce(); // to prevent sweeper from sweeping us up
duke@435 1933 }
duke@435 1934 }
duke@435 1935
ysr@447 1936 // Support for concurrent collection policy decisions.
ysr@447 1937 bool CompactibleFreeListSpace::should_concurrent_collect() const {
ysr@447 1938 // In the future we might want to add in fragmentation stats --
ysr@447 1939 // including erosion of the "mountain" into this decision as well.
ysr@447 1940 return !adaptive_freelists() && linearAllocationWouldFail();
ysr@447 1941 }
ysr@447 1942
duke@435 1943 // Support for compaction
duke@435 1944
duke@435 1945 void CompactibleFreeListSpace::prepare_for_compaction(CompactPoint* cp) {
duke@435 1946 SCAN_AND_FORWARD(cp,end,block_is_obj,block_size);
duke@435 1947 // prepare_for_compaction() uses the space between live objects
duke@435 1948 // so that later phase can skip dead space quickly. So verification
duke@435 1949 // of the free lists doesn't work after.
duke@435 1950 }
duke@435 1951
duke@435 1952 #define obj_size(q) adjustObjectSize(oop(q)->size())
duke@435 1953 #define adjust_obj_size(s) adjustObjectSize(s)
duke@435 1954
duke@435 1955 void CompactibleFreeListSpace::adjust_pointers() {
duke@435 1956 // In other versions of adjust_pointers(), a bail out
duke@435 1957 // based on the amount of live data in the generation
duke@435 1958 // (i.e., if 0, bail out) may be used.
duke@435 1959 // Cannot test used() == 0 here because the free lists have already
duke@435 1960 // been mangled by the compaction.
duke@435 1961
duke@435 1962 SCAN_AND_ADJUST_POINTERS(adjust_obj_size);
duke@435 1963 // See note about verification in prepare_for_compaction().
duke@435 1964 }
duke@435 1965
duke@435 1966 void CompactibleFreeListSpace::compact() {
duke@435 1967 SCAN_AND_COMPACT(obj_size);
duke@435 1968 }
duke@435 1969
duke@435 1970 // fragmentation_metric = 1 - [sum of (fbs**2) / (sum of fbs)**2]
duke@435 1971 // where fbs is free block sizes
duke@435 1972 double CompactibleFreeListSpace::flsFrag() const {
duke@435 1973 size_t itabFree = totalSizeInIndexedFreeLists();
duke@435 1974 double frag = 0.0;
duke@435 1975 size_t i;
duke@435 1976
duke@435 1977 for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
duke@435 1978 double sz = i;
duke@435 1979 frag += _indexedFreeList[i].count() * (sz * sz);
duke@435 1980 }
duke@435 1981
duke@435 1982 double totFree = itabFree +
duke@435 1983 _dictionary->totalChunkSize(DEBUG_ONLY(freelistLock()));
duke@435 1984 if (totFree > 0) {
duke@435 1985 frag = ((frag + _dictionary->sum_of_squared_block_sizes()) /
duke@435 1986 (totFree * totFree));
duke@435 1987 frag = (double)1.0 - frag;
duke@435 1988 } else {
duke@435 1989 assert(frag == 0.0, "Follows from totFree == 0");
duke@435 1990 }
duke@435 1991 return frag;
duke@435 1992 }
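// Illustrative values of this metric: a single free block of size S gives
// 1 - S*S/(S*S) == 0.0 (no fragmentation), while N equal-sized free blocks
// of s words each give 1 - N*s*s/((N*s)*(N*s)) == 1 - 1/N, approaching 1.0
// as the free space is split into ever more pieces.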
duke@435 1993
duke@435 1994 #define CoalSurplusPercent 1.05
duke@435 1995 #define SplitSurplusPercent 1.10
duke@435 1996
duke@435 1997 void CompactibleFreeListSpace::beginSweepFLCensus(
duke@435 1998 float inter_sweep_current,
duke@435 1999 float inter_sweep_estimate) {
duke@435 2000 assert_locked();
duke@435 2001 size_t i;
duke@435 2002 for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
duke@435 2003 FreeList* fl = &_indexedFreeList[i];
duke@435 2004 fl->compute_desired(inter_sweep_current, inter_sweep_estimate);
duke@435 2005 fl->set_coalDesired((ssize_t)((double)fl->desired() * CoalSurplusPercent));
duke@435 2006 fl->set_beforeSweep(fl->count());
duke@435 2007 fl->set_bfrSurp(fl->surplus());
duke@435 2008 }
duke@435 2009 _dictionary->beginSweepDictCensus(CoalSurplusPercent,
duke@435 2010 inter_sweep_current,
duke@435 2011 inter_sweep_estimate);
duke@435 2012 }
duke@435 2013
duke@435 2014 void CompactibleFreeListSpace::setFLSurplus() {
duke@435 2015 assert_locked();
duke@435 2016 size_t i;
duke@435 2017 for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
duke@435 2018 FreeList *fl = &_indexedFreeList[i];
duke@435 2019 fl->set_surplus(fl->count() -
duke@435 2020 (ssize_t)((double)fl->desired() * SplitSurplusPercent));
duke@435 2021 }
duke@435 2022 }
duke@435 2023
duke@435 2024 void CompactibleFreeListSpace::setFLHints() {
duke@435 2025 assert_locked();
duke@435 2026 size_t i;
duke@435 2027 size_t h = IndexSetSize;
duke@435 2028 for (i = IndexSetSize - 1; i != 0; i -= IndexSetStride) {
duke@435 2029 FreeList *fl = &_indexedFreeList[i];
duke@435 2030 fl->set_hint(h);
duke@435 2031 if (fl->surplus() > 0) {
duke@435 2032 h = i;
duke@435 2033 }
duke@435 2034 }
duke@435 2035 }
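// Because the loop walks the index table from the largest size downward,
// each list's hint ends up being the smallest size strictly above it that
// had a surplus at census time (or IndexSetSize if none did); bestFitSmall()
// follows these hints when looking for a larger chunk to split.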
duke@435 2036
duke@435 2037 void CompactibleFreeListSpace::clearFLCensus() {
duke@435 2038 assert_locked();
duke@435 2039 int i;
duke@435 2040 for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
duke@435 2041 FreeList *fl = &_indexedFreeList[i];
duke@435 2042 fl->set_prevSweep(fl->count());
duke@435 2043 fl->set_coalBirths(0);
duke@435 2044 fl->set_coalDeaths(0);
duke@435 2045 fl->set_splitBirths(0);
duke@435 2046 fl->set_splitDeaths(0);
duke@435 2047 }
duke@435 2048 }
duke@435 2049
ysr@447 2050 void CompactibleFreeListSpace::endSweepFLCensus(size_t sweep_count) {
duke@435 2051 setFLSurplus();
duke@435 2052 setFLHints();
duke@435 2053 if (PrintGC && PrintFLSCensus > 0) {
ysr@447 2054 printFLCensus(sweep_count);
duke@435 2055 }
duke@435 2056 clearFLCensus();
duke@435 2057 assert_locked();
duke@435 2058 _dictionary->endSweepDictCensus(SplitSurplusPercent);
duke@435 2059 }
duke@435 2060
duke@435 2061 bool CompactibleFreeListSpace::coalOverPopulated(size_t size) {
duke@435 2062 if (size < SmallForDictionary) {
duke@435 2063 FreeList *fl = &_indexedFreeList[size];
duke@435 2064 return (fl->coalDesired() < 0) ||
duke@435 2065 ((int)fl->count() > fl->coalDesired());
duke@435 2066 } else {
duke@435 2067 return dictionary()->coalDictOverPopulated(size);
duke@435 2068 }
duke@435 2069 }
duke@435 2070
duke@435 2071 void CompactibleFreeListSpace::smallCoalBirth(size_t size) {
duke@435 2072 assert(size < SmallForDictionary, "Size too large for indexed list");
duke@435 2073 FreeList *fl = &_indexedFreeList[size];
duke@435 2074 fl->increment_coalBirths();
duke@435 2075 fl->increment_surplus();
duke@435 2076 }
duke@435 2077
duke@435 2078 void CompactibleFreeListSpace::smallCoalDeath(size_t size) {
duke@435 2079 assert(size < SmallForDictionary, "Size too large for indexed list");
duke@435 2080 FreeList *fl = &_indexedFreeList[size];
duke@435 2081 fl->increment_coalDeaths();
duke@435 2082 fl->decrement_surplus();
duke@435 2083 }
duke@435 2084
duke@435 2085 void CompactibleFreeListSpace::coalBirth(size_t size) {
duke@435 2086 if (size < SmallForDictionary) {
duke@435 2087 smallCoalBirth(size);
duke@435 2088 } else {
duke@435 2089 dictionary()->dictCensusUpdate(size,
duke@435 2090 false /* split */,
duke@435 2091 true /* birth */);
duke@435 2092 }
duke@435 2093 }
duke@435 2094
duke@435 2095 void CompactibleFreeListSpace::coalDeath(size_t size) {
duke@435 2096 if (size < SmallForDictionary) {
duke@435 2097 smallCoalDeath(size);
duke@435 2098 } else {
duke@435 2099 dictionary()->dictCensusUpdate(size,
duke@435 2100 false /* split */,
duke@435 2101 false /* birth */);
duke@435 2102 }
duke@435 2103 }
duke@435 2104
duke@435 2105 void CompactibleFreeListSpace::smallSplitBirth(size_t size) {
duke@435 2106 assert(size < SmallForDictionary, "Size too large for indexed list");
duke@435 2107 FreeList *fl = &_indexedFreeList[size];
duke@435 2108 fl->increment_splitBirths();
duke@435 2109 fl->increment_surplus();
duke@435 2110 }
duke@435 2111
duke@435 2112 void CompactibleFreeListSpace::smallSplitDeath(size_t size) {
duke@435 2113 assert(size < SmallForDictionary, "Size too large for indexed list");
duke@435 2114 FreeList *fl = &_indexedFreeList[size];
duke@435 2115 fl->increment_splitDeaths();
duke@435 2116 fl->decrement_surplus();
duke@435 2117 }
duke@435 2118
duke@435 2119 void CompactibleFreeListSpace::splitBirth(size_t size) {
duke@435 2120 if (size < SmallForDictionary) {
duke@435 2121 smallSplitBirth(size);
duke@435 2122 } else {
duke@435 2123 dictionary()->dictCensusUpdate(size,
duke@435 2124 true /* split */,
duke@435 2125 true /* birth */);
duke@435 2126 }
duke@435 2127 }
duke@435 2128
duke@435 2129 void CompactibleFreeListSpace::splitDeath(size_t size) {
duke@435 2130 if (size < SmallForDictionary) {
duke@435 2131 smallSplitDeath(size);
duke@435 2132 } else {
duke@435 2133 dictionary()->dictCensusUpdate(size,
duke@435 2134 true /* split */,
duke@435 2135 false /* birth */);
duke@435 2136 }
duke@435 2137 }
duke@435 2138
duke@435 2139 void CompactibleFreeListSpace::split(size_t from, size_t to1) {
duke@435 2140 size_t to2 = from - to1;
duke@435 2141 splitDeath(from);
duke@435 2142 splitBirth(to1);
duke@435 2143 splitBirth(to2);
duke@435 2144 }
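// Census bookkeeping for a split: the donor size records one death and the
// two resulting sizes (to1 and from - to1) each record a birth, keeping the
// per-size chunk counts consistent across the split.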
duke@435 2145
duke@435 2146 void CompactibleFreeListSpace::print() const {
duke@435 2147 tty->print(" CompactibleFreeListSpace");
duke@435 2148 Space::print();
duke@435 2149 }
duke@435 2150
duke@435 2151 void CompactibleFreeListSpace::prepare_for_verify() {
duke@435 2152 assert_locked();
duke@435 2153 repairLinearAllocationBlocks();
duke@435 2154 // Verify that the SpoolBlocks look like free blocks of
duke@435 2155 // appropriate sizes... To be done ...
duke@435 2156 }
duke@435 2157
duke@435 2158 class VerifyAllBlksClosure: public BlkClosure {
coleenp@548 2159 private:
duke@435 2160 const CompactibleFreeListSpace* _sp;
duke@435 2161 const MemRegion _span;
duke@435 2162
duke@435 2163 public:
duke@435 2164 VerifyAllBlksClosure(const CompactibleFreeListSpace* sp,
duke@435 2165 MemRegion span) : _sp(sp), _span(span) { }
duke@435 2166
coleenp@548 2167 virtual size_t do_blk(HeapWord* addr) {
duke@435 2168 size_t res;
duke@435 2169 if (_sp->block_is_obj(addr)) {
duke@435 2170 oop p = oop(addr);
duke@435 2171 guarantee(p->is_oop(), "Should be an oop");
duke@435 2172 res = _sp->adjustObjectSize(p->size());
duke@435 2173 if (_sp->obj_is_alive(addr)) {
duke@435 2174 p->verify();
duke@435 2175 }
duke@435 2176 } else {
duke@435 2177 FreeChunk* fc = (FreeChunk*)addr;
duke@435 2178 res = fc->size();
duke@435 2179 if (FLSVerifyLists && !fc->cantCoalesce()) {
duke@435 2180 guarantee(_sp->verifyChunkInFreeLists(fc),
duke@435 2181 "Chunk should be on a free list");
duke@435 2182 }
duke@435 2183 }
duke@435 2184 guarantee(res != 0, "Livelock: no rank reduction!");
duke@435 2185 return res;
duke@435 2186 }
duke@435 2187 };
duke@435 2188
duke@435 2189 class VerifyAllOopsClosure: public OopClosure {
coleenp@548 2190 private:
duke@435 2191 const CMSCollector* _collector;
duke@435 2192 const CompactibleFreeListSpace* _sp;
duke@435 2193 const MemRegion _span;
duke@435 2194 const bool _past_remark;
duke@435 2195 const CMSBitMap* _bit_map;
duke@435 2196
coleenp@548 2197 protected:
coleenp@548 2198 void do_oop(void* p, oop obj) {
coleenp@548 2199 if (_span.contains(obj)) { // the interior oop points into CMS heap
coleenp@548 2200 if (!_span.contains(p)) { // reference from outside CMS heap
coleenp@548 2201 // Should be a valid object; the first disjunct below allows
coleenp@548 2202 // us to sidestep an assertion in block_is_obj() that insists
coleenp@548 2203 // that p be in _sp. Note that several generations (and spaces)
coleenp@548 2204 // are spanned by _span (CMS heap) above.
coleenp@548 2205 guarantee(!_sp->is_in_reserved(obj) ||
coleenp@548 2206 _sp->block_is_obj((HeapWord*)obj),
coleenp@548 2207 "Should be an object");
coleenp@548 2208 guarantee(obj->is_oop(), "Should be an oop");
coleenp@548 2209 obj->verify();
coleenp@548 2210 if (_past_remark) {
coleenp@548 2211 // Remark has been completed, the object should be marked
coleenp@548 2212 _bit_map->isMarked((HeapWord*)obj);
coleenp@548 2213 }
coleenp@548 2214 } else { // reference within CMS heap
coleenp@548 2215 if (_past_remark) {
coleenp@548 2216 // Remark has been completed -- so the referent should have
coleenp@548 2217 // been marked, if referring object is.
coleenp@548 2218 if (_bit_map->isMarked(_collector->block_start(p))) {
coleenp@548 2219 guarantee(_bit_map->isMarked((HeapWord*)obj), "Marking error?");
coleenp@548 2220 }
coleenp@548 2221 }
coleenp@548 2222 }
coleenp@548 2223 } else if (_sp->is_in_reserved(p)) {
coleenp@548 2224 // the reference is from FLS, and points out of FLS
coleenp@548 2225 guarantee(obj->is_oop(), "Should be an oop");
coleenp@548 2226 obj->verify();
coleenp@548 2227 }
coleenp@548 2228 }
coleenp@548 2229
coleenp@548 2230 template <class T> void do_oop_work(T* p) {
coleenp@548 2231 T heap_oop = oopDesc::load_heap_oop(p);
coleenp@548 2232 if (!oopDesc::is_null(heap_oop)) {
coleenp@548 2233 oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
coleenp@548 2234 do_oop(p, obj);
coleenp@548 2235 }
coleenp@548 2236 }
coleenp@548 2237
duke@435 2238 public:
duke@435 2239 VerifyAllOopsClosure(const CMSCollector* collector,
duke@435 2240 const CompactibleFreeListSpace* sp, MemRegion span,
duke@435 2241 bool past_remark, CMSBitMap* bit_map) :
duke@435 2242 OopClosure(), _collector(collector), _sp(sp), _span(span),
duke@435 2243 _past_remark(past_remark), _bit_map(bit_map) { }
duke@435 2244
coleenp@548 2245 virtual void do_oop(oop* p) { VerifyAllOopsClosure::do_oop_work(p); }
coleenp@548 2246 virtual void do_oop(narrowOop* p) { VerifyAllOopsClosure::do_oop_work(p); }
duke@435 2247 };
duke@435 2248
duke@435 2249 void CompactibleFreeListSpace::verify(bool ignored) const {
duke@435 2250 assert_lock_strong(&_freelistLock);
duke@435 2251 verify_objects_initialized();
duke@435 2252 MemRegion span = _collector->_span;
duke@435 2253 bool past_remark = (_collector->abstract_state() ==
duke@435 2254 CMSCollector::Sweeping);
duke@435 2255
duke@435 2256 ResourceMark rm;
duke@435 2257 HandleMark hm;
duke@435 2258
duke@435 2259 // Check integrity of CFL data structures
duke@435 2260 _promoInfo.verify();
duke@435 2261 _dictionary->verify();
duke@435 2262 if (FLSVerifyIndexTable) {
duke@435 2263 verifyIndexedFreeLists();
duke@435 2264 }
duke@435 2265 // Check integrity of all objects and free blocks in space
duke@435 2266 {
duke@435 2267 VerifyAllBlksClosure cl(this, span);
duke@435 2268 ((CompactibleFreeListSpace*)this)->blk_iterate(&cl); // cast off const
duke@435 2269 }
duke@435 2270 // Check that all references in the heap to FLS
duke@435 2271 // are to valid objects in FLS or that references in
duke@435 2272 // FLS are to valid objects elsewhere in the heap
duke@435 2273 if (FLSVerifyAllHeapReferences)
duke@435 2274 {
duke@435 2275 VerifyAllOopsClosure cl(_collector, this, span, past_remark,
duke@435 2276 _collector->markBitMap());
duke@435 2277 CollectedHeap* ch = Universe::heap();
duke@435 2278 ch->oop_iterate(&cl); // all oops in generations
duke@435 2279 ch->permanent_oop_iterate(&cl); // all oops in perm gen
duke@435 2280 }
duke@435 2281
duke@435 2282 if (VerifyObjectStartArray) {
duke@435 2283 // Verify the block offset table
duke@435 2284 _bt.verify();
duke@435 2285 }
duke@435 2286 }
duke@435 2287
duke@435 2288 #ifndef PRODUCT
duke@435 2289 void CompactibleFreeListSpace::verifyFreeLists() const {
duke@435 2290 if (FLSVerifyLists) {
duke@435 2291 _dictionary->verify();
duke@435 2292 verifyIndexedFreeLists();
duke@435 2293 } else {
duke@435 2294 if (FLSVerifyDictionary) {
duke@435 2295 _dictionary->verify();
duke@435 2296 }
duke@435 2297 if (FLSVerifyIndexTable) {
duke@435 2298 verifyIndexedFreeLists();
duke@435 2299 }
duke@435 2300 }
duke@435 2301 }
duke@435 2302 #endif
duke@435 2303
duke@435 2304 void CompactibleFreeListSpace::verifyIndexedFreeLists() const {
duke@435 2305 size_t i = 0;
duke@435 2306 for (; i < MinChunkSize; i++) {
duke@435 2307 guarantee(_indexedFreeList[i].head() == NULL, "should be NULL");
duke@435 2308 }
duke@435 2309 for (; i < IndexSetSize; i++) {
duke@435 2310 verifyIndexedFreeList(i);
duke@435 2311 }
duke@435 2312 }
duke@435 2313
duke@435 2314 void CompactibleFreeListSpace::verifyIndexedFreeList(size_t size) const {
ysr@777 2315 FreeChunk* fc = _indexedFreeList[size].head();
ysr@777 2316 guarantee((size % 2 == 0) || fc == NULL, "Odd slots should be empty");
ysr@777 2317 for (; fc != NULL; fc = fc->next()) {
duke@435 2318 guarantee(fc->size() == size, "Size inconsistency");
duke@435 2319 guarantee(fc->isFree(), "!free?");
duke@435 2320 guarantee(fc->next() == NULL || fc->next()->prev() == fc, "Broken list");
duke@435 2321 }
duke@435 2322 }
duke@435 2323
duke@435 2324 #ifndef PRODUCT
duke@435 2325 void CompactibleFreeListSpace::checkFreeListConsistency() const {
duke@435 2326 assert(_dictionary->minSize() <= IndexSetSize,
duke@435 2327 "Some sizes can't be allocated without recourse to"
duke@435 2328 " linear allocation buffers");
duke@435 2329 assert(MIN_TREE_CHUNK_SIZE*HeapWordSize == sizeof(TreeChunk),
duke@435 2330 "else MIN_TREE_CHUNK_SIZE is wrong");
duke@435 2331 assert((IndexSetStride == 2 && IndexSetStart == 2) ||
duke@435 2332 (IndexSetStride == 1 && IndexSetStart == 1), "just checking");
duke@435 2333 assert((IndexSetStride != 2) || (MinChunkSize % 2 == 0),
duke@435 2334 "Some for-loops may be incorrectly initialized");
duke@435 2335 assert((IndexSetStride != 2) || (IndexSetSize % 2 == 1),
duke@435 2336 "For-loops that iterate over IndexSet with stride 2 may be wrong");
duke@435 2337 }
duke@435 2338 #endif
duke@435 2339
ysr@447 2340 void CompactibleFreeListSpace::printFLCensus(size_t sweep_count) const {
duke@435 2341 assert_lock_strong(&_freelistLock);
ysr@447 2342 FreeList total;
ysr@447 2343 gclog_or_tty->print("end sweep# " SIZE_FORMAT "\n", sweep_count);
ysr@447 2344 FreeList::print_labels_on(gclog_or_tty, "size");
duke@435 2345 size_t totalFree = 0;
duke@435 2346 for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
duke@435 2347 const FreeList *fl = &_indexedFreeList[i];
ysr@447 2348 totalFree += fl->count() * fl->size();
ysr@447 2349 if (i % (40*IndexSetStride) == 0) {
ysr@447 2350 FreeList::print_labels_on(gclog_or_tty, "size");
ysr@447 2351 }
ysr@447 2352 fl->print_on(gclog_or_tty);
ysr@447 2353 total.set_bfrSurp( total.bfrSurp() + fl->bfrSurp() );
ysr@447 2354 total.set_surplus( total.surplus() + fl->surplus() );
ysr@447 2355 total.set_desired( total.desired() + fl->desired() );
ysr@447 2356 total.set_prevSweep( total.prevSweep() + fl->prevSweep() );
ysr@447 2357 total.set_beforeSweep(total.beforeSweep() + fl->beforeSweep());
ysr@447 2358 total.set_count( total.count() + fl->count() );
ysr@447 2359 total.set_coalBirths( total.coalBirths() + fl->coalBirths() );
ysr@447 2360 total.set_coalDeaths( total.coalDeaths() + fl->coalDeaths() );
ysr@447 2361 total.set_splitBirths(total.splitBirths() + fl->splitBirths());
ysr@447 2362 total.set_splitDeaths(total.splitDeaths() + fl->splitDeaths());
duke@435 2363 }
ysr@447 2364 total.print_on(gclog_or_tty, "TOTAL");
ysr@447 2365 gclog_or_tty->print_cr("Total free in indexed lists "
ysr@447 2366 SIZE_FORMAT " words", totalFree);
duke@435 2367 gclog_or_tty->print("growth: %8.5f deficit: %8.5f\n",
ysr@447 2368 (double)(total.splitBirths()+total.coalBirths()-total.splitDeaths()-total.coalDeaths())/
ysr@447 2369 (total.prevSweep() != 0 ? (double)total.prevSweep() : 1.0),
ysr@447 2370 (double)(total.desired() - total.count())/(total.desired() != 0 ? (double)total.desired() : 1.0));
duke@435 2371 _dictionary->printDictCensus();
duke@435 2372 }
duke@435 2373
duke@435 2374 // Return the next displaced header, incrementing the pointer and
duke@435 2375 // recycling spool area as necessary.
duke@435 2376 markOop PromotionInfo::nextDisplacedHeader() {
duke@435 2377 assert(_spoolHead != NULL, "promotionInfo inconsistency");
duke@435 2378 assert(_spoolHead != _spoolTail || _firstIndex < _nextIndex,
duke@435 2379 "Empty spool space: no displaced header can be fetched");
duke@435 2380 assert(_spoolHead->bufferSize > _firstIndex, "Off by one error at head?");
duke@435 2381 markOop hdr = _spoolHead->displacedHdr[_firstIndex];
duke@435 2382 // Spool forward
duke@435 2383 if (++_firstIndex == _spoolHead->bufferSize) { // last location in this block
duke@435 2384 // forward to next block, recycling this block into spare spool buffer
duke@435 2385 SpoolBlock* tmp = _spoolHead->nextSpoolBlock;
duke@435 2386 assert(_spoolHead != _spoolTail, "Spooling storage mix-up");
duke@435 2387 _spoolHead->nextSpoolBlock = _spareSpool;
duke@435 2388 _spareSpool = _spoolHead;
duke@435 2389 _spoolHead = tmp;
duke@435 2390 _firstIndex = 1;
duke@435 2391 NOT_PRODUCT(
duke@435 2392 if (_spoolHead == NULL) { // all buffers fully consumed
duke@435 2393 assert(_spoolTail == NULL && _nextIndex == 1,
duke@435 2394 "spool buffers processing inconsistency");
duke@435 2395 }
duke@435 2396 )
duke@435 2397 }
duke@435 2398 return hdr;
duke@435 2399 }
duke@435 2400
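// Record a newly promoted object so it can be enumerated later: spool its
// mark word if it must be preserved for a CMS scavenge, link the object onto
// the promoted-object list, and tag it so dirty-card scanning can skip it.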
duke@435 2401 void PromotionInfo::track(PromotedObject* trackOop) {
duke@435 2402 track(trackOop, oop(trackOop)->klass());
duke@435 2403 }
duke@435 2404
duke@435 2405 void PromotionInfo::track(PromotedObject* trackOop, klassOop klassOfOop) {
duke@435 2406 // make a copy of header as it may need to be spooled
duke@435 2407 markOop mark = oop(trackOop)->mark();
duke@435 2408 trackOop->clearNext();
duke@435 2409 if (mark->must_be_preserved_for_cms_scavenge(klassOfOop)) {
duke@435 2410 // save non-prototypical header, and mark oop
duke@435 2411 saveDisplacedHeader(mark);
duke@435 2412 trackOop->setDisplacedMark();
duke@435 2413 } else {
duke@435 2414 // we'd like to assert something like the following:
duke@435 2415 // assert(mark == markOopDesc::prototype(), "consistency check");
duke@435 2416 // ... but the above won't work because the age bits have not (yet) been
duke@435 2417 // cleared. The remainder of the check would be identical to the
duke@435 2418 // condition checked in must_be_preserved() above, so we don't really
duke@435 2419 // have anything useful to check here!
duke@435 2420 }
duke@435 2421 if (_promoTail != NULL) {
duke@435 2422 assert(_promoHead != NULL, "List consistency");
duke@435 2423 _promoTail->setNext(trackOop);
duke@435 2424 _promoTail = trackOop;
duke@435 2425 } else {
duke@435 2426 assert(_promoHead == NULL, "List consistency");
duke@435 2427 _promoHead = _promoTail = trackOop;
duke@435 2428 }
duke@435 2429 // Mark as newly promoted, so we can skip over such objects
duke@435 2430 // when scanning dirty cards
duke@435 2431 assert(!trackOop->hasPromotedMark(), "Should not have been marked");
duke@435 2432 trackOop->setPromotedMark();
duke@435 2433 }
duke@435 2434
duke@435 2435 // Save the given displaced header, incrementing the pointer and
duke@435 2436 // obtaining more spool area as necessary.
duke@435 2437 void PromotionInfo::saveDisplacedHeader(markOop hdr) {
duke@435 2438 assert(_spoolHead != NULL && _spoolTail != NULL,
duke@435 2439 "promotionInfo inconsistency");
duke@435 2440 assert(_spoolTail->bufferSize > _nextIndex, "Off by one error at tail?");
duke@435 2441 _spoolTail->displacedHdr[_nextIndex] = hdr;
duke@435 2442 // Spool forward
duke@435 2443 if (++_nextIndex == _spoolTail->bufferSize) { // last location in this block
duke@435 2444 // get a new spooling block
duke@435 2445 assert(_spoolTail->nextSpoolBlock == NULL, "tail should terminate spool list");
duke@435 2446 _splice_point = _spoolTail; // save for splicing
duke@435 2447 _spoolTail->nextSpoolBlock = getSpoolBlock(); // might fail
duke@435 2448 _spoolTail = _spoolTail->nextSpoolBlock; // might become NULL ...
duke@435 2449 // ... but will attempt filling before next promotion attempt
duke@435 2450 _nextIndex = 1;
duke@435 2451 }
duke@435 2452 }
duke@435 2453
duke@435 2454 // Ensure that spooling space exists. Return false if spooling space
duke@435 2455 // could not be obtained.
duke@435 2456 bool PromotionInfo::ensure_spooling_space_work() {
duke@435 2457 assert(!has_spooling_space(), "Only call when there is no spooling space");
duke@435 2458 // Try and obtain more spooling space
duke@435 2459 SpoolBlock* newSpool = getSpoolBlock();
duke@435 2460 assert(newSpool == NULL ||
duke@435 2461 (newSpool->bufferSize != 0 && newSpool->nextSpoolBlock == NULL),
duke@435 2462 "getSpoolBlock() sanity check");
duke@435 2463 if (newSpool == NULL) {
duke@435 2464 return false;
duke@435 2465 }
duke@435 2466 _nextIndex = 1;
duke@435 2467 if (_spoolTail == NULL) {
duke@435 2468 _spoolTail = newSpool;
duke@435 2469 if (_spoolHead == NULL) {
duke@435 2470 _spoolHead = newSpool;
duke@435 2471 _firstIndex = 1;
duke@435 2472 } else {
duke@435 2473 assert(_splice_point != NULL && _splice_point->nextSpoolBlock == NULL,
duke@435 2474 "Splice point invariant");
duke@435 2475 // Extra check that _splice_point is connected to list
duke@435 2476 #ifdef ASSERT
duke@435 2477 {
duke@435 2478 SpoolBlock* blk = _spoolHead;
duke@435 2479 for (; blk->nextSpoolBlock != NULL;
duke@435 2480 blk = blk->nextSpoolBlock);
duke@435 2481 assert(blk != NULL && blk == _splice_point,
duke@435 2482 "Splice point incorrect");
duke@435 2483 }
duke@435 2484 #endif // ASSERT
duke@435 2485 _splice_point->nextSpoolBlock = newSpool;
duke@435 2486 }
duke@435 2487 } else {
duke@435 2488 assert(_spoolHead != NULL, "spool list consistency");
duke@435 2489 _spoolTail->nextSpoolBlock = newSpool;
duke@435 2490 _spoolTail = newSpool;
duke@435 2491 }
duke@435 2492 return true;
duke@435 2493 }
duke@435 2494
duke@435 2495 // Get a free spool buffer from the free pool, getting a new block
duke@435 2496 // from the heap if necessary.
duke@435 2497 SpoolBlock* PromotionInfo::getSpoolBlock() {
duke@435 2498 SpoolBlock* res;
duke@435 2499 if ((res = _spareSpool) != NULL) {
duke@435 2500 _spareSpool = _spareSpool->nextSpoolBlock;
duke@435 2501 res->nextSpoolBlock = NULL;
duke@435 2502 } else { // spare spool exhausted, get some from heap
duke@435 2503 res = (SpoolBlock*)(space()->allocateScratch(refillSize()));
duke@435 2504 if (res != NULL) {
duke@435 2505 res->init();
duke@435 2506 }
duke@435 2507 }
duke@435 2508 assert(res == NULL || res->nextSpoolBlock == NULL, "postcondition");
duke@435 2509 return res;
duke@435 2510 }
duke@435 2511
duke@435 2512 void PromotionInfo::startTrackingPromotions() {
duke@435 2513 assert(_spoolHead == _spoolTail && _firstIndex == _nextIndex,
duke@435 2514 "spooling inconsistency?");
duke@435 2515 _firstIndex = _nextIndex = 1;
duke@435 2516 _tracking = true;
duke@435 2517 }
duke@435 2518
duke@435 2519 void PromotionInfo::stopTrackingPromotions() {
duke@435 2520 assert(_spoolHead == _spoolTail && _firstIndex == _nextIndex,
duke@435 2521 "spooling inconsistency?");
duke@435 2522 _firstIndex = _nextIndex = 1;
duke@435 2523 _tracking = false;
duke@435 2524 }
duke@435 2525
duke@435 2526 // When _spoolTail is not NULL, the slot <_spoolTail, _nextIndex>
duke@435 2527 // is the next slot available for filling.
duke@435 2528 // The set of slots holding displaced headers is then all those in the
duke@435 2529 // right-open interval denoted by:
duke@435 2530 //
duke@435 2531 // [ <_spoolHead, _firstIndex>, <_spoolTail, _nextIndex> )
duke@435 2532 //
duke@435 2533 // When _spoolTail is NULL, then the set of slots with displaced headers
duke@435 2534 // is all those starting at the slot <_spoolHead, _firstIndex> and
duke@435 2535 // going up to the last slot of the last block in the linked list.
duke@435 2536 // In this latter case, _splice_point points to the tail block of
duke@435 2537 // this linked list of blocks holding displaced headers.
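// For instance, when _spoolTail is non-NULL and the list has several blocks,
// the occupied slots are, roughly:
//
//   _spoolHead                                     _spoolTail
//       |                                              |
//       v                                              v
//     [blk] --> [blk] --> ... --> [blk] --> NULL
//
// with slots _firstIndex .. bufferSize-1 of the head block, slots
// 1 .. bufferSize-1 of any interior blocks, and slots 1 .. _nextIndex-1 of
// the tail block holding displaced headers (slot 0 of each block is the
// self-referential first entry checked in verify() below).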
duke@435 2538 void PromotionInfo::verify() const {
duke@435 2539 // Verify the following:
duke@435 2540 // 1. the number of displaced headers matches the number of promoted
duke@435 2541 // objects that have displaced headers
duke@435 2542 // 2. each promoted object lies in this space
duke@435 2543 debug_only(
duke@435 2544 PromotedObject* junk = NULL;
duke@435 2545 assert(junk->next_addr() == (void*)(oop(junk)->mark_addr()),
duke@435 2546 "Offset of PromotedObject::_next is expected to align with "
duke@435 2547 " the OopDesc::_mark within OopDesc");
duke@435 2548 )
duke@435 2549 // FIXME: guarantee????
duke@435 2550 guarantee(_spoolHead == NULL || _spoolTail != NULL ||
duke@435 2551 _splice_point != NULL, "list consistency");
duke@435 2552 guarantee(_promoHead == NULL || _promoTail != NULL, "list consistency");
duke@435 2553 // count the number of objects with displaced headers
duke@435 2554 size_t numObjsWithDisplacedHdrs = 0;
duke@435 2555 for (PromotedObject* curObj = _promoHead; curObj != NULL; curObj = curObj->next()) {
duke@435 2556 guarantee(space()->is_in_reserved((HeapWord*)curObj), "Containment");
duke@435 2557 // the last promoted object may fail the mark() != NULL test of is_oop().
duke@435 2558 guarantee(curObj->next() == NULL || oop(curObj)->is_oop(), "must be an oop");
duke@435 2559 if (curObj->hasDisplacedMark()) {
duke@435 2560 numObjsWithDisplacedHdrs++;
duke@435 2561 }
duke@435 2562 }
duke@435 2563 // Count the number of displaced headers
duke@435 2564 size_t numDisplacedHdrs = 0;
duke@435 2565 for (SpoolBlock* curSpool = _spoolHead;
duke@435 2566 curSpool != _spoolTail && curSpool != NULL;
duke@435 2567 curSpool = curSpool->nextSpoolBlock) {
duke@435 2568 // the first entry is just a self-pointer; indices 1 through
duke@435 2569 // bufferSize - 1 are occupied (thus, bufferSize - 1 slots).
duke@435 2570 guarantee((void*)curSpool->displacedHdr == (void*)&curSpool->displacedHdr,
duke@435 2571 "first entry of displacedHdr should be self-referential");
duke@435 2572 numDisplacedHdrs += curSpool->bufferSize - 1;
duke@435 2573 }
duke@435 2574 guarantee((_spoolHead == _spoolTail) == (numDisplacedHdrs == 0),
duke@435 2575 "internal consistency");
duke@435 2576 guarantee(_spoolTail != NULL || _nextIndex == 1,
duke@435 2577 "Inconsistency between _spoolTail and _nextIndex");
duke@435 2578 // We overcounted (_firstIndex-1) worth of slots in block
duke@435 2579 // _spoolHead and we undercounted (_nextIndex-1) worth of
duke@435 2580 // slots in block _spoolTail. We make an appropriate
duke@435 2581 // adjustment by subtracting the first and adding the
duke@435 2582 // second: - (_firstIndex - 1) + (_nextIndex - 1)
duke@435 2583 numDisplacedHdrs += (_nextIndex - _firstIndex);
duke@435 2584 guarantee(numDisplacedHdrs == numObjsWithDisplacedHdrs, "Displaced hdr count");
duke@435 2585 }
duke@435 2586
duke@435 2587
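// CFLS_LAB is a per-thread local allocation buffer layered over a
// CompactibleFreeListSpace: each worker thread keeps its own indexed free
// lists (sized below to mirror the shared IndexSet) and refills them from
// the shared space in batches of _blocks_to_claim chunks, so that most
// small promotions avoid taking the shared per-size-class locks.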
duke@435 2588 CFLS_LAB::CFLS_LAB(CompactibleFreeListSpace* cfls) :
duke@435 2589 _cfls(cfls)
duke@435 2590 {
duke@435 2591 _blocks_to_claim = CMSParPromoteBlocksToClaim;
duke@435 2592 for (size_t i = CompactibleFreeListSpace::IndexSetStart;
duke@435 2593 i < CompactibleFreeListSpace::IndexSetSize;
duke@435 2594 i += CompactibleFreeListSpace::IndexSetStride) {
duke@435 2595 _indexedFreeList[i].set_size(i);
duke@435 2596 }
duke@435 2597 }
duke@435 2598
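// Allocate word_sz heap words. Requests of IndexSetSize words or more go
// directly to the shared dictionary under the parallel allocation lock;
// smaller requests are served from this thread's local indexed free list,
// which is refilled in a batch of _blocks_to_claim chunks when it runs dry.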
duke@435 2599 HeapWord* CFLS_LAB::alloc(size_t word_sz) {
duke@435 2600 FreeChunk* res;
duke@435 2601 word_sz = _cfls->adjustObjectSize(word_sz);
duke@435 2602 if (word_sz >= CompactibleFreeListSpace::IndexSetSize) {
duke@435 2603 // This locking manages sync with other large object allocations.
duke@435 2604 MutexLockerEx x(_cfls->parDictionaryAllocLock(),
duke@435 2605 Mutex::_no_safepoint_check_flag);
duke@435 2606 res = _cfls->getChunkFromDictionaryExact(word_sz);
duke@435 2607 if (res == NULL) return NULL;
duke@435 2608 } else {
duke@435 2609 FreeList* fl = &_indexedFreeList[word_sz];
duke@435 2611 if (fl->count() == 0) {
duke@435 2613 // Attempt to refill this local free list.
duke@435 2614 _cfls->par_get_chunk_of_blocks(word_sz, _blocks_to_claim, fl);
duke@435 2615 // If it didn't work, give up.
duke@435 2616 if (fl->count() == 0) return NULL;
duke@435 2617 }
duke@435 2618 res = fl->getChunkAtHead();
duke@435 2619 assert(res != NULL, "Why was count non-zero?");
duke@435 2620 }
duke@435 2621 res->markNotFree();
duke@435 2622 assert(!res->isFree(), "shouldn't be marked free");
coleenp@622 2623 assert(oop(res)->klass_or_null() == NULL, "should look uninitialized");
duke@435 2624 // mangle a just allocated object with a distinct pattern.
duke@435 2625 debug_only(res->mangleAllocated(word_sz));
duke@435 2626 return (HeapWord*)res;
duke@435 2627 }
duke@435 2628
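// Return any chunks remaining in the thread-local indexed free lists to the
// corresponding shared lists, taking the per-list parallel lock for each
// non-empty list, and reset the local lists to empty.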
duke@435 2629 void CFLS_LAB::retire() {
duke@435 2630 for (size_t i = CompactibleFreeListSpace::IndexSetStart;
duke@435 2631 i < CompactibleFreeListSpace::IndexSetSize;
duke@435 2632 i += CompactibleFreeListSpace::IndexSetStride) {
duke@435 2633 if (_indexedFreeList[i].count() > 0) {
duke@435 2634 MutexLockerEx x(_cfls->_indexedFreeListParLocks[i],
duke@435 2635 Mutex::_no_safepoint_check_flag);
duke@435 2636 _cfls->_indexedFreeList[i].prepend(&_indexedFreeList[i]);
duke@435 2637 // Reset this list.
duke@435 2638 _indexedFreeList[i] = FreeList();
duke@435 2639 _indexedFreeList[i].set_size(i);
duke@435 2640 }
duke@435 2641 }
duke@435 2642 }
duke@435 2643
duke@435 2644 void
duke@435 2645 CompactibleFreeListSpace::
duke@435 2646 par_get_chunk_of_blocks(size_t word_sz, size_t n, FreeList* fl) {
duke@435 2647 assert(fl->count() == 0, "Precondition.");
duke@435 2648 assert(word_sz < CompactibleFreeListSpace::IndexSetSize,
duke@435 2649 "Precondition");
duke@435 2650
duke@435 2651 // We'll try all multiples of word_sz in the indexed set (starting with
duke@435 2652 // word_sz itself), then try getting a big chunk and splitting it.
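// Note: with the loop condition below ("cur_sz < IndexSetSize && k == 1"),
// only chunks of exactly word_sz are actually taken from the indexed set;
// the k > 1 splitting branch inside the loop is kept but is not reached
// before we fall back to splitting a dictionary block.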
duke@435 2653 int k = 1;
duke@435 2654 size_t cur_sz = k * word_sz;
duke@435 2655 bool found = false;
duke@435 2656 while (cur_sz < CompactibleFreeListSpace::IndexSetSize && k == 1) {
duke@435 2657 FreeList* gfl = &_indexedFreeList[cur_sz];
duke@435 2658 FreeList fl_for_cur_sz; // Empty.
duke@435 2659 fl_for_cur_sz.set_size(cur_sz);
duke@435 2660 {
duke@435 2661 MutexLockerEx x(_indexedFreeListParLocks[cur_sz],
duke@435 2662 Mutex::_no_safepoint_check_flag);
duke@435 2663 if (gfl->count() != 0) {
duke@435 2664 size_t nn = MAX2(n/k, (size_t)1);
duke@435 2665 gfl->getFirstNChunksFromList(nn, &fl_for_cur_sz);
duke@435 2666 found = true;
duke@435 2667 }
duke@435 2668 }
duke@435 2669 // Now transfer fl_for_cur_sz to fl. Common case, we hope, is k = 1.
duke@435 2670 if (found) {
duke@435 2671 if (k == 1) {
duke@435 2672 fl->prepend(&fl_for_cur_sz);
duke@435 2673 } else {
duke@435 2674 // Divide each block on fl_for_cur_sz up k ways.
duke@435 2675 FreeChunk* fc;
duke@435 2676 while ((fc = fl_for_cur_sz.getChunkAtHead()) != NULL) {
duke@435 2677 // Must do this in reverse order, so that anybody attempting to
duke@435 2678 // access the main chunk sees it as a single free block until we
duke@435 2679 // change it.
duke@435 2680 size_t fc_size = fc->size();
duke@435 2681 for (int i = k-1; i >= 0; i--) {
duke@435 2682 FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz);
duke@435 2683 ffc->setSize(word_sz);
duke@435 2684 ffc->linkNext(NULL);
duke@435 2685 ffc->linkPrev(NULL); // Mark as a free block for other (parallel) GC threads.
duke@435 2686 // Above must occur before BOT is updated below.
duke@435 2687 // splitting from the right; at this point fc_size == (i + 1) * word_sz
duke@435 2688 _bt.mark_block((HeapWord*)ffc, word_sz);
duke@435 2689 fc_size -= word_sz;
duke@435 2690 _bt.verify_not_unallocated((HeapWord*)ffc, ffc->size());
duke@435 2691 _bt.verify_single_block((HeapWord*)fc, fc_size);
duke@435 2692 _bt.verify_single_block((HeapWord*)ffc, ffc->size());
duke@435 2693 // Push this on "fl".
duke@435 2694 fl->returnChunkAtHead(ffc);
duke@435 2695 }
duke@435 2697 assert(fl->tail()->next() == NULL, "List invariant.");
duke@435 2698 }
duke@435 2699 }
duke@435 2700 return;
duke@435 2701 }
duke@435 2702 k++; cur_sz = k * word_sz;
duke@435 2703 }
duke@435 2704 // Otherwise, we'll split a block from the dictionary.
duke@435 2705 FreeChunk* fc = NULL;
duke@435 2706 FreeChunk* rem_fc = NULL;
duke@435 2707 size_t rem;
duke@435 2708 {
duke@435 2709 MutexLockerEx x(parDictionaryAllocLock(),
duke@435 2710 Mutex::_no_safepoint_check_flag);
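// Ask the dictionary for a chunk of at least n * word_sz words, retrying with
// successively smaller n if no sufficiently large chunk is available.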
duke@435 2711 while (n > 0) {
duke@435 2712 fc = dictionary()->getChunk(MAX2(n * word_sz,
duke@435 2713 _dictionary->minSize()),
duke@435 2714 FreeBlockDictionary::atLeast);
duke@435 2715 if (fc != NULL) {
duke@435 2716 _bt.allocated((HeapWord*)fc, fc->size()); // update _unallocated_blk
duke@435 2717 dictionary()->dictCensusUpdate(fc->size(),
duke@435 2718 true /*split*/,
duke@435 2719 false /*birth*/);
duke@435 2720 break;
duke@435 2721 } else {
duke@435 2722 n--;
duke@435 2723 }
duke@435 2724 }
duke@435 2725 if (fc == NULL) return;
duke@435 2726 // Otherwise, split up that block.
duke@435 2727 size_t nn = fc->size() / word_sz;
duke@435 2728 n = MIN2(nn, n);
duke@435 2729 rem = fc->size() - n * word_sz;
duke@435 2730 // If there is a remainder, and it's too small, allocate one fewer.
duke@435 2731 if (rem > 0 && rem < MinChunkSize) {
duke@435 2732 n--; rem += word_sz;
duke@435 2733 }
duke@435 2734 // First return the remainder, if any.
duke@435 2735 // Note that we hold the lock until we decide if we're going to give
duke@435 2736 // back the remainder to the dictionary, since a contending allocator
duke@435 2737 // may otherwise see the heap as empty. (We're willing to take that
duke@435 2738 // hit if the block is a small block.)
duke@435 2739 if (rem > 0) {
duke@435 2740 size_t prefix_size = n * word_sz;
duke@435 2741 rem_fc = (FreeChunk*)((HeapWord*)fc + prefix_size);
duke@435 2742 rem_fc->setSize(rem);
duke@435 2743 rem_fc->linkNext(NULL);
duke@435 2744 rem_fc->linkPrev(NULL); // Mark as a free block for other (parallel) GC threads.
duke@435 2745 // Above must occur before BOT is updated below.
duke@435 2746 _bt.split_block((HeapWord*)fc, fc->size(), prefix_size);
duke@435 2747 if (rem >= IndexSetSize) {
duke@435 2748 returnChunkToDictionary(rem_fc);
duke@435 2749 dictionary()->dictCensusUpdate(fc->size(),
duke@435 2750 true /*split*/,
duke@435 2751 true /*birth*/);
duke@435 2752 rem_fc = NULL;
duke@435 2753 }
duke@435 2754 // Otherwise, return it to the small list below.
duke@435 2755 }
duke@435 2756 }
duke@435 2757 //
duke@435 2758 if (rem_fc != NULL) {
duke@435 2759 MutexLockerEx x(_indexedFreeListParLocks[rem],
duke@435 2760 Mutex::_no_safepoint_check_flag);
duke@435 2761 _bt.verify_not_unallocated((HeapWord*)rem_fc, rem_fc->size());
duke@435 2762 _indexedFreeList[rem].returnChunkAtHead(rem_fc);
duke@435 2763 smallSplitBirth(rem);
duke@435 2764 }
duke@435 2765
duke@435 2766 // Now do the splitting up.
duke@435 2767 // Must do this in reverse order, so that anybody attempting to
duke@435 2768 // access the main chunk sees it as a single free block until we
duke@435 2769 // change it.
duke@435 2770 size_t fc_size = n * word_sz;
duke@435 2771 // All but first chunk in this loop
duke@435 2772 for (ssize_t i = n-1; i > 0; i--) {
duke@435 2773 FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz);
duke@435 2774 ffc->setSize(word_sz);
duke@435 2775 ffc->linkNext(NULL);
duke@435 2776 ffc->linkPrev(NULL); // Mark as a free block for other (parallel) GC threads.
duke@435 2777 // Above must occur before BOT is updated below.
duke@435 2778 // splitting from the right; at this point fc_size == (i + 1) * word_sz
duke@435 2779 _bt.mark_block((HeapWord*)ffc, word_sz);
duke@435 2780 fc_size -= word_sz;
duke@435 2781 _bt.verify_not_unallocated((HeapWord*)ffc, ffc->size());
duke@435 2782 _bt.verify_single_block((HeapWord*)ffc, ffc->size());
duke@435 2783 _bt.verify_single_block((HeapWord*)fc, fc_size);
duke@435 2784 // Push this on "fl".
duke@435 2785 fl->returnChunkAtHead(ffc);
duke@435 2786 }
duke@435 2787 // First chunk
duke@435 2788 fc->setSize(word_sz);
duke@435 2789 fc->linkNext(NULL);
duke@435 2790 fc->linkPrev(NULL);
duke@435 2791 _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
duke@435 2792 _bt.verify_single_block((HeapWord*)fc, fc->size());
duke@435 2793 fl->returnChunkAtHead(fc);
duke@435 2794
duke@435 2795 {
duke@435 2796 MutexLockerEx x(_indexedFreeListParLocks[word_sz],
duke@435 2797 Mutex::_no_safepoint_check_flag);
duke@435 2798 ssize_t new_births = _indexedFreeList[word_sz].splitBirths() + n;
duke@435 2799 _indexedFreeList[word_sz].set_splitBirths(new_births);
duke@435 2800 ssize_t new_surplus = _indexedFreeList[word_sz].surplus() + n;
duke@435 2801 _indexedFreeList[word_sz].set_surplus(new_surplus);
duke@435 2802 }
duke@435 2803
duke@435 2805 assert(fl->tail()->next() == NULL, "List invariant.");
duke@435 2806 }
duke@435 2807
duke@435 2808 // Set up the space's par_seq_tasks structure for work claiming
duke@435 2809 // for parallel rescan. See CMSParRemarkTask where this is currently used.
duke@435 2810 // XXX Need to suitably abstract and generalize this and the next
duke@435 2811 // method into one.
duke@435 2812 void
duke@435 2813 CompactibleFreeListSpace::
duke@435 2814 initialize_sequential_subtasks_for_rescan(int n_threads) {
duke@435 2815 // The "size" of each task is fixed according to rescan_task_size.
duke@435 2816 assert(n_threads > 0, "Unexpected n_threads argument");
duke@435 2817 const size_t task_size = rescan_task_size();
duke@435 2818 size_t n_tasks = (used_region().word_size() + task_size - 1)/task_size;
ysr@775 2819 assert((n_tasks == 0) == used_region().is_empty(), "n_tasks incorrect");
ysr@775 2820 assert(n_tasks == 0 ||
ysr@775 2821 ((used_region().start() + (n_tasks - 1)*task_size < used_region().end()) &&
ysr@775 2822 (used_region().start() + n_tasks*task_size >= used_region().end())),
ysr@775 2823 "n_tasks calculation incorrect");
duke@435 2824 SequentialSubTasksDone* pst = conc_par_seq_tasks();
duke@435 2825 assert(!pst->valid(), "Clobbering existing data?");
duke@435 2826 pst->set_par_threads(n_threads);
duke@435 2827 pst->set_n_tasks((int)n_tasks);
duke@435 2828 }
duke@435 2829
duke@435 2830 // Set up the space's par_seq_tasks structure for work claiming
duke@435 2831 // for parallel concurrent marking. See CMSConcMarkTask where this is currently used.
duke@435 2832 void
duke@435 2833 CompactibleFreeListSpace::
duke@435 2834 initialize_sequential_subtasks_for_marking(int n_threads,
duke@435 2835 HeapWord* low) {
duke@435 2836 // The "size" of each task is fixed according to marking_task_size.
duke@435 2837 assert(n_threads > 0, "Unexpected n_threads argument");
duke@435 2838 const size_t task_size = marking_task_size();
duke@435 2839 assert(task_size > CardTableModRefBS::card_size_in_words &&
duke@435 2840 (task_size % CardTableModRefBS::card_size_in_words == 0),
duke@435 2841 "Otherwise arithmetic below would be incorrect");
duke@435 2842 MemRegion span = _gen->reserved();
duke@435 2843 if (low != NULL) {
duke@435 2844 if (span.contains(low)) {
duke@435 2845 // Align low down to a card boundary so that
duke@435 2846 // we can use block_offset_careful() on span boundaries.
duke@435 2847 HeapWord* aligned_low = (HeapWord*)align_size_down((uintptr_t)low,
duke@435 2848 CardTableModRefBS::card_size);
duke@435 2849 // Clip span prefix at aligned_low
duke@435 2850 span = span.intersection(MemRegion(aligned_low, span.end()));
duke@435 2851 } else if (low > span.end()) {
duke@435 2852 span = MemRegion(low, low); // Null region
duke@435 2853 } // else use entire span
duke@435 2854 }
duke@435 2855 assert(span.is_empty() ||
duke@435 2856 ((uintptr_t)span.start() % CardTableModRefBS::card_size == 0),
duke@435 2857 "span should start at a card boundary");
duke@435 2858 size_t n_tasks = (span.word_size() + task_size - 1)/task_size;
duke@435 2859 assert((n_tasks == 0) == span.is_empty(), "Inconsistency");
duke@435 2860 assert(n_tasks == 0 ||
duke@435 2861 ((span.start() + (n_tasks - 1)*task_size < span.end()) &&
duke@435 2862 (span.start() + n_tasks*task_size >= span.end())),
ysr@775 2863 "n_tasks calculation incorrect");
duke@435 2864 SequentialSubTasksDone* pst = conc_par_seq_tasks();
duke@435 2865 assert(!pst->valid(), "Clobbering existing data?");
duke@435 2866 pst->set_par_threads(n_threads);
duke@435 2867 pst->set_n_tasks((int)n_tasks);
duke@435 2868 }
