src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp

author       coleenp
date         Sun, 13 Apr 2008 17:43:42 -0400
changeset    548:ba764ed4b6f2
parent       447:6432c3bb6240
child        622:790e66e5fbac
child        777:37f87013dfd8
permissions  -rw-r--r--

6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
Summary: Compressed oops in instances, arrays, and headers. Code contributors are coleenp, phh, never, swamyv
Reviewed-by: jmasa, kamg, acorn, tbell, kvn, rasbold
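
The 32GB figure in the synopsis follows from the encoding: with 8-byte object
alignment, a 32-bit compressed oop can name 2^32 * 8 bytes = 32GB of heap. The
sketch below shows only that basic encode/decode arithmetic under assumed,
illustrative names (narrow_oop_t, heap_base, kLogMinObjAlignment); it is not
the HotSpot implementation, which among other things must also handle null.

    #include <assert.h>
    #include <stdint.h>

    // Hypothetical stand-ins for illustration only -- not the HotSpot API.
    typedef uint32_t narrow_oop_t;            // 32-bit compressed reference
    static const int kLogMinObjAlignment = 3; // log2 of 8-byte object alignment
    static char*     heap_base = 0;           // start of the reserved Java heap

    // Encode: (address - heap_base) >> alignment. With 8-byte alignment the
    // 32-bit result covers 4G * 8 bytes = 32GB of heap.
    inline narrow_oop_t encode_oop(const void* p) {
      uintptr_t offset = (uintptr_t)((const char*)p - heap_base);
      assert((offset & ((1u << kLogMinObjAlignment) - 1)) == 0);  // alignment check
      return (narrow_oop_t)(offset >> kLogMinObjAlignment);
    }

    // Decode: reverse the shift and add the heap base back in.
    inline void* decode_oop(narrow_oop_t v) {
      return heap_base + ((uintptr_t)v << kLogMinObjAlignment);
    }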

duke@435 1 /*
duke@435 2 * Copyright 2001-2006 Sun Microsystems, Inc. All Rights Reserved.
duke@435 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
duke@435 4 *
duke@435 5 * This code is free software; you can redistribute it and/or modify it
duke@435 6 * under the terms of the GNU General Public License version 2 only, as
duke@435 7 * published by the Free Software Foundation.
duke@435 8 *
duke@435 9 * This code is distributed in the hope that it will be useful, but WITHOUT
duke@435 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
duke@435 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
duke@435 12 * version 2 for more details (a copy is included in the LICENSE file that
duke@435 13 * accompanied this code).
duke@435 14 *
duke@435 15 * You should have received a copy of the GNU General Public License version
duke@435 16 * 2 along with this work; if not, write to the Free Software Foundation,
duke@435 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
duke@435 18 *
duke@435 19 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
duke@435 20 * CA 95054 USA or visit www.sun.com if you need additional information or
duke@435 21 * have any questions.
duke@435 22 *
duke@435 23 */
duke@435 24
duke@435 25 # include "incls/_precompiled.incl"
duke@435 26 # include "incls/_compactibleFreeListSpace.cpp.incl"
duke@435 27
duke@435 28 /////////////////////////////////////////////////////////////////////////
duke@435 29 //// CompactibleFreeListSpace
duke@435 30 /////////////////////////////////////////////////////////////////////////
duke@435 31
duke@435 32 // highest ranked free list lock rank
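// (Each CompactibleFreeListSpace constructor consumes one rank value via the
//  _lockRank-- in the initializer list below, so spaces constructed later get
//  lower-ranked free list locks.)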
duke@435 33 int CompactibleFreeListSpace::_lockRank = Mutex::leaf + 3;
duke@435 34
duke@435 35 // Constructor
duke@435 36 CompactibleFreeListSpace::CompactibleFreeListSpace(BlockOffsetSharedArray* bs,
duke@435 37 MemRegion mr, bool use_adaptive_freelists,
duke@435 38 FreeBlockDictionary::DictionaryChoice dictionaryChoice) :
duke@435 39 _dictionaryChoice(dictionaryChoice),
duke@435 40 _adaptive_freelists(use_adaptive_freelists),
duke@435 41 _bt(bs, mr),
duke@435 42 // free list locks are in the range of values taken by _lockRank
duke@435 43 // This range currently is [_leaf+2, _leaf+3]
duke@435 44 // Note: this requires that CFLspace c'tors
duke@435 45 // are called serially in the order in which the locks are
duke@435 46 // acquired in the program text. This is true today.
duke@435 47 _freelistLock(_lockRank--, "CompactibleFreeListSpace._lock", true),
duke@435 48 _parDictionaryAllocLock(Mutex::leaf - 1, // == rank(ExpandHeap_lock) - 1
duke@435 49 "CompactibleFreeListSpace._dict_par_lock", true),
duke@435 50 _rescan_task_size(CardTableModRefBS::card_size_in_words * BitsPerWord *
duke@435 51 CMSRescanMultiple),
duke@435 52 _marking_task_size(CardTableModRefBS::card_size_in_words * BitsPerWord *
duke@435 53 CMSConcMarkMultiple),
duke@435 54 _collector(NULL)
duke@435 55 {
duke@435 56 _bt.set_space(this);
duke@435 57 initialize(mr, true);
duke@435 58 // We have all of "mr", all of which we place in the dictionary
duke@435 59 // as one big chunk. We'll need to decide here which of several
duke@435 60 // possible alternative dictionary implementations to use. For
duke@435 61 // now the choice is easy, since we have only one working
duke@435 62 // implementation, namely, the simple binary tree (splaying
duke@435 63 // temporarily disabled).
duke@435 64 switch (dictionaryChoice) {
duke@435 65 case FreeBlockDictionary::dictionaryBinaryTree:
duke@435 66 _dictionary = new BinaryTreeDictionary(mr);
duke@435 67 break;
duke@435 68 case FreeBlockDictionary::dictionarySplayTree:
duke@435 69 case FreeBlockDictionary::dictionarySkipList:
duke@435 70 default:
duke@435 71 warning("dictionaryChoice: selected option not understood; using"
duke@435 72 " default BinaryTreeDictionary implementation instead.");
duke@435 73 _dictionary = new BinaryTreeDictionary(mr);
duke@435 74 break;
duke@435 75 }
duke@435 76 splitBirth(mr.word_size());
duke@435 77 assert(_dictionary != NULL, "CMS dictionary initialization");
duke@435 78 // The indexed free lists are initially all empty and are lazily
duke@435 79 // filled in on demand. Initialize the array elements to NULL.
duke@435 80 initializeIndexedFreeListArray();
duke@435 81
duke@435 82 // Not using adaptive free lists assumes that allocation is first
duke@435 83 // from the linAB's. Also, a CMS perm gen that can be compacted
duke@435 84 // must have the klass's klassKlass allocated at a lower
duke@435 85 // address in the heap than the klass, so that the klassKlass is
duke@435 86 // moved to its new location before the klass is moved.
duke@435 87 // Set the _refillSize for the linear allocation blocks
duke@435 88 if (!use_adaptive_freelists) {
duke@435 89 FreeChunk* fc = _dictionary->getChunk(mr.word_size());
duke@435 90 // The small linAB initially has all the space and will allocate
duke@435 91 // a chunk of any size.
duke@435 92 HeapWord* addr = (HeapWord*) fc;
duke@435 93 _smallLinearAllocBlock.set(addr, fc->size() ,
duke@435 94 1024*SmallForLinearAlloc, fc->size());
duke@435 95 // Note that _unallocated_block is not updated here.
duke@435 96 // Allocations from the linear allocation block should
duke@435 97 // update it.
duke@435 98 } else {
duke@435 99 _smallLinearAllocBlock.set(0, 0, 1024*SmallForLinearAlloc,
duke@435 100 SmallForLinearAlloc);
duke@435 101 }
duke@435 102 // CMSIndexedFreeListReplenish should be at least 1
duke@435 103 CMSIndexedFreeListReplenish = MAX2((uintx)1, CMSIndexedFreeListReplenish);
duke@435 104 _promoInfo.setSpace(this);
duke@435 105 if (UseCMSBestFit) {
duke@435 106 _fitStrategy = FreeBlockBestFitFirst;
duke@435 107 } else {
duke@435 108 _fitStrategy = FreeBlockStrategyNone;
duke@435 109 }
duke@435 110 checkFreeListConsistency();
duke@435 111
duke@435 112 // Initialize locks for parallel case.
duke@435 113 if (ParallelGCThreads > 0) {
duke@435 114 for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
duke@435 115 _indexedFreeListParLocks[i] = new Mutex(Mutex::leaf - 1, // == ExpandHeap_lock - 1
duke@435 116 "a freelist par lock",
duke@435 117 true);
duke@435 118 if (_indexedFreeListParLocks[i] == NULL)
duke@435 119 vm_exit_during_initialization("Could not allocate a par lock");
duke@435 120 DEBUG_ONLY(
duke@435 121 _indexedFreeList[i].set_protecting_lock(_indexedFreeListParLocks[i]);
duke@435 122 )
duke@435 123 }
duke@435 124 _dictionary->set_par_lock(&_parDictionaryAllocLock);
duke@435 125 }
duke@435 126 }
duke@435 127
duke@435 128 // Like CompactibleSpace forward() but always calls cross_threshold() to
duke@435 129 // update the block offset table. Removed initialize_threshold call because
duke@435 130 // CFLS does not use a block offset array for contiguous spaces.
duke@435 131 HeapWord* CompactibleFreeListSpace::forward(oop q, size_t size,
duke@435 132 CompactPoint* cp, HeapWord* compact_top) {
duke@435 133 // q is alive
duke@435 134 // First check if we should switch compaction space
duke@435 135 assert(this == cp->space, "'this' should be current compaction space.");
duke@435 136 size_t compaction_max_size = pointer_delta(end(), compact_top);
duke@435 137 assert(adjustObjectSize(size) == cp->space->adjust_object_size_v(size),
duke@435 138 "virtual adjustObjectSize_v() method is not correct");
duke@435 139 size_t adjusted_size = adjustObjectSize(size);
duke@435 140 assert(compaction_max_size >= MinChunkSize || compaction_max_size == 0,
duke@435 141 "no small fragments allowed");
duke@435 142 assert(minimum_free_block_size() == MinChunkSize,
duke@435 143 "for de-virtualized reference below");
duke@435 144 // Can't leave a nonzero size, residual fragment smaller than MinChunkSize
duke@435 145 if (adjusted_size + MinChunkSize > compaction_max_size &&
duke@435 146 adjusted_size != compaction_max_size) {
duke@435 147 do {
duke@435 148 // switch to next compaction space
duke@435 149 cp->space->set_compaction_top(compact_top);
duke@435 150 cp->space = cp->space->next_compaction_space();
duke@435 151 if (cp->space == NULL) {
duke@435 152 cp->gen = GenCollectedHeap::heap()->prev_gen(cp->gen);
duke@435 153 assert(cp->gen != NULL, "compaction must succeed");
duke@435 154 cp->space = cp->gen->first_compaction_space();
duke@435 155 assert(cp->space != NULL, "generation must have a first compaction space");
duke@435 156 }
duke@435 157 compact_top = cp->space->bottom();
duke@435 158 cp->space->set_compaction_top(compact_top);
duke@435 159 // The correct adjusted_size may not be the same as that for this method
duke@435 160 // (i.e., cp->space may no longer be "this"), so adjust the size again.
duke@435 161 // Use the virtual method here; above, the non-virtual call was used
duke@435 162 // to save the cost of the virtual dispatch.
duke@435 163 adjusted_size = cp->space->adjust_object_size_v(size);
duke@435 164 compaction_max_size = pointer_delta(cp->space->end(), compact_top);
duke@435 165 assert(cp->space->minimum_free_block_size() == 0, "just checking");
duke@435 166 } while (adjusted_size > compaction_max_size);
duke@435 167 }
duke@435 168
duke@435 169 // store the forwarding pointer into the mark word
duke@435 170 if ((HeapWord*)q != compact_top) {
duke@435 171 q->forward_to(oop(compact_top));
duke@435 172 assert(q->is_gc_marked(), "encoding the pointer should preserve the mark");
duke@435 173 } else {
duke@435 174 // if the object isn't moving we can just set the mark to the default
duke@435 175 // mark and handle it specially later on.
duke@435 176 q->init_mark();
duke@435 177 assert(q->forwardee() == NULL, "should be forwarded to NULL");
duke@435 178 }
duke@435 179
coleenp@548 180 VALIDATE_MARK_SWEEP_ONLY(MarkSweep::register_live_oop(q, adjusted_size));
duke@435 181 compact_top += adjusted_size;
duke@435 182
duke@435 183 // we need to update the offset table so that the beginnings of objects can be
duke@435 184 // found during scavenge. Note that we are updating the offset table based on
duke@435 185 // where the object will be once the compaction phase finishes.
duke@435 186
duke@435 187 // Always call cross_threshold(). A contiguous space calls it only when
duke@435 188 // compaction_top exceeds the current threshold, but that shortcut does
duke@435 189 // not apply to a non-contiguous space.
duke@435 190 cp->threshold =
duke@435 191 cp->space->cross_threshold(compact_top - adjusted_size, compact_top);
duke@435 192 return compact_top;
duke@435 193 }
duke@435 194
duke@435 195 // A modified copy of OffsetTableContigSpace::cross_threshold() with _offsets -> _bt
duke@435 196 // and use of single_block instead of alloc_block. The name here is not really
duke@435 197 // appropriate - maybe a more general name could be invented for both the
duke@435 198 // contiguous and noncontiguous spaces.
duke@435 199
duke@435 200 HeapWord* CompactibleFreeListSpace::cross_threshold(HeapWord* start, HeapWord* the_end) {
duke@435 201 _bt.single_block(start, the_end);
duke@435 202 return end();
duke@435 203 }
duke@435 204
duke@435 205 // Initialize them to NULL.
duke@435 206 void CompactibleFreeListSpace::initializeIndexedFreeListArray() {
duke@435 207 for (size_t i = 0; i < IndexSetSize; i++) {
duke@435 208 // Note that on platforms where objects are double word aligned,
duke@435 209 // the odd array elements are not used. It is convenient, however,
duke@435 210 // to map directly from the object size to the array element.
duke@435 211 _indexedFreeList[i].reset(IndexSetSize);
duke@435 212 _indexedFreeList[i].set_size(i);
duke@435 213 assert(_indexedFreeList[i].count() == 0, "reset check failed");
duke@435 214 assert(_indexedFreeList[i].head() == NULL, "reset check failed");
duke@435 215 assert(_indexedFreeList[i].tail() == NULL, "reset check failed");
duke@435 216 assert(_indexedFreeList[i].hint() == IndexSetSize, "reset check failed");
duke@435 217 }
duke@435 218 }
duke@435 219
duke@435 220 void CompactibleFreeListSpace::resetIndexedFreeListArray() {
duke@435 221 for (int i = 1; i < IndexSetSize; i++) {
duke@435 222 assert(_indexedFreeList[i].size() == (size_t) i,
duke@435 223 "Indexed free list sizes are incorrect");
duke@435 224 _indexedFreeList[i].reset(IndexSetSize);
duke@435 225 assert(_indexedFreeList[i].count() == 0, "reset check failed");
duke@435 226 assert(_indexedFreeList[i].head() == NULL, "reset check failed");
duke@435 227 assert(_indexedFreeList[i].tail() == NULL, "reset check failed");
duke@435 228 assert(_indexedFreeList[i].hint() == IndexSetSize, "reset check failed");
duke@435 229 }
duke@435 230 }
duke@435 231
duke@435 232 void CompactibleFreeListSpace::reset(MemRegion mr) {
duke@435 233 resetIndexedFreeListArray();
duke@435 234 dictionary()->reset();
duke@435 235 if (BlockOffsetArrayUseUnallocatedBlock) {
duke@435 236 assert(end() == mr.end(), "We are compacting to the bottom of CMS gen");
duke@435 237 // Everything's allocated until proven otherwise.
duke@435 238 _bt.set_unallocated_block(end());
duke@435 239 }
duke@435 240 if (!mr.is_empty()) {
duke@435 241 assert(mr.word_size() >= MinChunkSize, "Chunk size is too small");
duke@435 242 _bt.single_block(mr.start(), mr.word_size());
duke@435 243 FreeChunk* fc = (FreeChunk*) mr.start();
duke@435 244 fc->setSize(mr.word_size());
duke@435 245 if (mr.word_size() >= IndexSetSize ) {
duke@435 246 returnChunkToDictionary(fc);
duke@435 247 } else {
duke@435 248 _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
duke@435 249 _indexedFreeList[mr.word_size()].returnChunkAtHead(fc);
duke@435 250 }
duke@435 251 }
duke@435 252 _promoInfo.reset();
duke@435 253 _smallLinearAllocBlock._ptr = NULL;
duke@435 254 _smallLinearAllocBlock._word_size = 0;
duke@435 255 }
duke@435 256
duke@435 257 void CompactibleFreeListSpace::reset_after_compaction() {
duke@435 258 // Reset the space to the new reality - one free chunk.
duke@435 259 MemRegion mr(compaction_top(), end());
duke@435 260 reset(mr);
duke@435 261 // Now refill the linear allocation block(s) if possible.
duke@435 262 if (_adaptive_freelists) {
duke@435 263 refillLinearAllocBlocksIfNeeded();
duke@435 264 } else {
duke@435 265 // Place as much of mr in the linAB as we can get,
duke@435 266 // provided it was big enough to go into the dictionary.
duke@435 267 FreeChunk* fc = dictionary()->findLargestDict();
duke@435 268 if (fc != NULL) {
duke@435 269 assert(fc->size() == mr.word_size(),
duke@435 270 "Why was the chunk broken up?");
duke@435 271 removeChunkFromDictionary(fc);
duke@435 272 HeapWord* addr = (HeapWord*) fc;
duke@435 273 _smallLinearAllocBlock.set(addr, fc->size() ,
duke@435 274 1024*SmallForLinearAlloc, fc->size());
duke@435 275 // Note that _unallocated_block is not updated here.
duke@435 276 }
duke@435 277 }
duke@435 278 }
duke@435 279
duke@435 280 // Walks the entire dictionary, returning a coterminal
duke@435 281 // chunk, if it exists. Use with caution since it involves
duke@435 282 // a potentially complete walk of a potentially large tree.
duke@435 283 FreeChunk* CompactibleFreeListSpace::find_chunk_at_end() {
duke@435 284
duke@435 285 assert_lock_strong(&_freelistLock);
duke@435 286
duke@435 287 return dictionary()->find_chunk_ends_at(end());
duke@435 288 }
duke@435 289
duke@435 290
duke@435 291 #ifndef PRODUCT
duke@435 292 void CompactibleFreeListSpace::initializeIndexedFreeListArrayReturnedBytes() {
duke@435 293 for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
duke@435 294 _indexedFreeList[i].allocation_stats()->set_returnedBytes(0);
duke@435 295 }
duke@435 296 }
duke@435 297
duke@435 298 size_t CompactibleFreeListSpace::sumIndexedFreeListArrayReturnedBytes() {
duke@435 299 size_t sum = 0;
duke@435 300 for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
duke@435 301 sum += _indexedFreeList[i].allocation_stats()->returnedBytes();
duke@435 302 }
duke@435 303 return sum;
duke@435 304 }
duke@435 305
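// Note: a free chunk can never be smaller than MinChunkSize, so the indexed
// lists below that size are always empty and the scan below starts there.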
duke@435 306 size_t CompactibleFreeListSpace::totalCountInIndexedFreeLists() const {
duke@435 307 size_t count = 0;
duke@435 308 for (int i = MinChunkSize; i < IndexSetSize; i++) {
duke@435 309 debug_only(
duke@435 310 ssize_t total_list_count = 0;
duke@435 311 for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
duke@435 312 fc = fc->next()) {
duke@435 313 total_list_count++;
duke@435 314 }
duke@435 315 assert(total_list_count == _indexedFreeList[i].count(),
duke@435 316 "Count in list is incorrect");
duke@435 317 )
duke@435 318 count += _indexedFreeList[i].count();
duke@435 319 }
duke@435 320 return count;
duke@435 321 }
duke@435 322
duke@435 323 size_t CompactibleFreeListSpace::totalCount() {
duke@435 324 size_t num = totalCountInIndexedFreeLists();
duke@435 325 num += dictionary()->totalCount();
duke@435 326 if (_smallLinearAllocBlock._word_size != 0) {
duke@435 327 num++;
duke@435 328 }
duke@435 329 return num;
duke@435 330 }
duke@435 331 #endif
duke@435 332
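// Treats p as the start of a block and consults its FreeChunk header.
// Absent the free list lock, the answer may be stale by the time it is
// returned (e.g. if the chunk is concurrently allocated).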
duke@435 333 bool CompactibleFreeListSpace::is_free_block(const HeapWord* p) const {
duke@435 334 FreeChunk* fc = (FreeChunk*) p;
duke@435 335 return fc->isFree();
duke@435 336 }
duke@435 337
duke@435 338 size_t CompactibleFreeListSpace::used() const {
duke@435 339 return capacity() - free();
duke@435 340 }
duke@435 341
duke@435 342 size_t CompactibleFreeListSpace::free() const {
duke@435 343 // "MT-safe, but not MT-precise"(TM), if you will: i.e.
duke@435 344 // if you do this while the structures are in flux you
duke@435 345 // may get an approximate answer only; for instance
duke@435 346 // because there is concurrent allocation either
duke@435 347 // directly by mutators or for promotion during a GC.
duke@435 348 // It's "MT-safe", however, in the sense that you are guaranteed
duke@435 349 // not to crash and burn, for instance, because of walking
duke@435 350 // pointers that could disappear as you were walking them.
duke@435 351 // The approximation is because the various components
duke@435 352 // that are read below are not read atomically (and
duke@435 353 // further the computation of totalSizeInIndexedFreeLists()
duke@435 354 // is itself a non-atomic computation). The normal use of
duke@435 355 // this is during a resize operation at the end of GC
duke@435 356 // and at that time you are guaranteed to get the
duke@435 357 // correct actual value. However, for instance, this is
duke@435 358 // also read completely asynchronously by the "perf-sampler"
duke@435 359 // that supports jvmstat, and you are apt to see the values
duke@435 360 // flicker in such cases.
duke@435 361 assert(_dictionary != NULL, "No _dictionary?");
duke@435 362 return (_dictionary->totalChunkSize(DEBUG_ONLY(freelistLock())) +
duke@435 363 totalSizeInIndexedFreeLists() +
duke@435 364 _smallLinearAllocBlock._word_size) * HeapWordSize;
duke@435 365 }
duke@435 366
duke@435 367 size_t CompactibleFreeListSpace::max_alloc_in_words() const {
duke@435 368 assert(_dictionary != NULL, "No _dictionary?");
duke@435 369 assert_locked();
duke@435 370 size_t res = _dictionary->maxChunkSize();
duke@435 371 res = MAX2(res, MIN2(_smallLinearAllocBlock._word_size,
duke@435 372 (size_t) SmallForLinearAlloc - 1));
duke@435 373 // XXX the following could potentially be pretty slow;
duke@435 374 // should one, pessimally for the rare cases when res
duke@435 375 // calculated above is less than IndexSetSize,
duke@435 376 // just return res calculated above? My reasoning was that
duke@435 377 // those cases will be so rare that the extra time spent doesn't
duke@435 378 // really matter....
duke@435 379 // Note: do not change the loop test i >= res + IndexSetStride
duke@435 380 // to i > res below, because i is unsigned and res may be zero.
duke@435 381 for (size_t i = IndexSetSize - 1; i >= res + IndexSetStride;
duke@435 382 i -= IndexSetStride) {
duke@435 383 if (_indexedFreeList[i].head() != NULL) {
duke@435 384 assert(_indexedFreeList[i].count() != 0, "Inconsistent FreeList");
duke@435 385 return i;
duke@435 386 }
duke@435 387 }
duke@435 388 return res;
duke@435 389 }
duke@435 390
duke@435 391 void CompactibleFreeListSpace::reportFreeListStatistics() const {
duke@435 392 assert_lock_strong(&_freelistLock);
duke@435 393 assert(PrintFLSStatistics != 0, "Reporting error");
duke@435 394 _dictionary->reportStatistics();
duke@435 395 if (PrintFLSStatistics > 1) {
duke@435 396 reportIndexedFreeListStatistics();
duke@435 397 size_t totalSize = totalSizeInIndexedFreeLists() +
duke@435 398 _dictionary->totalChunkSize(DEBUG_ONLY(freelistLock()));
duke@435 399 gclog_or_tty->print(" free=%ld frag=%1.4f\n", totalSize, flsFrag());
duke@435 400 }
duke@435 401 }
duke@435 402
duke@435 403 void CompactibleFreeListSpace::reportIndexedFreeListStatistics() const {
duke@435 404 assert_lock_strong(&_freelistLock);
duke@435 405 gclog_or_tty->print("Statistics for IndexedFreeLists:\n"
duke@435 406 "--------------------------------\n");
duke@435 407 size_t totalSize = totalSizeInIndexedFreeLists();
duke@435 408 size_t freeBlocks = numFreeBlocksInIndexedFreeLists();
duke@435 409 gclog_or_tty->print("Total Free Space: %d\n", totalSize);
duke@435 410 gclog_or_tty->print("Max Chunk Size: %d\n", maxChunkSizeInIndexedFreeLists());
duke@435 411 gclog_or_tty->print("Number of Blocks: %d\n", freeBlocks);
duke@435 412 if (freeBlocks != 0) {
duke@435 413 gclog_or_tty->print("Av. Block Size: %d\n", totalSize/freeBlocks);
duke@435 414 }
duke@435 415 }
duke@435 416
duke@435 417 size_t CompactibleFreeListSpace::numFreeBlocksInIndexedFreeLists() const {
duke@435 418 size_t res = 0;
duke@435 419 for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
duke@435 420 debug_only(
duke@435 421 ssize_t recount = 0;
duke@435 422 for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
duke@435 423 fc = fc->next()) {
duke@435 424 recount += 1;
duke@435 425 }
duke@435 426 assert(recount == _indexedFreeList[i].count(),
duke@435 427 "Incorrect count in list");
duke@435 428 )
duke@435 429 res += _indexedFreeList[i].count();
duke@435 430 }
duke@435 431 return res;
duke@435 432 }
duke@435 433
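// Returns the size of the largest non-empty indexed free list, scanning from
// the largest indexed size downwards; returns 0 if all indexed lists are empty.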
duke@435 434 size_t CompactibleFreeListSpace::maxChunkSizeInIndexedFreeLists() const {
duke@435 435 for (size_t i = IndexSetSize - 1; i != 0; i -= IndexSetStride) {
duke@435 436 if (_indexedFreeList[i].head() != NULL) {
duke@435 437 assert(_indexedFreeList[i].count() != 0, "Inconsistent FreeList");
duke@435 438 return (size_t)i;
duke@435 439 }
duke@435 440 }
duke@435 441 return 0;
duke@435 442 }
duke@435 443
duke@435 444 void CompactibleFreeListSpace::set_end(HeapWord* value) {
duke@435 445 HeapWord* prevEnd = end();
duke@435 446 assert(prevEnd != value, "unnecessary set_end call");
duke@435 447 assert(prevEnd == NULL || value >= unallocated_block(), "New end is below unallocated block");
duke@435 448 _end = value;
duke@435 449 if (prevEnd != NULL) {
duke@435 450 // Resize the underlying block offset table.
duke@435 451 _bt.resize(pointer_delta(value, bottom()));
duke@435 452 if (value <= prevEnd) {
duke@435 453 assert(value >= unallocated_block(), "New end is below unallocated block");
duke@435 454 } else {
duke@435 455 // Now, take this new chunk and add it to the free blocks.
duke@435 456 // Note that the BOT has not yet been updated for this block.
duke@435 457 size_t newFcSize = pointer_delta(value, prevEnd);
duke@435 458 // XXX This is REALLY UGLY and should be fixed up. XXX
duke@435 459 if (!_adaptive_freelists && _smallLinearAllocBlock._ptr == NULL) {
duke@435 460 // Mark the boundary of the new block in BOT
duke@435 461 _bt.mark_block(prevEnd, value);
duke@435 462 // put it all in the linAB
duke@435 463 if (ParallelGCThreads == 0) {
duke@435 464 _smallLinearAllocBlock._ptr = prevEnd;
duke@435 465 _smallLinearAllocBlock._word_size = newFcSize;
duke@435 466 repairLinearAllocBlock(&_smallLinearAllocBlock);
duke@435 467 } else { // ParallelGCThreads > 0
duke@435 468 MutexLockerEx x(parDictionaryAllocLock(),
duke@435 469 Mutex::_no_safepoint_check_flag);
duke@435 470 _smallLinearAllocBlock._ptr = prevEnd;
duke@435 471 _smallLinearAllocBlock._word_size = newFcSize;
duke@435 472 repairLinearAllocBlock(&_smallLinearAllocBlock);
duke@435 473 }
duke@435 474 // Births of chunks put into a LinAB are not recorded. Births
duke@435 475 // of chunks as they are allocated out of a LinAB are.
duke@435 476 } else {
duke@435 477 // Add the block to the free lists, if possible coalescing it
duke@435 478 // with the last free block, and update the BOT and census data.
duke@435 479 addChunkToFreeListsAtEndRecordingStats(prevEnd, newFcSize);
duke@435 480 }
duke@435 481 }
duke@435 482 }
duke@435 483 }
duke@435 484
duke@435 485 class FreeListSpace_DCTOC : public Filtering_DCTOC {
duke@435 486 CompactibleFreeListSpace* _cfls;
duke@435 487 CMSCollector* _collector;
duke@435 488 protected:
duke@435 489 // Override.
duke@435 490 #define walk_mem_region_with_cl_DECL(ClosureType) \
duke@435 491 virtual void walk_mem_region_with_cl(MemRegion mr, \
duke@435 492 HeapWord* bottom, HeapWord* top, \
duke@435 493 ClosureType* cl); \
duke@435 494 void walk_mem_region_with_cl_par(MemRegion mr, \
duke@435 495 HeapWord* bottom, HeapWord* top, \
duke@435 496 ClosureType* cl); \
duke@435 497 void walk_mem_region_with_cl_nopar(MemRegion mr, \
duke@435 498 HeapWord* bottom, HeapWord* top, \
duke@435 499 ClosureType* cl)
duke@435 500 walk_mem_region_with_cl_DECL(OopClosure);
duke@435 501 walk_mem_region_with_cl_DECL(FilteringClosure);
duke@435 502
duke@435 503 public:
duke@435 504 FreeListSpace_DCTOC(CompactibleFreeListSpace* sp,
duke@435 505 CMSCollector* collector,
duke@435 506 OopClosure* cl,
duke@435 507 CardTableModRefBS::PrecisionStyle precision,
duke@435 508 HeapWord* boundary) :
duke@435 509 Filtering_DCTOC(sp, cl, precision, boundary),
duke@435 510 _cfls(sp), _collector(collector) {}
duke@435 511 };
duke@435 512
duke@435 513 // We de-virtualize the block-related calls below, since we know that our
duke@435 514 // space is a CompactibleFreeListSpace.
duke@435 515 #define FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(ClosureType) \
duke@435 516 void FreeListSpace_DCTOC::walk_mem_region_with_cl(MemRegion mr, \
duke@435 517 HeapWord* bottom, \
duke@435 518 HeapWord* top, \
duke@435 519 ClosureType* cl) { \
duke@435 520 if (SharedHeap::heap()->n_par_threads() > 0) { \
duke@435 521 walk_mem_region_with_cl_par(mr, bottom, top, cl); \
duke@435 522 } else { \
duke@435 523 walk_mem_region_with_cl_nopar(mr, bottom, top, cl); \
duke@435 524 } \
duke@435 525 } \
duke@435 526 void FreeListSpace_DCTOC::walk_mem_region_with_cl_par(MemRegion mr, \
duke@435 527 HeapWord* bottom, \
duke@435 528 HeapWord* top, \
duke@435 529 ClosureType* cl) { \
duke@435 530 /* Skip parts that are before "mr", in case "block_start" sent us \
duke@435 531 back too far. */ \
duke@435 532 HeapWord* mr_start = mr.start(); \
duke@435 533 size_t bot_size = _cfls->CompactibleFreeListSpace::block_size(bottom); \
duke@435 534 HeapWord* next = bottom + bot_size; \
duke@435 535 while (next < mr_start) { \
duke@435 536 bottom = next; \
duke@435 537 bot_size = _cfls->CompactibleFreeListSpace::block_size(bottom); \
duke@435 538 next = bottom + bot_size; \
duke@435 539 } \
duke@435 540 \
duke@435 541 while (bottom < top) { \
duke@435 542 if (_cfls->CompactibleFreeListSpace::block_is_obj(bottom) && \
duke@435 543 !_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks( \
duke@435 544 oop(bottom)) && \
duke@435 545 !_collector->CMSCollector::is_dead_obj(oop(bottom))) { \
duke@435 546 size_t word_sz = oop(bottom)->oop_iterate(cl, mr); \
duke@435 547 bottom += _cfls->adjustObjectSize(word_sz); \
duke@435 548 } else { \
duke@435 549 bottom += _cfls->CompactibleFreeListSpace::block_size(bottom); \
duke@435 550 } \
duke@435 551 } \
duke@435 552 } \
duke@435 553 void FreeListSpace_DCTOC::walk_mem_region_with_cl_nopar(MemRegion mr, \
duke@435 554 HeapWord* bottom, \
duke@435 555 HeapWord* top, \
duke@435 556 ClosureType* cl) { \
duke@435 557 /* Skip parts that are before "mr", in case "block_start" sent us \
duke@435 558 back too far. */ \
duke@435 559 HeapWord* mr_start = mr.start(); \
duke@435 560 size_t bot_size = _cfls->CompactibleFreeListSpace::block_size_nopar(bottom); \
duke@435 561 HeapWord* next = bottom + bot_size; \
duke@435 562 while (next < mr_start) { \
duke@435 563 bottom = next; \
duke@435 564 bot_size = _cfls->CompactibleFreeListSpace::block_size_nopar(bottom); \
duke@435 565 next = bottom + bot_size; \
duke@435 566 } \
duke@435 567 \
duke@435 568 while (bottom < top) { \
duke@435 569 if (_cfls->CompactibleFreeListSpace::block_is_obj_nopar(bottom) && \
duke@435 570 !_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks( \
duke@435 571 oop(bottom)) && \
duke@435 572 !_collector->CMSCollector::is_dead_obj(oop(bottom))) { \
duke@435 573 size_t word_sz = oop(bottom)->oop_iterate(cl, mr); \
duke@435 574 bottom += _cfls->adjustObjectSize(word_sz); \
duke@435 575 } else { \
duke@435 576 bottom += _cfls->CompactibleFreeListSpace::block_size_nopar(bottom); \
duke@435 577 } \
duke@435 578 } \
duke@435 579 }
duke@435 580
duke@435 581 // (There are only two of these, rather than N, because the split is due
duke@435 582 // only to the introduction of the FilteringClosure, a local part of the
duke@435 583 // impl of this abstraction.)
duke@435 584 FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(OopClosure)
duke@435 585 FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(FilteringClosure)
duke@435 586
duke@435 587 DirtyCardToOopClosure*
duke@435 588 CompactibleFreeListSpace::new_dcto_cl(OopClosure* cl,
duke@435 589 CardTableModRefBS::PrecisionStyle precision,
duke@435 590 HeapWord* boundary) {
duke@435 591 return new FreeListSpace_DCTOC(this, _collector, cl, precision, boundary);
duke@435 592 }
duke@435 593
duke@435 594
duke@435 595 // Note on locking for the space iteration functions:
duke@435 596 // since the collector's iteration activities are concurrent with
duke@435 597 // allocation activities by mutators, absent a suitable mutual exclusion
duke@435 598 // mechanism the iterators may go awry. For instance, a block being iterated
duke@435 599 // may suddenly be allocated or divided up and part of it allocated and
duke@435 600 // so on.
duke@435 601
duke@435 602 // Apply the given closure to each block in the space.
duke@435 603 void CompactibleFreeListSpace::blk_iterate_careful(BlkClosureCareful* cl) {
duke@435 604 assert_lock_strong(freelistLock());
duke@435 605 HeapWord *cur, *limit;
duke@435 606 for (cur = bottom(), limit = end(); cur < limit;
duke@435 607 cur += cl->do_blk_careful(cur));
duke@435 608 }
duke@435 609
duke@435 610 // Apply the given closure to each block in the space.
duke@435 611 void CompactibleFreeListSpace::blk_iterate(BlkClosure* cl) {
duke@435 612 assert_lock_strong(freelistLock());
duke@435 613 HeapWord *cur, *limit;
duke@435 614 for (cur = bottom(), limit = end(); cur < limit;
duke@435 615 cur += cl->do_blk(cur));
duke@435 616 }
duke@435 617
duke@435 618 // Apply the given closure to each oop in the space.
duke@435 619 void CompactibleFreeListSpace::oop_iterate(OopClosure* cl) {
duke@435 620 assert_lock_strong(freelistLock());
duke@435 621 HeapWord *cur, *limit;
duke@435 622 size_t curSize;
duke@435 623 for (cur = bottom(), limit = end(); cur < limit;
duke@435 624 cur += curSize) {
duke@435 625 curSize = block_size(cur);
duke@435 626 if (block_is_obj(cur)) {
duke@435 627 oop(cur)->oop_iterate(cl);
duke@435 628 }
duke@435 629 }
duke@435 630 }
duke@435 631
duke@435 632 // Apply the given closure to each oop in the intersection of the space and the memory region.
duke@435 633 void CompactibleFreeListSpace::oop_iterate(MemRegion mr, OopClosure* cl) {
duke@435 634 assert_lock_strong(freelistLock());
duke@435 635 if (is_empty()) {
duke@435 636 return;
duke@435 637 }
duke@435 638 MemRegion cur = MemRegion(bottom(), end());
duke@435 639 mr = mr.intersection(cur);
duke@435 640 if (mr.is_empty()) {
duke@435 641 return;
duke@435 642 }
duke@435 643 if (mr.equals(cur)) {
duke@435 644 oop_iterate(cl);
duke@435 645 return;
duke@435 646 }
duke@435 647 assert(mr.end() <= end(), "just took an intersection above");
duke@435 648 HeapWord* obj_addr = block_start(mr.start());
duke@435 649 HeapWord* t = mr.end();
duke@435 650
duke@435 651 SpaceMemRegionOopsIterClosure smr_blk(cl, mr);
duke@435 652 if (block_is_obj(obj_addr)) {
duke@435 653 // Handle first object specially.
duke@435 654 oop obj = oop(obj_addr);
duke@435 655 obj_addr += adjustObjectSize(obj->oop_iterate(&smr_blk));
duke@435 656 } else {
duke@435 657 FreeChunk* fc = (FreeChunk*)obj_addr;
duke@435 658 obj_addr += fc->size();
duke@435 659 }
duke@435 660 while (obj_addr < t) {
duke@435 661 HeapWord* obj = obj_addr;
duke@435 662 obj_addr += block_size(obj_addr);
duke@435 663 // If "obj_addr" is not greater than top, then the
duke@435 664 // entire object "obj" is within the region.
duke@435 665 if (obj_addr <= t) {
duke@435 666 if (block_is_obj(obj)) {
duke@435 667 oop(obj)->oop_iterate(cl);
duke@435 668 }
duke@435 669 } else {
duke@435 670 // "obj" extends beyond end of region
duke@435 671 if (block_is_obj(obj)) {
duke@435 672 oop(obj)->oop_iterate(&smr_blk);
duke@435 673 }
duke@435 674 break;
duke@435 675 }
duke@435 676 }
duke@435 677 }
duke@435 678
duke@435 679 // NOTE: In the following methods, in order to safely be able to
duke@435 680 // apply the closure to an object, we need to be sure that the
duke@435 681 // object has been initialized. We are guaranteed that an object
duke@435 682 // is initialized if we are holding the Heap_lock with the
duke@435 683 // world stopped.
duke@435 684 void CompactibleFreeListSpace::verify_objects_initialized() const {
duke@435 685 if (is_init_completed()) {
duke@435 686 assert_locked_or_safepoint(Heap_lock);
duke@435 687 if (Universe::is_fully_initialized()) {
duke@435 688 guarantee(SafepointSynchronize::is_at_safepoint(),
duke@435 689 "Required for objects to be initialized");
duke@435 690 }
duke@435 691 } // else make a concession at vm start-up
duke@435 692 }
duke@435 693
duke@435 694 // Apply the given closure to each object in the space
duke@435 695 void CompactibleFreeListSpace::object_iterate(ObjectClosure* blk) {
duke@435 696 assert_lock_strong(freelistLock());
duke@435 697 NOT_PRODUCT(verify_objects_initialized());
duke@435 698 HeapWord *cur, *limit;
duke@435 699 size_t curSize;
duke@435 700 for (cur = bottom(), limit = end(); cur < limit;
duke@435 701 cur += curSize) {
duke@435 702 curSize = block_size(cur);
duke@435 703 if (block_is_obj(cur)) {
duke@435 704 blk->do_object(oop(cur));
duke@435 705 }
duke@435 706 }
duke@435 707 }
duke@435 708
duke@435 709 void CompactibleFreeListSpace::object_iterate_mem(MemRegion mr,
duke@435 710 UpwardsObjectClosure* cl) {
duke@435 711 assert_locked();
duke@435 712 NOT_PRODUCT(verify_objects_initialized());
duke@435 713 Space::object_iterate_mem(mr, cl);
duke@435 714 }
duke@435 715
duke@435 716 // Callers of this iterator beware: The closure application should
duke@435 717 // be robust in the face of uninitialized objects and should (always)
duke@435 718 // return a correct size so that the next addr + size below gives us a
duke@435 719 // valid block boundary. [See for instance,
duke@435 720 // ScanMarkedObjectsAgainCarefullyClosure::do_object_careful()
duke@435 721 // in ConcurrentMarkSweepGeneration.cpp.]
duke@435 722 HeapWord*
duke@435 723 CompactibleFreeListSpace::object_iterate_careful(ObjectClosureCareful* cl) {
duke@435 724 assert_lock_strong(freelistLock());
duke@435 725 HeapWord *addr, *last;
duke@435 726 size_t size;
duke@435 727 for (addr = bottom(), last = end();
duke@435 728 addr < last; addr += size) {
duke@435 729 FreeChunk* fc = (FreeChunk*)addr;
duke@435 730 if (fc->isFree()) {
duke@435 731 // Since we hold the free list lock, which protects direct
duke@435 732 // allocation in this generation by mutators, a free object
duke@435 733 // will remain free throughout this iteration code.
duke@435 734 size = fc->size();
duke@435 735 } else {
duke@435 736 // Note that the object need not necessarily be initialized,
duke@435 737 // because (for instance) the free list lock does NOT protect
duke@435 738 // object initialization. The closure application below must
duke@435 739 // therefore be correct in the face of uninitialized objects.
duke@435 740 size = cl->do_object_careful(oop(addr));
duke@435 741 if (size == 0) {
duke@435 742 // An unparsable object was found. Signal early termination.
duke@435 743 return addr;
duke@435 744 }
duke@435 745 }
duke@435 746 }
duke@435 747 return NULL;
duke@435 748 }
duke@435 749
duke@435 750 // Callers of this iterator beware: The closure application should
duke@435 751 // be robust in the face of uninitialized objects and should (always)
duke@435 752 // return a correct size so that the next addr + size below gives us a
duke@435 753 // valid block boundary. [See for instance,
duke@435 754 // ScanMarkedObjectsAgainCarefullyClosure::do_object_careful()
duke@435 755 // in ConcurrentMarkSweepGeneration.cpp.]
duke@435 756 HeapWord*
duke@435 757 CompactibleFreeListSpace::object_iterate_careful_m(MemRegion mr,
duke@435 758 ObjectClosureCareful* cl) {
duke@435 759 assert_lock_strong(freelistLock());
duke@435 760 // Can't use used_region() below because it may not necessarily
duke@435 761 // be the same as [bottom(),end()); although we could
duke@435 762 // use [used_region().start(),round_to(used_region().end(),CardSize)),
duke@435 763 // that appears too cumbersome, so we just do the simpler check
duke@435 764 // in the assertion below.
duke@435 765 assert(!mr.is_empty() && MemRegion(bottom(),end()).contains(mr),
duke@435 766 "mr should be non-empty and within used space");
duke@435 767 HeapWord *addr, *end;
duke@435 768 size_t size;
duke@435 769 for (addr = block_start_careful(mr.start()), end = mr.end();
duke@435 770 addr < end; addr += size) {
duke@435 771 FreeChunk* fc = (FreeChunk*)addr;
duke@435 772 if (fc->isFree()) {
duke@435 773 // Since we hold the free list lock, which protects direct
duke@435 774 // allocation in this generation by mutators, a free object
duke@435 775 // will remain free throughout this iteration code.
duke@435 776 size = fc->size();
duke@435 777 } else {
duke@435 778 // Note that the object need not necessarily be initialized,
duke@435 779 // because (for instance) the free list lock does NOT protect
duke@435 780 // object initialization. The closure application below must
duke@435 781 // therefore be correct in the face of uninitialized objects.
duke@435 782 size = cl->do_object_careful_m(oop(addr), mr);
duke@435 783 if (size == 0) {
duke@435 784 // An unparsable object was found. Signal early termination.
duke@435 785 return addr;
duke@435 786 }
duke@435 787 }
duke@435 788 }
duke@435 789 return NULL;
duke@435 790 }
duke@435 791
duke@435 792
duke@435 793 HeapWord* CompactibleFreeListSpace::block_start(const void* p) const {
duke@435 794 NOT_PRODUCT(verify_objects_initialized());
duke@435 795 return _bt.block_start(p);
duke@435 796 }
duke@435 797
duke@435 798 HeapWord* CompactibleFreeListSpace::block_start_careful(const void* p) const {
duke@435 799 return _bt.block_start_careful(p);
duke@435 800 }
duke@435 801
duke@435 802 size_t CompactibleFreeListSpace::block_size(const HeapWord* p) const {
duke@435 803 NOT_PRODUCT(verify_objects_initialized());
duke@435 804 assert(MemRegion(bottom(), end()).contains(p), "p not in space");
duke@435 805 // This must be volatile, or else there is a danger that the compiler
duke@435 806 // will compile the code below into a sometimes-infinite loop, by keeping
duke@435 807 // the value read the first time in a register.
duke@435 808 oop o = (oop)p;
duke@435 809 volatile oop* second_word_addr = o->klass_addr();
duke@435 810 while (true) {
duke@435 811 klassOop k = (klassOop)(*second_word_addr);
duke@435 812 // We must do this until we get a consistent view of the object.
duke@435 813 if (FreeChunk::secondWordIndicatesFreeChunk((intptr_t)k)) {
duke@435 814 FreeChunk* fc = (FreeChunk*)p;
duke@435 815 volatile size_t* sz_addr = (volatile size_t*)(fc->size_addr());
duke@435 816 size_t res = (*sz_addr);
duke@435 817 klassOop k2 = (klassOop)(*second_word_addr); // Read to confirm.
duke@435 818 if (k == k2) {
duke@435 819 assert(res != 0, "Block size should not be 0");
duke@435 820 return res;
duke@435 821 }
duke@435 822 } else if (k != NULL) {
duke@435 823 assert(k->is_oop(true /* ignore mark word */), "Should really be klass oop.");
duke@435 824 assert(o->is_parsable(), "Should be parsable");
duke@435 825 assert(o->is_oop(true /* ignore mark word */), "Should be an oop.");
duke@435 826 size_t res = o->size_given_klass(k->klass_part());
duke@435 827 res = adjustObjectSize(res);
duke@435 828 assert(res != 0, "Block size should not be 0");
duke@435 829 return res;
duke@435 830 }
duke@435 831 }
duke@435 832 }
duke@435 833
duke@435 834 // A variant of the above that uses the Printezis bits for
duke@435 835 // unparsable but allocated objects. This avoids any possible
duke@435 836 // stalls waiting for mutators to initialize objects, and is
duke@435 837 // thus potentially faster than the variant above. However,
duke@435 838 // this variant may return a zero size for a block that is
duke@435 839 // under mutation and for which a consistent size cannot be
duke@435 840 // inferred without stalling; see CMSCollector::block_size_if_printezis_bits().
duke@435 841 size_t CompactibleFreeListSpace::block_size_no_stall(HeapWord* p,
duke@435 842 const CMSCollector* c)
duke@435 843 const {
duke@435 844 assert(MemRegion(bottom(), end()).contains(p), "p not in space");
duke@435 845 // This must be volatile, or else there is a danger that the compiler
duke@435 846 // will compile the code below into a sometimes-infinite loop, by keeping
duke@435 847 // the value read the first time in a register.
duke@435 848 oop o = (oop)p;
duke@435 849 volatile oop* second_word_addr = o->klass_addr();
duke@435 850 DEBUG_ONLY(uint loops = 0;)
duke@435 851 while (true) {
duke@435 852 klassOop k = (klassOop)(*second_word_addr);
duke@435 853 // We must do this until we get a consistent view of the object.
duke@435 854 if (FreeChunk::secondWordIndicatesFreeChunk((intptr_t)k)) {
duke@435 855 FreeChunk* fc = (FreeChunk*)p;
duke@435 856 volatile size_t* sz_addr = (volatile size_t*)(fc->size_addr());
duke@435 857 size_t res = (*sz_addr);
duke@435 858 klassOop k2 = (klassOop)(*second_word_addr); // Read to confirm.
duke@435 859 if (k == k2) {
duke@435 860 assert(res != 0, "Block size should not be 0");
duke@435 861 assert(loops == 0, "Should be 0");
duke@435 862 return res;
duke@435 863 }
duke@435 864 } else if (k != NULL && o->is_parsable()) {
duke@435 865 assert(k->is_oop(), "Should really be klass oop.");
duke@435 866 assert(o->is_oop(), "Should be an oop");
duke@435 867 size_t res = o->size_given_klass(k->klass_part());
duke@435 868 res = adjustObjectSize(res);
duke@435 869 assert(res != 0, "Block size should not be 0");
duke@435 870 return res;
duke@435 871 } else {
duke@435 872 return c->block_size_if_printezis_bits(p);
duke@435 873 }
duke@435 874 assert(loops == 0, "Can loop at most once");
duke@435 875 DEBUG_ONLY(loops++;)
duke@435 876 }
duke@435 877 }
duke@435 878
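// Non-retrying variant of block_size() for the non-parallel path, where the
// block is not expected to be changing underfoot, so a single read of the
// FreeChunk header suffices.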
duke@435 879 size_t CompactibleFreeListSpace::block_size_nopar(const HeapWord* p) const {
duke@435 880 NOT_PRODUCT(verify_objects_initialized());
duke@435 881 assert(MemRegion(bottom(), end()).contains(p), "p not in space");
duke@435 882 FreeChunk* fc = (FreeChunk*)p;
duke@435 883 if (fc->isFree()) {
duke@435 884 return fc->size();
duke@435 885 } else {
duke@435 886 // Ignore mark word because this may be a recently promoted
duke@435 887 // object whose mark word is used to chain together grey
duke@435 888 // objects (the last one would have a null value).
duke@435 889 assert(oop(p)->is_oop(true), "Should be an oop");
duke@435 890 return adjustObjectSize(oop(p)->size());
duke@435 891 }
duke@435 892 }
duke@435 893
duke@435 894 // This implementation assumes that the property of "being an object" is
duke@435 895 // stable. But being a free chunk may not be (because of parallel
duke@435 896 // promotion.)
duke@435 897 bool CompactibleFreeListSpace::block_is_obj(const HeapWord* p) const {
duke@435 898 FreeChunk* fc = (FreeChunk*)p;
duke@435 899 assert(is_in_reserved(p), "Should be in space");
duke@435 900 // When doing a mark-sweep-compact of the CMS generation, this
duke@435 901 // assertion may fail because prepare_for_compaction() uses
duke@435 902 // space that is garbage to maintain information on ranges of
duke@435 903 // live objects so that these live ranges can be moved as a whole.
duke@435 904 // Comment out this assertion until that problem can be solved
duke@435 905 // (i.e., that the block start calculation may look at objects
duke@435 906 // at addresses below "p" in finding the object that contains "p",
duke@435 907 // and those objects (if garbage) may have been modified to hold
duke@435 908 // live range information.)
duke@435 909 // assert(ParallelGCThreads > 0 || _bt.block_start(p) == p, "Should be a block boundary");
duke@435 910 klassOop k = oop(p)->klass();
duke@435 911 intptr_t ki = (intptr_t)k;
duke@435 912 if (FreeChunk::secondWordIndicatesFreeChunk(ki)) return false;
duke@435 913 if (k != NULL) {
duke@435 914 // Ignore mark word because it may have been used to
duke@435 915 // chain together promoted objects (the last one
duke@435 916 // would have a null value).
duke@435 917 assert(oop(p)->is_oop(true), "Should be an oop");
duke@435 918 return true;
duke@435 919 } else {
duke@435 920 return false; // Was not an object at the start of collection.
duke@435 921 }
duke@435 922 }
duke@435 923
duke@435 924 // Check if the object is alive. This fact is checked either by consulting
duke@435 925 // the main marking bitmap in the sweeping phase or, if it's a permanent
duke@435 926 // generation and we're not in the sweeping phase, by checking the
duke@435 927 // perm_gen_verify_bit_map where we store the "deadness" information if
duke@435 928 // we did not sweep the perm gen in the most recent previous GC cycle.
duke@435 929 bool CompactibleFreeListSpace::obj_is_alive(const HeapWord* p) const {
duke@435 930 assert (block_is_obj(p), "The address should point to an object");
duke@435 931
duke@435 932 // If we're sweeping, we use object liveness information from the main bit map
duke@435 933 // for both perm gen and old gen.
duke@435 934 // We don't need to lock the bitmap (live_map or dead_map below), because
duke@435 935 // EITHER we are in the middle of the sweeping phase, and the
duke@435 936 // main marking bit map (live_map below) is locked,
duke@435 937 // OR we're in other phases and perm_gen_verify_bit_map (dead_map below)
duke@435 938 // is stable, because it's mutated only in the sweeping phase.
duke@435 939 if (_collector->abstract_state() == CMSCollector::Sweeping) {
duke@435 940 CMSBitMap* live_map = _collector->markBitMap();
duke@435 941 return live_map->isMarked((HeapWord*) p);
duke@435 942 } else {
duke@435 943 // If we're not currently sweeping and we haven't swept the perm gen in
duke@435 944 // the previous concurrent cycle then we may have dead but unswept objects
duke@435 945 // in the perm gen. In this case, we use the "deadness" information
duke@435 946 // that we had saved in perm_gen_verify_bit_map at the last sweep.
duke@435 947 if (!CMSClassUnloadingEnabled && _collector->_permGen->reserved().contains(p)) {
duke@435 948 if (_collector->verifying()) {
duke@435 949 CMSBitMap* dead_map = _collector->perm_gen_verify_bit_map();
duke@435 950 // Object is marked in the dead_map bitmap at the previous sweep
duke@435 951 // when we know that it's dead; if the bitmap is not allocated then
duke@435 952 // the object is alive.
duke@435 953 return (dead_map->sizeInBits() == 0) // bit_map has not been allocated
duke@435 954 || !dead_map->par_isMarked((HeapWord*) p);
duke@435 955 } else {
duke@435 956 return false; // We can't say for sure if it's live, so we say that it's dead.
duke@435 957 }
duke@435 958 }
duke@435 959 }
duke@435 960 return true;
duke@435 961 }
duke@435 962
duke@435 963 bool CompactibleFreeListSpace::block_is_obj_nopar(const HeapWord* p) const {
duke@435 964 FreeChunk* fc = (FreeChunk*)p;
duke@435 965 assert(is_in_reserved(p), "Should be in space");
duke@435 966 assert(_bt.block_start(p) == p, "Should be a block boundary");
duke@435 967 if (!fc->isFree()) {
duke@435 968 // Ignore mark word because it may have been used to
duke@435 969 // chain together promoted objects (the last one
duke@435 970 // would have a null value).
duke@435 971 assert(oop(p)->is_oop(true), "Should be an oop");
duke@435 972 return true;
duke@435 973 }
duke@435 974 return false;
duke@435 975 }
duke@435 976
duke@435 977 // "MT-safe but not guaranteed MT-precise" (TM); you may get an
duke@435 978 // approximate answer if you don't hold the freelistLock when you call this.
duke@435 979 size_t CompactibleFreeListSpace::totalSizeInIndexedFreeLists() const {
duke@435 980 size_t size = 0;
duke@435 981 for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
duke@435 982 debug_only(
duke@435 983 // We may be calling here without the lock in which case we
duke@435 984 // won't do this modest sanity check.
duke@435 985 if (freelistLock()->owned_by_self()) {
duke@435 986 size_t total_list_size = 0;
duke@435 987 for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
duke@435 988 fc = fc->next()) {
duke@435 989 total_list_size += i;
duke@435 990 }
duke@435 991 assert(total_list_size == i * _indexedFreeList[i].count(),
duke@435 992 "Count in list is incorrect");
duke@435 993 }
duke@435 994 )
duke@435 995 size += i * _indexedFreeList[i].count();
duke@435 996 }
duke@435 997 return size;
duke@435 998 }
duke@435 999
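// Parallel allocation entry point: takes the free list lock (without a
// safepoint check) and delegates to the locked allocate() below.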
duke@435 1000 HeapWord* CompactibleFreeListSpace::par_allocate(size_t size) {
duke@435 1001 MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
duke@435 1002 return allocate(size);
duke@435 1003 }
duke@435 1004
duke@435 1005 HeapWord*
duke@435 1006 CompactibleFreeListSpace::getChunkFromSmallLinearAllocBlockRemainder(size_t size) {
duke@435 1007 return getChunkFromLinearAllocBlockRemainder(&_smallLinearAllocBlock, size);
duke@435 1008 }
duke@435 1009
duke@435 1010 HeapWord* CompactibleFreeListSpace::allocate(size_t size) {
duke@435 1011 assert_lock_strong(freelistLock());
duke@435 1012 HeapWord* res = NULL;
duke@435 1013 assert(size == adjustObjectSize(size),
duke@435 1014 "use adjustObjectSize() before calling into allocate()");
duke@435 1015
duke@435 1016 if (_adaptive_freelists) {
duke@435 1017 res = allocate_adaptive_freelists(size);
duke@435 1018 } else { // non-adaptive free lists
duke@435 1019 res = allocate_non_adaptive_freelists(size);
duke@435 1020 }
duke@435 1021
duke@435 1022 if (res != NULL) {
duke@435 1023 // check that res does lie in this space!
duke@435 1024 assert(is_in_reserved(res), "Not in this space!");
duke@435 1025 assert(is_aligned((void*)res), "alignment check");
duke@435 1026
duke@435 1027 FreeChunk* fc = (FreeChunk*)res;
duke@435 1028 fc->markNotFree();
duke@435 1029 assert(!fc->isFree(), "shouldn't be marked free");
duke@435 1030 assert(oop(fc)->klass() == NULL, "should look uninitialized");
duke@435 1031 // Verify that the block offset table shows this to
duke@435 1032 // be a single block, but not one which is unallocated.
duke@435 1033 _bt.verify_single_block(res, size);
duke@435 1034 _bt.verify_not_unallocated(res, size);
duke@435 1035 // mangle a just allocated object with a distinct pattern.
duke@435 1036 debug_only(fc->mangleAllocated(size));
duke@435 1037 }
duke@435 1038
duke@435 1039 return res;
duke@435 1040 }
duke@435 1041
duke@435 1042 HeapWord* CompactibleFreeListSpace::allocate_non_adaptive_freelists(size_t size) {
duke@435 1043 HeapWord* res = NULL;
duke@435 1044 // try to use linear allocation for smaller blocks
duke@435 1045 if (size < _smallLinearAllocBlock._allocation_size_limit) {
duke@435 1046 // if successful, the following also adjusts block offset table
duke@435 1047 res = getChunkFromSmallLinearAllocBlock(size);
duke@435 1048 }
duke@435 1049 // Else triage to indexed lists for smaller sizes
duke@435 1050 if (res == NULL) {
duke@435 1051 if (size < SmallForDictionary) {
duke@435 1052 res = (HeapWord*) getChunkFromIndexedFreeList(size);
duke@435 1053 } else {
duke@435 1054 // else get it from the big dictionary; if even this doesn't
duke@435 1055 // work we are out of luck.
duke@435 1056 res = (HeapWord*)getChunkFromDictionaryExact(size);
duke@435 1057 }
duke@435 1058 }
duke@435 1059
duke@435 1060 return res;
duke@435 1061 }
duke@435 1062
duke@435 1063 HeapWord* CompactibleFreeListSpace::allocate_adaptive_freelists(size_t size) {
duke@435 1064 assert_lock_strong(freelistLock());
duke@435 1065 HeapWord* res = NULL;
duke@435 1066 assert(size == adjustObjectSize(size),
duke@435 1067 "use adjustObjectSize() before calling into allocate()");
duke@435 1068
duke@435 1069 // Strategy
duke@435 1070 // if small
duke@435 1071 // exact size from small object indexed list if small
duke@435 1072 // small or large linear allocation block (linAB) as appropriate
duke@435 1073 // take from lists of greater sized chunks
duke@435 1074 // else
duke@435 1075 // dictionary
duke@435 1076 // small or large linear allocation block if it has the space
duke@435 1077 // Try allocating exact size from indexTable first
duke@435 1078 if (size < IndexSetSize) {
duke@435 1079 res = (HeapWord*) getChunkFromIndexedFreeList(size);
duke@435 1080 if(res != NULL) {
duke@435 1081 assert(res != (HeapWord*)_indexedFreeList[size].head(),
duke@435 1082 "Not removed from free list");
duke@435 1083 // no block offset table adjustment is necessary on blocks in
duke@435 1084 // the indexed lists.
duke@435 1085
duke@435 1086 // Try allocating from the small LinAB
duke@435 1087 } else if (size < _smallLinearAllocBlock._allocation_size_limit &&
duke@435 1088 (res = getChunkFromSmallLinearAllocBlock(size)) != NULL) {
duke@435 1089 // if successful, the above also adjusts block offset table
duke@435 1090 // Note that this call will refill the LinAB to
duke@435 1091 // satisfy the request. This is different from
duke@435 1092 // evm.
duke@435 1093 // Don't record chunk off a LinAB? smallSplitBirth(size);
duke@435 1094
duke@435 1095 } else {
duke@435 1096 // Raid the exact free lists larger than size, even if they are not
duke@435 1097 // overpopulated.
duke@435 1098 res = (HeapWord*) getChunkFromGreater(size);
duke@435 1099 }
duke@435 1100 } else {
duke@435 1101 // Big objects get allocated directly from the dictionary.
duke@435 1102 res = (HeapWord*) getChunkFromDictionaryExact(size);
duke@435 1103 if (res == NULL) {
duke@435 1104 // Try hard not to fail since an allocation failure will likely
duke@435 1105 // trigger a synchronous GC. Try to get the space from the
duke@435 1106 // allocation blocks.
duke@435 1107 res = getChunkFromSmallLinearAllocBlockRemainder(size);
duke@435 1108 }
duke@435 1109 }
duke@435 1110
duke@435 1111 return res;
duke@435 1112 }
duke@435 1113
duke@435 1114 // A worst-case estimate of the space required (in HeapWords) to expand the heap
duke@435 1115 // when promoting obj.
duke@435 1116 size_t CompactibleFreeListSpace::expansionSpaceRequired(size_t obj_size) const {
duke@435 1117 // Depending on the object size, expansion may require refilling either a
duke@435 1118 // bigLAB or a smallLAB plus refilling a PromotionInfo object. MinChunkSize
duke@435 1119 // is added because the dictionary may over-allocate to avoid fragmentation.
duke@435 1120 size_t space = obj_size;
duke@435 1121 if (!_adaptive_freelists) {
duke@435 1122 space = MAX2(space, _smallLinearAllocBlock._refillSize);
duke@435 1123 }
duke@435 1124 space += _promoInfo.refillSize() + 2 * MinChunkSize;
duke@435 1125 return space;
duke@435 1126 }
duke@435 1127
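// Satisfy a request for numWords by carving it out of a strictly larger chunk:
// first try the indexed lists of larger sizes, then fall back to the
// dictionary; the remainder of the split is returned to the free lists.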
duke@435 1128 FreeChunk* CompactibleFreeListSpace::getChunkFromGreater(size_t numWords) {
duke@435 1129 FreeChunk* ret;
duke@435 1130
duke@435 1131 assert(numWords >= MinChunkSize, "Size is less than minimum");
duke@435 1132 assert(linearAllocationWouldFail() || bestFitFirst(),
duke@435 1133 "Should not be here");
duke@435 1134
duke@435 1135 size_t i;
duke@435 1136 size_t currSize = numWords + MinChunkSize;
duke@435 1137 assert(currSize % MinObjAlignment == 0, "currSize should be aligned");
duke@435 1138 for (i = currSize; i < IndexSetSize; i += IndexSetStride) {
duke@435 1139 FreeList* fl = &_indexedFreeList[i];
duke@435 1140 if (fl->head()) {
duke@435 1141 ret = getFromListGreater(fl, numWords);
duke@435 1142 assert(ret == NULL || ret->isFree(), "Should be returning a free chunk");
duke@435 1143 return ret;
duke@435 1144 }
duke@435 1145 }
duke@435 1146
duke@435 1147 currSize = MAX2((size_t)SmallForDictionary,
duke@435 1148 (size_t)(numWords + MinChunkSize));
duke@435 1149
duke@435 1150 /* Try to get a chunk that satisfies request, while avoiding
duke@435 1151 fragmentation that can't be handled. */
duke@435 1152 {
duke@435 1153 ret = dictionary()->getChunk(currSize);
duke@435 1154 if (ret != NULL) {
duke@435 1155 assert(ret->size() - numWords >= MinChunkSize,
duke@435 1156 "Chunk is too small");
duke@435 1157 _bt.allocated((HeapWord*)ret, ret->size());
duke@435 1158 /* Carve returned chunk. */
duke@435 1159 (void) splitChunkAndReturnRemainder(ret, numWords);
duke@435 1160 /* Label this as no longer a free chunk. */
duke@435 1161 assert(ret->isFree(), "This chunk should be free");
duke@435 1162 ret->linkPrev(NULL);
duke@435 1163 }
duke@435 1164 assert(ret == NULL || ret->isFree(), "Should be returning a free chunk");
duke@435 1165 return ret;
duke@435 1166 }
duke@435 1167 ShouldNotReachHere();
duke@435 1168 }
duke@435 1169
duke@435 1170 bool CompactibleFreeListSpace::verifyChunkInIndexedFreeLists(FreeChunk* fc)
duke@435 1171 const {
duke@435 1172 assert(fc->size() < IndexSetSize, "Size of chunk is too large");
duke@435 1173 return _indexedFreeList[fc->size()].verifyChunkInFreeLists(fc);
duke@435 1174 }
duke@435 1175
duke@435 1176 bool CompactibleFreeListSpace::verifyChunkInFreeLists(FreeChunk* fc) const {
duke@435 1177 if (fc->size() >= IndexSetSize) {
duke@435 1178 return dictionary()->verifyChunkInFreeLists(fc);
duke@435 1179 } else {
duke@435 1180 return verifyChunkInIndexedFreeLists(fc);
duke@435 1181 }
duke@435 1182 }
duke@435 1183
duke@435 1184 #ifndef PRODUCT
duke@435 1185 void CompactibleFreeListSpace::assert_locked() const {
duke@435 1186 CMSLockVerifier::assert_locked(freelistLock(), parDictionaryAllocLock());
duke@435 1187 }
duke@435 1188 #endif
duke@435 1189
duke@435 1190 FreeChunk* CompactibleFreeListSpace::allocateScratch(size_t size) {
duke@435 1191 // In the parallel case, the main thread holds the free list lock
duke@435 1192 // on behalf of the parallel threads.
duke@435 1193 assert_locked();
duke@435 1194 FreeChunk* fc;
duke@435 1195 {
duke@435 1196 // If GC is parallel, this might be called by several threads.
duke@435 1197 // This should be rare enough that the locking overhead won't affect
duke@435 1198 // the sequential code.
duke@435 1199 MutexLockerEx x(parDictionaryAllocLock(),
duke@435 1200 Mutex::_no_safepoint_check_flag);
duke@435 1201 fc = getChunkFromDictionary(size);
duke@435 1202 }
duke@435 1203 if (fc != NULL) {
duke@435 1204 fc->dontCoalesce();
duke@435 1205 assert(fc->isFree(), "Should be free, but not coalescable");
duke@435 1206 // Verify that the block offset table shows this to
duke@435 1207 // be a single block, but not one which is unallocated.
duke@435 1208 _bt.verify_single_block((HeapWord*)fc, fc->size());
duke@435 1209 _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
duke@435 1210 }
duke@435 1211 return fc;
duke@435 1212 }
duke@435 1213
coleenp@548 1214 oop CompactibleFreeListSpace::promote(oop obj, size_t obj_size) {
duke@435 1215 assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
duke@435 1216 assert_locked();
duke@435 1217
duke@435 1218 // if we are tracking promotions, then first ensure space for
duke@435 1219 // promotion (including spooling space for saving header if necessary).
duke@435 1220 // then allocate and copy, then track promoted info if needed.
duke@435 1221 // When tracking (see PromotionInfo::track()), the mark word may
duke@435 1222 // be displaced and in this case restoration of the mark word
duke@435 1223 // occurs in the (oop_since_save_marks_)iterate phase.
duke@435 1224 if (_promoInfo.tracking() && !_promoInfo.ensure_spooling_space()) {
duke@435 1225 return NULL;
duke@435 1226 }
duke@435 1227 // Call the allocate(size_t, bool) form directly to avoid the
duke@435 1228 // additional call through the allocate(size_t) form. Having
duke@435 1229 // the compiler inline the call is problematic because allocate(size_t)
duke@435 1230 // is a virtual method.
duke@435 1231 HeapWord* res = allocate(adjustObjectSize(obj_size));
duke@435 1232 if (res != NULL) {
duke@435 1233 Copy::aligned_disjoint_words((HeapWord*)obj, res, obj_size);
duke@435 1234 // if we should be tracking promotions, do so.
duke@435 1235 if (_promoInfo.tracking()) {
duke@435 1236 _promoInfo.track((PromotedObject*)res);
duke@435 1237 }
duke@435 1238 }
duke@435 1239 return oop(res);
duke@435 1240 }
duke@435 1241
duke@435 1242 HeapWord*
duke@435 1243 CompactibleFreeListSpace::getChunkFromSmallLinearAllocBlock(size_t size) {
duke@435 1244 assert_locked();
duke@435 1245 assert(size >= MinChunkSize, "minimum chunk size");
duke@435 1246 assert(size < _smallLinearAllocBlock._allocation_size_limit,
duke@435 1247 "maximum from smallLinearAllocBlock");
duke@435 1248 return getChunkFromLinearAllocBlock(&_smallLinearAllocBlock, size);
duke@435 1249 }
duke@435 1250
duke@435 1251 HeapWord*
duke@435 1252 CompactibleFreeListSpace::getChunkFromLinearAllocBlock(LinearAllocBlock *blk,
duke@435 1253 size_t size) {
duke@435 1254 assert_locked();
duke@435 1255 assert(size >= MinChunkSize, "too small");
duke@435 1256 HeapWord* res = NULL;
duke@435 1257 // Try to do linear allocation from blk, making sure that the block is not empty.
duke@435 1258 if (blk->_word_size == 0) {
duke@435 1259 // We have probably been unable to fill this either in the prologue or
duke@435 1260 // when it was exhausted at the last linear allocation. Bail out until
duke@435 1261 // next time.
duke@435 1262 assert(blk->_ptr == NULL, "consistency check");
duke@435 1263 return NULL;
duke@435 1264 }
duke@435 1265 assert(blk->_word_size != 0 && blk->_ptr != NULL, "consistency check");
duke@435 1266 res = getChunkFromLinearAllocBlockRemainder(blk, size);
duke@435 1267 if (res != NULL) return res;
duke@435 1268
duke@435 1269 // about to exhaust this linear allocation block
duke@435 1270 if (blk->_word_size == size) { // exactly satisfied
duke@435 1271 res = blk->_ptr;
duke@435 1272 _bt.allocated(res, blk->_word_size);
duke@435 1273 } else if (size + MinChunkSize <= blk->_refillSize) {
duke@435 1274 // Update _unallocated_block if the size is such that chunk would be
duke@435 1275 // returned to the indexed free list. All other chunks in the indexed
duke@435 1276 // free lists are allocated from the dictionary so that _unallocated_block
duke@435 1277 // has already been adjusted for them. Do it here so that the cost
duke@435 1278 // is borne uniformly by all chunks added back to the indexed free lists.
duke@435 1279 if (blk->_word_size < SmallForDictionary) {
duke@435 1280 _bt.allocated(blk->_ptr, blk->_word_size);
duke@435 1281 }
duke@435 1282 // Return the chunk that isn't big enough, and then refill below.
duke@435 1283 addChunkToFreeLists(blk->_ptr, blk->_word_size);
duke@435 1284 _bt.verify_single_block(blk->_ptr, (blk->_ptr + blk->_word_size));
duke@435 1285 // Don't keep statistics on adding back chunk from a LinAB.
duke@435 1286 } else {
duke@435 1287 // A refilled block would not satisfy the request.
duke@435 1288 return NULL;
duke@435 1289 }
duke@435 1290
duke@435 1291 blk->_ptr = NULL; blk->_word_size = 0;
duke@435 1292 refillLinearAllocBlock(blk);
duke@435 1293 assert(blk->_ptr == NULL || blk->_word_size >= size + MinChunkSize,
duke@435 1294 "block was replenished");
duke@435 1295 if (res != NULL) {
duke@435 1296 splitBirth(size);
duke@435 1297 repairLinearAllocBlock(blk);
duke@435 1298 } else if (blk->_ptr != NULL) {
duke@435 1299 res = blk->_ptr;
duke@435 1300 size_t blk_size = blk->_word_size;
duke@435 1301 blk->_word_size -= size;
duke@435 1302 blk->_ptr += size;
duke@435 1303 splitBirth(size);
duke@435 1304 repairLinearAllocBlock(blk);
duke@435 1305 // Update BOT last so that other (parallel) GC threads see a consistent
duke@435 1306 // view of the BOT and free blocks.
duke@435 1307 // Above must occur before BOT is updated below.
duke@435 1308 _bt.split_block(res, blk_size, size); // adjust block offset table
duke@435 1309 }
duke@435 1310 return res;
duke@435 1311 }
duke@435 1312
duke@435 1313 HeapWord* CompactibleFreeListSpace::getChunkFromLinearAllocBlockRemainder(
duke@435 1314 LinearAllocBlock* blk,
duke@435 1315 size_t size) {
duke@435 1316 assert_locked();
duke@435 1317 assert(size >= MinChunkSize, "too small");
duke@435 1318
duke@435 1319 HeapWord* res = NULL;
duke@435 1320 // This is the common case. Keep it simple.
duke@435 1321 if (blk->_word_size >= size + MinChunkSize) {
duke@435 1322 assert(blk->_ptr != NULL, "consistency check");
duke@435 1323 res = blk->_ptr;
duke@435 1324 // Note that the BOT is up-to-date for the linAB before allocation. It
duke@435 1325 // indicates the start of the linAB. The split_block() updates the
duke@435 1326 // BOT for the linAB after the allocation (indicates the start of the
duke@435 1327 // next chunk to be allocated).
duke@435 1328 size_t blk_size = blk->_word_size;
duke@435 1329 blk->_word_size -= size;
duke@435 1330 blk->_ptr += size;
duke@435 1331 splitBirth(size);
duke@435 1332 repairLinearAllocBlock(blk);
duke@435 1333 // Update BOT last so that other (parallel) GC threads see a consistent
duke@435 1334 // view of the BOT and free blocks.
duke@435 1335 // Above must occur before BOT is updated below.
duke@435 1336 _bt.split_block(res, blk_size, size); // adjust block offset table
duke@435 1337 _bt.allocated(res, size);
duke@435 1338 }
duke@435 1339 return res;
duke@435 1340 }
duke@435 1341
duke@435 1342 FreeChunk*
duke@435 1343 CompactibleFreeListSpace::getChunkFromIndexedFreeList(size_t size) {
duke@435 1344 assert_locked();
duke@435 1345 assert(size < SmallForDictionary, "just checking");
duke@435 1346 FreeChunk* res;
duke@435 1347 res = _indexedFreeList[size].getChunkAtHead();
duke@435 1348 if (res == NULL) {
duke@435 1349 res = getChunkFromIndexedFreeListHelper(size);
duke@435 1350 }
duke@435 1351 _bt.verify_not_unallocated((HeapWord*) res, size);
duke@435 1352 return res;
duke@435 1353 }
duke@435 1354
duke@435 1355 FreeChunk*
duke@435 1356 CompactibleFreeListSpace::getChunkFromIndexedFreeListHelper(size_t size) {
duke@435 1357 assert_locked();
duke@435 1358 FreeChunk* fc = NULL;
duke@435 1359 if (size < SmallForDictionary) {
duke@435 1360 assert(_indexedFreeList[size].head() == NULL ||
duke@435 1361 _indexedFreeList[size].surplus() <= 0,
duke@435 1362 "List for this size should be empty or under populated");
duke@435 1363 // Try best fit in exact lists before replenishing the list
duke@435 1364 if (!bestFitFirst() || (fc = bestFitSmall(size)) == NULL) {
duke@435 1365 // Replenish list.
duke@435 1366 //
duke@435 1367 // Things tried that failed.
duke@435 1368 // Tried allocating out of the two LinAB's first before
duke@435 1369 // replenishing lists.
duke@435 1370 // Tried small linAB of size 256 (size in indexed list)
duke@435 1371 // and replenishing indexed lists from the small linAB.
duke@435 1372 //
duke@435 1373 FreeChunk* newFc = NULL;
duke@435 1374 size_t replenish_size = CMSIndexedFreeListReplenish * size;
duke@435 1375 if (replenish_size < SmallForDictionary) {
duke@435 1376 // Do not replenish from an underpopulated size.
duke@435 1377 if (_indexedFreeList[replenish_size].surplus() > 0 &&
duke@435 1378 _indexedFreeList[replenish_size].head() != NULL) {
duke@435 1379 newFc =
duke@435 1380 _indexedFreeList[replenish_size].getChunkAtHead();
duke@435 1381 } else {
duke@435 1382 newFc = bestFitSmall(replenish_size);
duke@435 1383 }
duke@435 1384 }
duke@435 1385 if (newFc != NULL) {
duke@435 1386 splitDeath(replenish_size);
duke@435 1387 } else if (replenish_size > size) {
duke@435 1388 assert(CMSIndexedFreeListReplenish > 1, "ctl pt invariant");
duke@435 1389 newFc =
duke@435 1390 getChunkFromIndexedFreeListHelper(replenish_size);
duke@435 1391 }
duke@435 1392 if (newFc != NULL) {
duke@435 1393 assert(newFc->size() == replenish_size, "Got wrong size");
duke@435 1394 size_t i;
duke@435 1395 FreeChunk *curFc, *nextFc;
duke@435 1396 // carve up and link blocks 0, ..., CMSIndexedFreeListReplenish - 2
duke@435 1397 // The last chunk is not added to the lists but is returned as the
duke@435 1398 // free chunk.
duke@435 1399 for (curFc = newFc, nextFc = (FreeChunk*)((HeapWord*)curFc + size),
duke@435 1400 i = 0;
duke@435 1401 i < (CMSIndexedFreeListReplenish - 1);
duke@435 1402 curFc = nextFc, nextFc = (FreeChunk*)((HeapWord*)nextFc + size),
duke@435 1403 i++) {
duke@435 1404 curFc->setSize(size);
duke@435 1405 // Don't record this as a return in order to try and
duke@435 1406 // determine the "returns" from a GC.
duke@435 1407 _bt.verify_not_unallocated((HeapWord*) fc, size);
duke@435 1408 _indexedFreeList[size].returnChunkAtTail(curFc, false);
duke@435 1409 _bt.mark_block((HeapWord*)curFc, size);
duke@435 1410 splitBirth(size);
duke@435 1411 // Don't record the initial population of the indexed list
duke@435 1412 // as a split birth.
duke@435 1413 }
duke@435 1414
duke@435 1415 // check that the arithmetic was OK above
duke@435 1416 assert((HeapWord*)nextFc == (HeapWord*)newFc + replenish_size,
duke@435 1417 "inconsistency in carving newFc");
duke@435 1418 curFc->setSize(size);
duke@435 1419 _bt.mark_block((HeapWord*)curFc, size);
duke@435 1420 splitBirth(size);
duke@435 1421 return curFc;
duke@435 1422 }
duke@435 1423 }
duke@435 1424 } else {
duke@435 1425 // Get a free chunk from the free chunk dictionary to be returned to
duke@435 1426 // replenish the indexed free list.
duke@435 1427 fc = getChunkFromDictionaryExact(size);
duke@435 1428 }
duke@435 1429 assert(fc == NULL || fc->isFree(), "Should be returning a free chunk");
duke@435 1430 return fc;
duke@435 1431 }
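// Illustrative example (hypothetical numbers): for size == 8 words and
// CMSIndexedFreeListReplenish == 4, replenish_size is 32 words. A 32-word
// chunk is carved into four 8-word pieces; the first three are appended to
// _indexedFreeList[8] and the last one is returned to the caller.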
duke@435 1432
duke@435 1433 FreeChunk*
duke@435 1434 CompactibleFreeListSpace::getChunkFromDictionary(size_t size) {
duke@435 1435 assert_locked();
duke@435 1436 FreeChunk* fc = _dictionary->getChunk(size);
duke@435 1437 if (fc == NULL) {
duke@435 1438 return NULL;
duke@435 1439 }
duke@435 1440 _bt.allocated((HeapWord*)fc, fc->size());
duke@435 1441 if (fc->size() >= size + MinChunkSize) {
duke@435 1442 fc = splitChunkAndReturnRemainder(fc, size);
duke@435 1443 }
duke@435 1444 assert(fc->size() >= size, "chunk too small");
duke@435 1445 assert(fc->size() < size + MinChunkSize, "chunk too big");
duke@435 1446 _bt.verify_single_block((HeapWord*)fc, fc->size());
duke@435 1447 return fc;
duke@435 1448 }
duke@435 1449
duke@435 1450 FreeChunk*
duke@435 1451 CompactibleFreeListSpace::getChunkFromDictionaryExact(size_t size) {
duke@435 1452 assert_locked();
duke@435 1453 FreeChunk* fc = _dictionary->getChunk(size);
duke@435 1454 if (fc == NULL) {
duke@435 1455 return fc;
duke@435 1456 }
duke@435 1457 _bt.allocated((HeapWord*)fc, fc->size());
duke@435 1458 if (fc->size() == size) {
duke@435 1459 _bt.verify_single_block((HeapWord*)fc, size);
duke@435 1460 return fc;
duke@435 1461 }
duke@435 1462 assert(fc->size() > size, "getChunk() guarantee");
duke@435 1463 if (fc->size() < size + MinChunkSize) {
duke@435 1464 // Return the chunk to the dictionary and go get a bigger one.
duke@435 1465 returnChunkToDictionary(fc);
duke@435 1466 fc = _dictionary->getChunk(size + MinChunkSize);
duke@435 1467 if (fc == NULL) {
duke@435 1468 return NULL;
duke@435 1469 }
duke@435 1470 _bt.allocated((HeapWord*)fc, fc->size());
duke@435 1471 }
duke@435 1472 assert(fc->size() >= size + MinChunkSize, "tautology");
duke@435 1473 fc = splitChunkAndReturnRemainder(fc, size);
duke@435 1474 assert(fc->size() == size, "chunk is wrong size");
duke@435 1475 _bt.verify_single_block((HeapWord*)fc, size);
duke@435 1476 return fc;
duke@435 1477 }
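// Illustrative example (hypothetical numbers): for size == 300 words the
// dictionary might hand back a 302-word chunk. If 302 - 300 < MinChunkSize
// the chunk cannot be split without leaving an under-sized remainder, so it
// is returned and a chunk of at least 300 + MinChunkSize words is requested
// instead; the final split then yields an exactly 300-word chunk plus a
// remainder of at least MinChunkSize words.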
duke@435 1478
duke@435 1479 void
duke@435 1480 CompactibleFreeListSpace::returnChunkToDictionary(FreeChunk* chunk) {
duke@435 1481 assert_locked();
duke@435 1482
duke@435 1483 size_t size = chunk->size();
duke@435 1484 _bt.verify_single_block((HeapWord*)chunk, size);
duke@435 1485 // adjust _unallocated_block downward, as necessary
duke@435 1486 _bt.freed((HeapWord*)chunk, size);
duke@435 1487 _dictionary->returnChunk(chunk);
duke@435 1488 }
duke@435 1489
duke@435 1490 void
duke@435 1491 CompactibleFreeListSpace::returnChunkToFreeList(FreeChunk* fc) {
duke@435 1492 assert_locked();
duke@435 1493 size_t size = fc->size();
duke@435 1494 _bt.verify_single_block((HeapWord*) fc, size);
duke@435 1495 _bt.verify_not_unallocated((HeapWord*) fc, size);
duke@435 1496 if (_adaptive_freelists) {
duke@435 1497 _indexedFreeList[size].returnChunkAtTail(fc);
duke@435 1498 } else {
duke@435 1499 _indexedFreeList[size].returnChunkAtHead(fc);
duke@435 1500 }
duke@435 1501 }
duke@435 1502
duke@435 1503 // Add chunk to end of last block -- if it's the largest
duke@435 1504 // block -- and update BOT and census data. We would
duke@435 1505 // of course have preferred to coalesce it with the
duke@435 1506 // last block, but it's currently less expensive to find the
duke@435 1507 // largest block than it is to find the last.
duke@435 1508 void
duke@435 1509 CompactibleFreeListSpace::addChunkToFreeListsAtEndRecordingStats(
duke@435 1510 HeapWord* chunk, size_t size) {
duke@435 1511 // check that the chunk does lie in this space!
duke@435 1512 assert(chunk != NULL && is_in_reserved(chunk), "Not in this space!");
duke@435 1513 assert_locked();
duke@435 1514 // One of the parallel gc task threads may be here
duke@435 1515 // whilst others are allocating.
duke@435 1516 Mutex* lock = NULL;
duke@435 1517 if (ParallelGCThreads != 0) {
duke@435 1518 lock = &_parDictionaryAllocLock;
duke@435 1519 }
duke@435 1520 FreeChunk* ec;
duke@435 1521 {
duke@435 1522 MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
duke@435 1523 ec = dictionary()->findLargestDict(); // get largest block
duke@435 1524 if (ec != NULL && ec->end() == chunk) {
duke@435 1525 // It's a coterminal block - we can coalesce.
duke@435 1526 size_t old_size = ec->size();
duke@435 1527 coalDeath(old_size);
duke@435 1528 removeChunkFromDictionary(ec);
duke@435 1529 size += old_size;
duke@435 1530 } else {
duke@435 1531 ec = (FreeChunk*)chunk;
duke@435 1532 }
duke@435 1533 }
duke@435 1534 ec->setSize(size);
duke@435 1535 debug_only(ec->mangleFreed(size));
duke@435 1536 if (size < SmallForDictionary) {
duke@435 1537 lock = _indexedFreeListParLocks[size];
duke@435 1538 }
duke@435 1539 MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
duke@435 1540 addChunkAndRepairOffsetTable((HeapWord*)ec, size, true);
duke@435 1541 // record the birth under the lock since the recording involves
duke@435 1542 // manipulation of the list on which the chunk lives and
duke@435 1543 // if the chunk is allocated and is the last on the list,
duke@435 1544 // the list can go away.
duke@435 1545 coalBirth(size);
duke@435 1546 }
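// Illustrative scenario: if the largest dictionary block happens to end
// exactly where `chunk` begins, the two are merged -- the old block is
// removed from the dictionary (a coalesce death at its old size) and the
// combined block is added back, recording a coalesce birth at the new,
// larger size. Otherwise `chunk` is added on its own.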
duke@435 1547
duke@435 1548 void
duke@435 1549 CompactibleFreeListSpace::addChunkToFreeLists(HeapWord* chunk,
duke@435 1550 size_t size) {
duke@435 1551 // check that the chunk does lie in this space!
duke@435 1552 assert(chunk != NULL && is_in_reserved(chunk), "Not in this space!");
duke@435 1553 assert_locked();
duke@435 1554 _bt.verify_single_block(chunk, size);
duke@435 1555
duke@435 1556 FreeChunk* fc = (FreeChunk*) chunk;
duke@435 1557 fc->setSize(size);
duke@435 1558 debug_only(fc->mangleFreed(size));
duke@435 1559 if (size < SmallForDictionary) {
duke@435 1560 returnChunkToFreeList(fc);
duke@435 1561 } else {
duke@435 1562 returnChunkToDictionary(fc);
duke@435 1563 }
duke@435 1564 }
duke@435 1565
duke@435 1566 void
duke@435 1567 CompactibleFreeListSpace::addChunkAndRepairOffsetTable(HeapWord* chunk,
duke@435 1568 size_t size, bool coalesced) {
duke@435 1569 assert_locked();
duke@435 1570 assert(chunk != NULL, "null chunk");
duke@435 1571 if (coalesced) {
duke@435 1572 // repair BOT
duke@435 1573 _bt.single_block(chunk, size);
duke@435 1574 }
duke@435 1575 addChunkToFreeLists(chunk, size);
duke@435 1576 }
duke@435 1577
duke@435 1578 // We _must_ find the purported chunk on our free lists;
duke@435 1579 // we assert if we don't.
duke@435 1580 void
duke@435 1581 CompactibleFreeListSpace::removeFreeChunkFromFreeLists(FreeChunk* fc) {
duke@435 1582 size_t size = fc->size();
duke@435 1583 assert_locked();
duke@435 1584 debug_only(verifyFreeLists());
duke@435 1585 if (size < SmallForDictionary) {
duke@435 1586 removeChunkFromIndexedFreeList(fc);
duke@435 1587 } else {
duke@435 1588 removeChunkFromDictionary(fc);
duke@435 1589 }
duke@435 1590 _bt.verify_single_block((HeapWord*)fc, size);
duke@435 1591 debug_only(verifyFreeLists());
duke@435 1592 }
duke@435 1593
duke@435 1594 void
duke@435 1595 CompactibleFreeListSpace::removeChunkFromDictionary(FreeChunk* fc) {
duke@435 1596 size_t size = fc->size();
duke@435 1597 assert_locked();
duke@435 1598 assert(fc != NULL, "null chunk");
duke@435 1599 _bt.verify_single_block((HeapWord*)fc, size);
duke@435 1600 _dictionary->removeChunk(fc);
duke@435 1601 // adjust _unallocated_block upward, as necessary
duke@435 1602 _bt.allocated((HeapWord*)fc, size);
duke@435 1603 }
duke@435 1604
duke@435 1605 void
duke@435 1606 CompactibleFreeListSpace::removeChunkFromIndexedFreeList(FreeChunk* fc) {
duke@435 1607 assert_locked();
duke@435 1608 size_t size = fc->size();
duke@435 1609 _bt.verify_single_block((HeapWord*)fc, size);
duke@435 1610 NOT_PRODUCT(
duke@435 1611 if (FLSVerifyIndexTable) {
duke@435 1612 verifyIndexedFreeList(size);
duke@435 1613 }
duke@435 1614 )
duke@435 1615 _indexedFreeList[size].removeChunk(fc);
duke@435 1616 debug_only(fc->clearNext());
duke@435 1617 debug_only(fc->clearPrev());
duke@435 1618 NOT_PRODUCT(
duke@435 1619 if (FLSVerifyIndexTable) {
duke@435 1620 verifyIndexedFreeList(size);
duke@435 1621 }
duke@435 1622 )
duke@435 1623 }
duke@435 1624
duke@435 1625 FreeChunk* CompactibleFreeListSpace::bestFitSmall(size_t numWords) {
duke@435 1626 /* A hint is the next larger size that has a surplus.
duke@435 1627 Start search at a size large enough to guarantee that
duke@435 1628 the excess is >= MIN_CHUNK. */
duke@435 1629 size_t start = align_object_size(numWords + MinChunkSize);
duke@435 1630 if (start < IndexSetSize) {
duke@435 1631 FreeList* it = _indexedFreeList;
duke@435 1632 size_t hint = _indexedFreeList[start].hint();
duke@435 1633 while (hint < IndexSetSize) {
duke@435 1634 assert(hint % MinObjAlignment == 0, "hint should be aligned");
duke@435 1635 FreeList *fl = &_indexedFreeList[hint];
duke@435 1636 if (fl->surplus() > 0 && fl->head() != NULL) {
duke@435 1637 // Found a list with surplus, reset original hint
duke@435 1638 // and split out a free chunk which is returned.
duke@435 1639 _indexedFreeList[start].set_hint(hint);
duke@435 1640 FreeChunk* res = getFromListGreater(fl, numWords);
duke@435 1641 assert(res == NULL || res->isFree(),
duke@435 1642 "Should be returning a free chunk");
duke@435 1643 return res;
duke@435 1644 }
duke@435 1645 hint = fl->hint(); /* keep looking */
duke@435 1646 }
duke@435 1647 /* None found. */
duke@435 1648 it[start].set_hint(IndexSetSize);
duke@435 1649 }
duke@435 1650 return NULL;
duke@435 1651 }
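// Illustrative sketch of the hint chain (hypothetical sizes): a request
// whose aligned start index is 20 might find _indexedFreeList[20].hint()
// == 24; if list 24 has no surplus, its own hint (say 32) is tried next,
// and so on. The first surplus list found both satisfies the request via
// getFromListGreater() and becomes the new hint for index 20; if none is
// found, the hint is reset to IndexSetSize.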
duke@435 1652
duke@435 1653 /* Requires fl->size >= numWords + MinChunkSize */
duke@435 1654 FreeChunk* CompactibleFreeListSpace::getFromListGreater(FreeList* fl,
duke@435 1655 size_t numWords) {
duke@435 1656 FreeChunk *curr = fl->head();
duke@435 1657 assert(curr != NULL, "List is empty");
duke@435 1658 size_t oldNumWords = curr->size();
duke@435 1659 assert(numWords >= MinChunkSize, "Word size is too small");
duke@435 1660 assert(oldNumWords >= numWords + MinChunkSize,
duke@435 1661 "Size of chunks in the list is too small");
duke@435 1662
duke@435 1663 fl->removeChunk(curr);
duke@435 1664 // recorded indirectly by splitChunkAndReturnRemainder -
duke@435 1665 // smallSplit(oldNumWords, numWords);
duke@435 1666 FreeChunk* new_chunk = splitChunkAndReturnRemainder(curr, numWords);
duke@435 1667 // Does anything have to be done for the remainder in terms of
duke@435 1668 // fixing the card table?
duke@435 1669 assert(new_chunk == NULL || new_chunk->isFree(),
duke@435 1670 "Should be returning a free chunk");
duke@435 1671 return new_chunk;
duke@435 1672 }
duke@435 1673
duke@435 1674 FreeChunk*
duke@435 1675 CompactibleFreeListSpace::splitChunkAndReturnRemainder(FreeChunk* chunk,
duke@435 1676 size_t new_size) {
duke@435 1677 assert_locked();
duke@435 1678 size_t size = chunk->size();
duke@435 1679 assert(size > new_size, "Split from a smaller block?");
duke@435 1680 assert(is_aligned(chunk), "alignment problem");
duke@435 1681 assert(size == adjustObjectSize(size), "alignment problem");
duke@435 1682 size_t rem_size = size - new_size;
duke@435 1683 assert(rem_size == adjustObjectSize(rem_size), "alignment problem");
duke@435 1684 assert(rem_size >= MinChunkSize, "Free chunk smaller than minimum");
duke@435 1685 FreeChunk* ffc = (FreeChunk*)((HeapWord*)chunk + new_size);
duke@435 1686 assert(is_aligned(ffc), "alignment problem");
duke@435 1687 ffc->setSize(rem_size);
duke@435 1688 ffc->linkNext(NULL);
duke@435 1689 ffc->linkPrev(NULL); // Mark as a free block for other (parallel) GC threads.
duke@435 1690 // Above must occur before BOT is updated below.
duke@435 1691 // adjust block offset table
duke@435 1692 _bt.split_block((HeapWord*)chunk, chunk->size(), new_size);
duke@435 1693 if (rem_size < SmallForDictionary) {
duke@435 1694 bool is_par = (SharedHeap::heap()->n_par_threads() > 0);
duke@435 1695 if (is_par) _indexedFreeListParLocks[rem_size]->lock();
duke@435 1696 returnChunkToFreeList(ffc);
duke@435 1697 split(size, rem_size);
duke@435 1698 if (is_par) _indexedFreeListParLocks[rem_size]->unlock();
duke@435 1699 } else {
duke@435 1700 returnChunkToDictionary(ffc);
duke@435 1701 split(size, rem_size);
duke@435 1702 }
duke@435 1703 chunk->setSize(new_size);
duke@435 1704 return chunk;
duke@435 1705 }
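// Illustrative example (hypothetical numbers): splitting a 100-word free
// chunk for a 60-word request leaves a 40-word remainder. The remainder is
// marked free, the BOT is updated for the split, and the 40-word piece goes
// to the indexed lists if 40 < SmallForDictionary, otherwise back to the
// dictionary; the original chunk is then resized to 60 words and returned.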
duke@435 1706
duke@435 1707 void
duke@435 1708 CompactibleFreeListSpace::sweep_completed() {
duke@435 1709 // Now that space is probably plentiful, refill linear
duke@435 1710 // allocation blocks as needed.
duke@435 1711 refillLinearAllocBlocksIfNeeded();
duke@435 1712 }
duke@435 1713
duke@435 1714 void
duke@435 1715 CompactibleFreeListSpace::gc_prologue() {
duke@435 1716 assert_locked();
duke@435 1717 if (PrintFLSStatistics != 0) {
duke@435 1718 gclog_or_tty->print("Before GC:\n");
duke@435 1719 reportFreeListStatistics();
duke@435 1720 }
duke@435 1721 refillLinearAllocBlocksIfNeeded();
duke@435 1722 }
duke@435 1723
duke@435 1724 void
duke@435 1725 CompactibleFreeListSpace::gc_epilogue() {
duke@435 1726 assert_locked();
duke@435 1727 if (PrintGCDetails && Verbose && !_adaptive_freelists) {
duke@435 1728 if (_smallLinearAllocBlock._word_size == 0)
duke@435 1729 warning("CompactibleFreeListSpace(epilogue):: Linear allocation failure");
duke@435 1730 }
duke@435 1731 assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");
duke@435 1732 _promoInfo.stopTrackingPromotions();
duke@435 1733 repairLinearAllocationBlocks();
duke@435 1734 // Print Space's stats
duke@435 1735 if (PrintFLSStatistics != 0) {
duke@435 1736 gclog_or_tty->print("After GC:\n");
duke@435 1737 reportFreeListStatistics();
duke@435 1738 }
duke@435 1739 }
duke@435 1740
duke@435 1741 // Iteration support, mostly delegated from a CMS generation
duke@435 1742
duke@435 1743 void CompactibleFreeListSpace::save_marks() {
duke@435 1744 // mark the "end" of the used space at the time of this call;
duke@435 1745 // note, however, that promoted objects from this point
duke@435 1746 // on are tracked in the _promoInfo below.
duke@435 1747 set_saved_mark_word(BlockOffsetArrayUseUnallocatedBlock ?
duke@435 1748 unallocated_block() : end());
duke@435 1749 // inform allocator that promotions should be tracked.
duke@435 1750 assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");
duke@435 1751 _promoInfo.startTrackingPromotions();
duke@435 1752 }
duke@435 1753
duke@435 1754 bool CompactibleFreeListSpace::no_allocs_since_save_marks() {
duke@435 1755 assert(_promoInfo.tracking(), "No preceding save_marks?");
duke@435 1756 guarantee(SharedHeap::heap()->n_par_threads() == 0,
duke@435 1757 "Shouldn't be called (yet) during parallel part of gc.");
duke@435 1758 return _promoInfo.noPromotions();
duke@435 1759 }
duke@435 1760
duke@435 1761 #define CFLS_OOP_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix) \
duke@435 1762 \
duke@435 1763 void CompactibleFreeListSpace:: \
duke@435 1764 oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk) { \
duke@435 1765 assert(SharedHeap::heap()->n_par_threads() == 0, \
duke@435 1766 "Shouldn't be called (yet) during parallel part of gc."); \
duke@435 1767 _promoInfo.promoted_oops_iterate##nv_suffix(blk); \
duke@435 1768 /* \
duke@435 1769 * This also restores any displaced headers and removes the elements from \
duke@435 1770 * the iteration set as they are processed, so that we have a clean slate \
duke@435 1771 * at the end of the iteration. Note, thus, that if new objects are \
duke@435 1772 * promoted as a result of the iteration they are iterated over as well. \
duke@435 1773 */ \
duke@435 1774 assert(_promoInfo.noPromotions(), "_promoInfo inconsistency"); \
duke@435 1775 }
duke@435 1776
duke@435 1777 ALL_SINCE_SAVE_MARKS_CLOSURES(CFLS_OOP_SINCE_SAVE_MARKS_DEFN)
duke@435 1778
duke@435 1779 //////////////////////////////////////////////////////////////////////////////
duke@435 1780 // We go over the list of promoted objects, removing each from the list,
duke@435 1781 // and applying the closure (this may, in turn, add more elements to
duke@435 1782 // the tail of the promoted list, and these newly added objects will
duke@435 1783 // also be processed) until the list is empty.
duke@435 1784 // To aid verification and debugging, in the non-product builds
duke@435 1785 // we actually forward _promoHead each time we process a promoted oop.
duke@435 1786 // Note that this is not necessary in general (i.e. when we don't need to
duke@435 1787 // call PromotionInfo::verify()) because oop_iterate can only add to the
duke@435 1788 // end of _promoTail, and never needs to look at _promoHead.
duke@435 1789
duke@435 1790 #define PROMOTED_OOPS_ITERATE_DEFN(OopClosureType, nv_suffix) \
duke@435 1791 \
duke@435 1792 void PromotionInfo::promoted_oops_iterate##nv_suffix(OopClosureType* cl) { \
duke@435 1793 NOT_PRODUCT(verify()); \
duke@435 1794 PromotedObject *curObj, *nextObj; \
duke@435 1795 for (curObj = _promoHead; curObj != NULL; curObj = nextObj) { \
duke@435 1796 if ((nextObj = curObj->next()) == NULL) { \
duke@435 1797 /* protect ourselves against additions due to closure application \
duke@435 1798 below by resetting the list. */ \
duke@435 1799 assert(_promoTail == curObj, "Should have been the tail"); \
duke@435 1800 _promoHead = _promoTail = NULL; \
duke@435 1801 } \
duke@435 1802 if (curObj->hasDisplacedMark()) { \
duke@435 1803 /* restore displaced header */ \
duke@435 1804 oop(curObj)->set_mark(nextDisplacedHeader()); \
duke@435 1805 } else { \
duke@435 1806 /* restore prototypical header */ \
duke@435 1807 oop(curObj)->init_mark(); \
duke@435 1808 } \
duke@435 1809 /* The "promoted_mark" should now not be set */ \
duke@435 1810 assert(!curObj->hasPromotedMark(), \
duke@435 1811 "Should have been cleared by restoring displaced mark-word"); \
duke@435 1812 NOT_PRODUCT(_promoHead = nextObj); \
duke@435 1813 if (cl != NULL) oop(curObj)->oop_iterate(cl); \
duke@435 1814 if (nextObj == NULL) { /* start at head of list reset above */ \
duke@435 1815 nextObj = _promoHead; \
duke@435 1816 } \
duke@435 1817 } \
duke@435 1818 assert(noPromotions(), "post-condition violation"); \
duke@435 1819 assert(_promoHead == NULL && _promoTail == NULL, "emptied promoted list");\
duke@435 1820 assert(_spoolHead == _spoolTail, "emptied spooling buffers"); \
duke@435 1821 assert(_firstIndex == _nextIndex, "empty buffer"); \
duke@435 1822 }
duke@435 1823
duke@435 1824 // This should have been ALL_SINCE_...() just like the others,
duke@435 1825 // but, because the body of the method above is somewhat longer,
duke@435 1826 // the MSVC compiler cannot cope; as a workaround, we split the
duke@435 1827 // macro into its 3 constituent parts below (see original macro
duke@435 1828 // definition in specializedOopClosures.hpp).
duke@435 1829 SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES_YOUNG(PROMOTED_OOPS_ITERATE_DEFN)
duke@435 1830 PROMOTED_OOPS_ITERATE_DEFN(OopsInGenClosure,_v)
duke@435 1831
duke@435 1832
duke@435 1833 void CompactibleFreeListSpace::object_iterate_since_last_GC(ObjectClosure* cl) {
duke@435 1834 // ugghh... how would one do this efficiently for a non-contiguous space?
duke@435 1835 guarantee(false, "NYI");
duke@435 1836 }
duke@435 1837
ysr@447 1838 bool CompactibleFreeListSpace::linearAllocationWouldFail() const {
duke@435 1839 return _smallLinearAllocBlock._word_size == 0;
duke@435 1840 }
duke@435 1841
duke@435 1842 void CompactibleFreeListSpace::repairLinearAllocationBlocks() {
duke@435 1843 // Fix up linear allocation blocks to look like free blocks
duke@435 1844 repairLinearAllocBlock(&_smallLinearAllocBlock);
duke@435 1845 }
duke@435 1846
duke@435 1847 void CompactibleFreeListSpace::repairLinearAllocBlock(LinearAllocBlock* blk) {
duke@435 1848 assert_locked();
duke@435 1849 if (blk->_ptr != NULL) {
duke@435 1850 assert(blk->_word_size != 0 && blk->_word_size >= MinChunkSize,
duke@435 1851 "Minimum block size requirement");
duke@435 1852 FreeChunk* fc = (FreeChunk*)(blk->_ptr);
duke@435 1853 fc->setSize(blk->_word_size);
duke@435 1854 fc->linkPrev(NULL); // mark as free
duke@435 1855 fc->dontCoalesce();
duke@435 1856 assert(fc->isFree(), "just marked it free");
duke@435 1857 assert(fc->cantCoalesce(), "just marked it uncoalescable");
duke@435 1858 }
duke@435 1859 }
duke@435 1860
duke@435 1861 void CompactibleFreeListSpace::refillLinearAllocBlocksIfNeeded() {
duke@435 1862 assert_locked();
duke@435 1863 if (_smallLinearAllocBlock._ptr == NULL) {
duke@435 1864 assert(_smallLinearAllocBlock._word_size == 0,
duke@435 1865 "Size of linAB should be zero if the ptr is NULL");
duke@435 1866 // Reset the linAB refill and allocation size limit.
duke@435 1867 _smallLinearAllocBlock.set(0, 0, 1024*SmallForLinearAlloc, SmallForLinearAlloc);
duke@435 1868 }
duke@435 1869 refillLinearAllocBlockIfNeeded(&_smallLinearAllocBlock);
duke@435 1870 }
duke@435 1871
duke@435 1872 void
duke@435 1873 CompactibleFreeListSpace::refillLinearAllocBlockIfNeeded(LinearAllocBlock* blk) {
duke@435 1874 assert_locked();
duke@435 1875 assert((blk->_ptr == NULL && blk->_word_size == 0) ||
duke@435 1876 (blk->_ptr != NULL && blk->_word_size >= MinChunkSize),
duke@435 1877 "blk invariant");
duke@435 1878 if (blk->_ptr == NULL) {
duke@435 1879 refillLinearAllocBlock(blk);
duke@435 1880 }
duke@435 1881 if (PrintMiscellaneous && Verbose) {
duke@435 1882 if (blk->_word_size == 0) {
duke@435 1883 warning("CompactibleFreeListSpace(prologue):: Linear allocation failure");
duke@435 1884 }
duke@435 1885 }
duke@435 1886 }
duke@435 1887
duke@435 1888 void
duke@435 1889 CompactibleFreeListSpace::refillLinearAllocBlock(LinearAllocBlock* blk) {
duke@435 1890 assert_locked();
duke@435 1891 assert(blk->_word_size == 0 && blk->_ptr == NULL,
duke@435 1892 "linear allocation block should be empty");
duke@435 1893 FreeChunk* fc;
duke@435 1894 if (blk->_refillSize < SmallForDictionary &&
duke@435 1895 (fc = getChunkFromIndexedFreeList(blk->_refillSize)) != NULL) {
duke@435 1896 // A linAB's strategy might be to use small sizes to reduce
duke@435 1897 // fragmentation but still get the benefits of allocation from a
duke@435 1898 // linAB.
duke@435 1899 } else {
duke@435 1900 fc = getChunkFromDictionary(blk->_refillSize);
duke@435 1901 }
duke@435 1902 if (fc != NULL) {
duke@435 1903 blk->_ptr = (HeapWord*)fc;
duke@435 1904 blk->_word_size = fc->size();
duke@435 1905 fc->dontCoalesce(); // to prevent sweeper from sweeping us up
duke@435 1906 }
duke@435 1907 }
duke@435 1908
ysr@447 1909 // Support for concurrent collection policy decisions.
ysr@447 1910 bool CompactibleFreeListSpace::should_concurrent_collect() const {
ysr@447 1911 // In the future we might want to add in fragmentation stats --
ysr@447 1912 // including erosion of the "mountain" -- into this decision as well.
ysr@447 1913 return !adaptive_freelists() && linearAllocationWouldFail();
ysr@447 1914 }
ysr@447 1915
duke@435 1916 // Support for compaction
duke@435 1917
duke@435 1918 void CompactibleFreeListSpace::prepare_for_compaction(CompactPoint* cp) {
duke@435 1919 SCAN_AND_FORWARD(cp,end,block_is_obj,block_size);
duke@435 1920 // prepare_for_compaction() uses the space between live objects
duke@435 1921 // so that a later phase can skip dead space quickly. So verification
duke@435 1922 // of the free lists doesn't work after this point.
duke@435 1923 }
duke@435 1924
duke@435 1925 #define obj_size(q) adjustObjectSize(oop(q)->size())
duke@435 1926 #define adjust_obj_size(s) adjustObjectSize(s)
duke@435 1927
duke@435 1928 void CompactibleFreeListSpace::adjust_pointers() {
duke@435 1929 // In other versions of adjust_pointers(), a bail out
duke@435 1930 // based on the amount of live data in the generation
duke@435 1931 // (i.e., if 0, bail out) may be used.
duke@435 1932 // Cannot test used() == 0 here because the free lists have already
duke@435 1933 // been mangled by the compaction.
duke@435 1934
duke@435 1935 SCAN_AND_ADJUST_POINTERS(adjust_obj_size);
duke@435 1936 // See note about verification in prepare_for_compaction().
duke@435 1937 }
duke@435 1938
duke@435 1939 void CompactibleFreeListSpace::compact() {
duke@435 1940 SCAN_AND_COMPACT(obj_size);
duke@435 1941 }
duke@435 1942
duke@435 1943 // fragmentation_metric = 1 - [sum of (fbs**2) / (sum of fbs)**2]
duke@435 1944 // where fbs is free block sizes
duke@435 1945 double CompactibleFreeListSpace::flsFrag() const {
duke@435 1946 size_t itabFree = totalSizeInIndexedFreeLists();
duke@435 1947 double frag = 0.0;
duke@435 1948 size_t i;
duke@435 1949
duke@435 1950 for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
duke@435 1951 double sz = i;
duke@435 1952 frag += _indexedFreeList[i].count() * (sz * sz);
duke@435 1953 }
duke@435 1954
duke@435 1955 double totFree = itabFree +
duke@435 1956 _dictionary->totalChunkSize(DEBUG_ONLY(freelistLock()));
duke@435 1957 if (totFree > 0) {
duke@435 1958 frag = ((frag + _dictionary->sum_of_squared_block_sizes()) /
duke@435 1959 (totFree * totFree));
duke@435 1960 frag = (double)1.0 - frag;
duke@435 1961 } else {
duke@435 1962 assert(frag == 0.0, "Follows from totFree == 0");
duke@435 1963 }
duke@435 1964 return frag;
duke@435 1965 }
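// Worked example (hypothetical free-list contents): if the free space is a
// single 100-word block, frag = 1 - (100*100)/(100*100) = 0, i.e. no
// fragmentation. If the same 100 words are spread over twenty-five 4-word
// blocks, frag = 1 - (25*16)/(100*100) = 0.96, i.e. highly fragmented.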
duke@435 1966
duke@435 1967 #define CoalSurplusPercent 1.05
duke@435 1968 #define SplitSurplusPercent 1.10
duke@435 1969
duke@435 1970 void CompactibleFreeListSpace::beginSweepFLCensus(
duke@435 1971 float inter_sweep_current,
duke@435 1972 float inter_sweep_estimate) {
duke@435 1973 assert_locked();
duke@435 1974 size_t i;
duke@435 1975 for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
duke@435 1976 FreeList* fl = &_indexedFreeList[i];
duke@435 1977 fl->compute_desired(inter_sweep_current, inter_sweep_estimate);
duke@435 1978 fl->set_coalDesired((ssize_t)((double)fl->desired() * CoalSurplusPercent));
duke@435 1979 fl->set_beforeSweep(fl->count());
duke@435 1980 fl->set_bfrSurp(fl->surplus());
duke@435 1981 }
duke@435 1982 _dictionary->beginSweepDictCensus(CoalSurplusPercent,
duke@435 1983 inter_sweep_current,
duke@435 1984 inter_sweep_estimate);
duke@435 1985 }
duke@435 1986
duke@435 1987 void CompactibleFreeListSpace::setFLSurplus() {
duke@435 1988 assert_locked();
duke@435 1989 size_t i;
duke@435 1990 for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
duke@435 1991 FreeList *fl = &_indexedFreeList[i];
duke@435 1992 fl->set_surplus(fl->count() -
duke@435 1993 (ssize_t)((double)fl->desired() * SplitSurplusPercent));
duke@435 1994 }
duke@435 1995 }
duke@435 1996
duke@435 1997 void CompactibleFreeListSpace::setFLHints() {
duke@435 1998 assert_locked();
duke@435 1999 size_t i;
duke@435 2000 size_t h = IndexSetSize;
duke@435 2001 for (i = IndexSetSize - 1; i != 0; i -= IndexSetStride) {
duke@435 2002 FreeList *fl = &_indexedFreeList[i];
duke@435 2003 fl->set_hint(h);
duke@435 2004 if (fl->surplus() > 0) {
duke@435 2005 h = i;
duke@435 2006 }
duke@435 2007 }
duke@435 2008 }
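// Illustrative example (hypothetical census): each list records, as its
// hint, the next larger size that had a surplus during the downward walk.
// If only the lists for sizes 24 and 40 have a surplus, then lists below 24
// get hint 24, lists from 24 up to (but not including) 40 get hint 40, and
// lists of size 40 and above keep the initial hint IndexSetSize, meaning
// "no larger indexed list has a surplus".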
duke@435 2009
duke@435 2010 void CompactibleFreeListSpace::clearFLCensus() {
duke@435 2011 assert_locked();
duke@435 2012 size_t i;
duke@435 2013 for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
duke@435 2014 FreeList *fl = &_indexedFreeList[i];
duke@435 2015 fl->set_prevSweep(fl->count());
duke@435 2016 fl->set_coalBirths(0);
duke@435 2017 fl->set_coalDeaths(0);
duke@435 2018 fl->set_splitBirths(0);
duke@435 2019 fl->set_splitDeaths(0);
duke@435 2020 }
duke@435 2021 }
duke@435 2022
ysr@447 2023 void CompactibleFreeListSpace::endSweepFLCensus(size_t sweep_count) {
duke@435 2024 setFLSurplus();
duke@435 2025 setFLHints();
duke@435 2026 if (PrintGC && PrintFLSCensus > 0) {
ysr@447 2027 printFLCensus(sweep_count);
duke@435 2028 }
duke@435 2029 clearFLCensus();
duke@435 2030 assert_locked();
duke@435 2031 _dictionary->endSweepDictCensus(SplitSurplusPercent);
duke@435 2032 }
duke@435 2033
duke@435 2034 bool CompactibleFreeListSpace::coalOverPopulated(size_t size) {
duke@435 2035 if (size < SmallForDictionary) {
duke@435 2036 FreeList *fl = &_indexedFreeList[size];
duke@435 2037 return (fl->coalDesired() < 0) ||
duke@435 2038 ((int)fl->count() > fl->coalDesired());
duke@435 2039 } else {
duke@435 2040 return dictionary()->coalDictOverPopulated(size);
duke@435 2041 }
duke@435 2042 }
duke@435 2043
duke@435 2044 void CompactibleFreeListSpace::smallCoalBirth(size_t size) {
duke@435 2045 assert(size < SmallForDictionary, "Size too large for indexed list");
duke@435 2046 FreeList *fl = &_indexedFreeList[size];
duke@435 2047 fl->increment_coalBirths();
duke@435 2048 fl->increment_surplus();
duke@435 2049 }
duke@435 2050
duke@435 2051 void CompactibleFreeListSpace::smallCoalDeath(size_t size) {
duke@435 2052 assert(size < SmallForDictionary, "Size too large for indexed list");
duke@435 2053 FreeList *fl = &_indexedFreeList[size];
duke@435 2054 fl->increment_coalDeaths();
duke@435 2055 fl->decrement_surplus();
duke@435 2056 }
duke@435 2057
duke@435 2058 void CompactibleFreeListSpace::coalBirth(size_t size) {
duke@435 2059 if (size < SmallForDictionary) {
duke@435 2060 smallCoalBirth(size);
duke@435 2061 } else {
duke@435 2062 dictionary()->dictCensusUpdate(size,
duke@435 2063 false /* split */,
duke@435 2064 true /* birth */);
duke@435 2065 }
duke@435 2066 }
duke@435 2067
duke@435 2068 void CompactibleFreeListSpace::coalDeath(size_t size) {
duke@435 2069 if (size < SmallForDictionary) {
duke@435 2070 smallCoalDeath(size);
duke@435 2071 } else {
duke@435 2072 dictionary()->dictCensusUpdate(size,
duke@435 2073 false /* split */,
duke@435 2074 false /* birth */);
duke@435 2075 }
duke@435 2076 }
duke@435 2077
duke@435 2078 void CompactibleFreeListSpace::smallSplitBirth(size_t size) {
duke@435 2079 assert(size < SmallForDictionary, "Size too large for indexed list");
duke@435 2080 FreeList *fl = &_indexedFreeList[size];
duke@435 2081 fl->increment_splitBirths();
duke@435 2082 fl->increment_surplus();
duke@435 2083 }
duke@435 2084
duke@435 2085 void CompactibleFreeListSpace::smallSplitDeath(size_t size) {
duke@435 2086 assert(size < SmallForDictionary, "Size too large for indexed list");
duke@435 2087 FreeList *fl = &_indexedFreeList[size];
duke@435 2088 fl->increment_splitDeaths();
duke@435 2089 fl->decrement_surplus();
duke@435 2090 }
duke@435 2091
duke@435 2092 void CompactibleFreeListSpace::splitBirth(size_t size) {
duke@435 2093 if (size < SmallForDictionary) {
duke@435 2094 smallSplitBirth(size);
duke@435 2095 } else {
duke@435 2096 dictionary()->dictCensusUpdate(size,
duke@435 2097 true /* split */,
duke@435 2098 true /* birth */);
duke@435 2099 }
duke@435 2100 }
duke@435 2101
duke@435 2102 void CompactibleFreeListSpace::splitDeath(size_t size) {
duke@435 2103 if (size < SmallForDictionary) {
duke@435 2104 smallSplitDeath(size);
duke@435 2105 } else {
duke@435 2106 dictionary()->dictCensusUpdate(size,
duke@435 2107 true /* split */,
duke@435 2108 false /* birth */);
duke@435 2109 }
duke@435 2110 }
duke@435 2111
duke@435 2112 void CompactibleFreeListSpace::split(size_t from, size_t to1) {
duke@435 2113 size_t to2 = from - to1;
duke@435 2114 splitDeath(from);
duke@435 2115 splitBirth(to1);
duke@435 2116 splitBirth(to2);
duke@435 2117 }
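// Illustrative example: split(100, 60) records a split death for size 100
// and split births for sizes 60 and 40; each update lands either in the
// indexed free list census or in the dictionary census, depending on
// whether the size is below SmallForDictionary.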
duke@435 2118
duke@435 2119 void CompactibleFreeListSpace::print() const {
duke@435 2120 tty->print(" CompactibleFreeListSpace");
duke@435 2121 Space::print();
duke@435 2122 }
duke@435 2123
duke@435 2124 void CompactibleFreeListSpace::prepare_for_verify() {
duke@435 2125 assert_locked();
duke@435 2126 repairLinearAllocationBlocks();
duke@435 2127 // Verify that the SpoolBlocks look like free blocks of
duke@435 2128 // appropriate sizes... To be done ...
duke@435 2129 }
duke@435 2130
duke@435 2131 class VerifyAllBlksClosure: public BlkClosure {
coleenp@548 2132 private:
duke@435 2133 const CompactibleFreeListSpace* _sp;
duke@435 2134 const MemRegion _span;
duke@435 2135
duke@435 2136 public:
duke@435 2137 VerifyAllBlksClosure(const CompactibleFreeListSpace* sp,
duke@435 2138 MemRegion span) : _sp(sp), _span(span) { }
duke@435 2139
coleenp@548 2140 virtual size_t do_blk(HeapWord* addr) {
duke@435 2141 size_t res;
duke@435 2142 if (_sp->block_is_obj(addr)) {
duke@435 2143 oop p = oop(addr);
duke@435 2144 guarantee(p->is_oop(), "Should be an oop");
duke@435 2145 res = _sp->adjustObjectSize(p->size());
duke@435 2146 if (_sp->obj_is_alive(addr)) {
duke@435 2147 p->verify();
duke@435 2148 }
duke@435 2149 } else {
duke@435 2150 FreeChunk* fc = (FreeChunk*)addr;
duke@435 2151 res = fc->size();
duke@435 2152 if (FLSVerifyLists && !fc->cantCoalesce()) {
duke@435 2153 guarantee(_sp->verifyChunkInFreeLists(fc),
duke@435 2154 "Chunk should be on a free list");
duke@435 2155 }
duke@435 2156 }
duke@435 2157 guarantee(res != 0, "Livelock: no rank reduction!");
duke@435 2158 return res;
duke@435 2159 }
duke@435 2160 };
duke@435 2161
duke@435 2162 class VerifyAllOopsClosure: public OopClosure {
coleenp@548 2163 private:
duke@435 2164 const CMSCollector* _collector;
duke@435 2165 const CompactibleFreeListSpace* _sp;
duke@435 2166 const MemRegion _span;
duke@435 2167 const bool _past_remark;
duke@435 2168 const CMSBitMap* _bit_map;
duke@435 2169
coleenp@548 2170 protected:
coleenp@548 2171 void do_oop(void* p, oop obj) {
coleenp@548 2172 if (_span.contains(obj)) { // the interior oop points into CMS heap
coleenp@548 2173 if (!_span.contains(p)) { // reference from outside CMS heap
coleenp@548 2174 // Should be a valid object; the first disjunct below allows
coleenp@548 2175 // us to sidestep an assertion in block_is_obj() that insists
coleenp@548 2176 // that p be in _sp. Note that several generations (and spaces)
coleenp@548 2177 // are spanned by _span (CMS heap) above.
coleenp@548 2178 guarantee(!_sp->is_in_reserved(obj) ||
coleenp@548 2179 _sp->block_is_obj((HeapWord*)obj),
coleenp@548 2180 "Should be an object");
coleenp@548 2181 guarantee(obj->is_oop(), "Should be an oop");
coleenp@548 2182 obj->verify();
coleenp@548 2183 if (_past_remark) {
coleenp@548 2184 // Remark has been completed, the object should be marked
coleenp@548 2185 _bit_map->isMarked((HeapWord*)obj);
coleenp@548 2186 }
coleenp@548 2187 } else { // reference within CMS heap
coleenp@548 2188 if (_past_remark) {
coleenp@548 2189 // Remark has been completed -- so the referent should have
coleenp@548 2190 // been marked, if referring object is.
coleenp@548 2191 if (_bit_map->isMarked(_collector->block_start(p))) {
coleenp@548 2192 guarantee(_bit_map->isMarked((HeapWord*)obj), "Marking error?");
coleenp@548 2193 }
coleenp@548 2194 }
coleenp@548 2195 }
coleenp@548 2196 } else if (_sp->is_in_reserved(p)) {
coleenp@548 2197 // the reference is from FLS, and points out of FLS
coleenp@548 2198 guarantee(obj->is_oop(), "Should be an oop");
coleenp@548 2199 obj->verify();
coleenp@548 2200 }
coleenp@548 2201 }
coleenp@548 2202
coleenp@548 2203 template <class T> void do_oop_work(T* p) {
coleenp@548 2204 T heap_oop = oopDesc::load_heap_oop(p);
coleenp@548 2205 if (!oopDesc::is_null(heap_oop)) {
coleenp@548 2206 oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
coleenp@548 2207 do_oop(p, obj);
coleenp@548 2208 }
coleenp@548 2209 }
coleenp@548 2210
duke@435 2211 public:
duke@435 2212 VerifyAllOopsClosure(const CMSCollector* collector,
duke@435 2213 const CompactibleFreeListSpace* sp, MemRegion span,
duke@435 2214 bool past_remark, CMSBitMap* bit_map) :
duke@435 2215 OopClosure(), _collector(collector), _sp(sp), _span(span),
duke@435 2216 _past_remark(past_remark), _bit_map(bit_map) { }
duke@435 2217
coleenp@548 2218 virtual void do_oop(oop* p) { VerifyAllOopsClosure::do_oop_work(p); }
coleenp@548 2219 virtual void do_oop(narrowOop* p) { VerifyAllOopsClosure::do_oop_work(p); }
duke@435 2220 };
duke@435 2221
duke@435 2222 void CompactibleFreeListSpace::verify(bool ignored) const {
duke@435 2223 assert_lock_strong(&_freelistLock);
duke@435 2224 verify_objects_initialized();
duke@435 2225 MemRegion span = _collector->_span;
duke@435 2226 bool past_remark = (_collector->abstract_state() ==
duke@435 2227 CMSCollector::Sweeping);
duke@435 2228
duke@435 2229 ResourceMark rm;
duke@435 2230 HandleMark hm;
duke@435 2231
duke@435 2232 // Check integrity of CFL data structures
duke@435 2233 _promoInfo.verify();
duke@435 2234 _dictionary->verify();
duke@435 2235 if (FLSVerifyIndexTable) {
duke@435 2236 verifyIndexedFreeLists();
duke@435 2237 }
duke@435 2238 // Check integrity of all objects and free blocks in space
duke@435 2239 {
duke@435 2240 VerifyAllBlksClosure cl(this, span);
duke@435 2241 ((CompactibleFreeListSpace*)this)->blk_iterate(&cl); // cast off const
duke@435 2242 }
duke@435 2243 // Check that all references in the heap to FLS
duke@435 2244 // are to valid objects in FLS or that references in
duke@435 2245 // FLS are to valid objects elsewhere in the heap
duke@435 2246 if (FLSVerifyAllHeapReferences)
duke@435 2247 {
duke@435 2248 VerifyAllOopsClosure cl(_collector, this, span, past_remark,
duke@435 2249 _collector->markBitMap());
duke@435 2250 CollectedHeap* ch = Universe::heap();
duke@435 2251 ch->oop_iterate(&cl); // all oops in generations
duke@435 2252 ch->permanent_oop_iterate(&cl); // all oops in perm gen
duke@435 2253 }
duke@435 2254
duke@435 2255 if (VerifyObjectStartArray) {
duke@435 2256 // Verify the block offset table
duke@435 2257 _bt.verify();
duke@435 2258 }
duke@435 2259 }
duke@435 2260
duke@435 2261 #ifndef PRODUCT
duke@435 2262 void CompactibleFreeListSpace::verifyFreeLists() const {
duke@435 2263 if (FLSVerifyLists) {
duke@435 2264 _dictionary->verify();
duke@435 2265 verifyIndexedFreeLists();
duke@435 2266 } else {
duke@435 2267 if (FLSVerifyDictionary) {
duke@435 2268 _dictionary->verify();
duke@435 2269 }
duke@435 2270 if (FLSVerifyIndexTable) {
duke@435 2271 verifyIndexedFreeLists();
duke@435 2272 }
duke@435 2273 }
duke@435 2274 }
duke@435 2275 #endif
duke@435 2276
duke@435 2277 void CompactibleFreeListSpace::verifyIndexedFreeLists() const {
duke@435 2278 size_t i = 0;
duke@435 2279 for (; i < MinChunkSize; i++) {
duke@435 2280 guarantee(_indexedFreeList[i].head() == NULL, "should be NULL");
duke@435 2281 }
duke@435 2282 for (; i < IndexSetSize; i++) {
duke@435 2283 verifyIndexedFreeList(i);
duke@435 2284 }
duke@435 2285 }
duke@435 2286
duke@435 2287 void CompactibleFreeListSpace::verifyIndexedFreeList(size_t size) const {
duke@435 2288 guarantee(size % 2 == 0, "Odd slots should be empty");
duke@435 2289 for (FreeChunk* fc = _indexedFreeList[size].head(); fc != NULL;
duke@435 2290 fc = fc->next()) {
duke@435 2291 guarantee(fc->size() == size, "Size inconsistency");
duke@435 2292 guarantee(fc->isFree(), "!free?");
duke@435 2293 guarantee(fc->next() == NULL || fc->next()->prev() == fc, "Broken list");
duke@435 2294 }
duke@435 2295 }
duke@435 2296
duke@435 2297 #ifndef PRODUCT
duke@435 2298 void CompactibleFreeListSpace::checkFreeListConsistency() const {
duke@435 2299 assert(_dictionary->minSize() <= IndexSetSize,
duke@435 2300 "Some sizes can't be allocated without recourse to"
duke@435 2301 " linear allocation buffers");
duke@435 2302 assert(MIN_TREE_CHUNK_SIZE*HeapWordSize == sizeof(TreeChunk),
duke@435 2303 "else MIN_TREE_CHUNK_SIZE is wrong");
duke@435 2304 assert((IndexSetStride == 2 && IndexSetStart == 2) ||
duke@435 2305 (IndexSetStride == 1 && IndexSetStart == 1), "just checking");
duke@435 2306 assert((IndexSetStride != 2) || (MinChunkSize % 2 == 0),
duke@435 2307 "Some for-loops may be incorrectly initialized");
duke@435 2308 assert((IndexSetStride != 2) || (IndexSetSize % 2 == 1),
duke@435 2309 "For-loops that iterate over IndexSet with stride 2 may be wrong");
duke@435 2310 }
duke@435 2311 #endif
duke@435 2312
ysr@447 2313 void CompactibleFreeListSpace::printFLCensus(size_t sweep_count) const {
duke@435 2314 assert_lock_strong(&_freelistLock);
ysr@447 2315 FreeList total;
ysr@447 2316 gclog_or_tty->print("end sweep# " SIZE_FORMAT "\n", sweep_count);
ysr@447 2317 FreeList::print_labels_on(gclog_or_tty, "size");
duke@435 2318 size_t totalFree = 0;
duke@435 2319 for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
duke@435 2320 const FreeList *fl = &_indexedFreeList[i];
ysr@447 2321 totalFree += fl->count() * fl->size();
ysr@447 2322 if (i % (40*IndexSetStride) == 0) {
ysr@447 2323 FreeList::print_labels_on(gclog_or_tty, "size");
ysr@447 2324 }
ysr@447 2325 fl->print_on(gclog_or_tty);
ysr@447 2326 total.set_bfrSurp( total.bfrSurp() + fl->bfrSurp() );
ysr@447 2327 total.set_surplus( total.surplus() + fl->surplus() );
ysr@447 2328 total.set_desired( total.desired() + fl->desired() );
ysr@447 2329 total.set_prevSweep( total.prevSweep() + fl->prevSweep() );
ysr@447 2330 total.set_beforeSweep(total.beforeSweep() + fl->beforeSweep());
ysr@447 2331 total.set_count( total.count() + fl->count() );
ysr@447 2332 total.set_coalBirths( total.coalBirths() + fl->coalBirths() );
ysr@447 2333 total.set_coalDeaths( total.coalDeaths() + fl->coalDeaths() );
ysr@447 2334 total.set_splitBirths(total.splitBirths() + fl->splitBirths());
ysr@447 2335 total.set_splitDeaths(total.splitDeaths() + fl->splitDeaths());
duke@435 2336 }
ysr@447 2337 total.print_on(gclog_or_tty, "TOTAL");
ysr@447 2338 gclog_or_tty->print_cr("Total free in indexed lists "
ysr@447 2339 SIZE_FORMAT " words", totalFree);
duke@435 2340 gclog_or_tty->print("growth: %8.5f deficit: %8.5f\n",
ysr@447 2341 (double)(total.splitBirths()+total.coalBirths()-total.splitDeaths()-total.coalDeaths())/
ysr@447 2342 (total.prevSweep() != 0 ? (double)total.prevSweep() : 1.0),
ysr@447 2343 (double)(total.desired() - total.count())/(total.desired() != 0 ? (double)total.desired() : 1.0));
duke@435 2344 _dictionary->printDictCensus();
duke@435 2345 }
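// The two ratios printed above are, informally:
//   growth  = (splitBirths + coalBirths - splitDeaths - coalDeaths) / prevSweep
//   deficit = (desired - count) / desired
// For example (hypothetical totals), 120 births, 100 deaths and a
// previous-sweep count of 400 give a growth of 0.05, while a desired count
// of 500 against an actual count of 450 gives a deficit of 0.10.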
duke@435 2346
duke@435 2347 // Return the next displaced header, incrementing the pointer and
duke@435 2348 // recycling spool area as necessary.
duke@435 2349 markOop PromotionInfo::nextDisplacedHeader() {
duke@435 2350 assert(_spoolHead != NULL, "promotionInfo inconsistency");
duke@435 2351 assert(_spoolHead != _spoolTail || _firstIndex < _nextIndex,
duke@435 2352 "Empty spool space: no displaced header can be fetched");
duke@435 2353 assert(_spoolHead->bufferSize > _firstIndex, "Off by one error at head?");
duke@435 2354 markOop hdr = _spoolHead->displacedHdr[_firstIndex];
duke@435 2355 // Spool forward
duke@435 2356 if (++_firstIndex == _spoolHead->bufferSize) { // last location in this block
duke@435 2357 // forward to next block, recycling this block into spare spool buffer
duke@435 2358 SpoolBlock* tmp = _spoolHead->nextSpoolBlock;
duke@435 2359 assert(_spoolHead != _spoolTail, "Spooling storage mix-up");
duke@435 2360 _spoolHead->nextSpoolBlock = _spareSpool;
duke@435 2361 _spareSpool = _spoolHead;
duke@435 2362 _spoolHead = tmp;
duke@435 2363 _firstIndex = 1;
duke@435 2364 NOT_PRODUCT(
duke@435 2365 if (_spoolHead == NULL) { // all buffers fully consumed
duke@435 2366 assert(_spoolTail == NULL && _nextIndex == 1,
duke@435 2367 "spool buffers processing inconsistency");
duke@435 2368 }
duke@435 2369 )
duke@435 2370 }
duke@435 2371 return hdr;
duke@435 2372 }
duke@435 2373
duke@435 2374 void PromotionInfo::track(PromotedObject* trackOop) {
duke@435 2375 track(trackOop, oop(trackOop)->klass());
duke@435 2376 }
duke@435 2377
duke@435 2378 void PromotionInfo::track(PromotedObject* trackOop, klassOop klassOfOop) {
duke@435 2379 // make a copy of header as it may need to be spooled
duke@435 2380 markOop mark = oop(trackOop)->mark();
duke@435 2381 trackOop->clearNext();
duke@435 2382 if (mark->must_be_preserved_for_cms_scavenge(klassOfOop)) {
duke@435 2383 // save non-prototypical header, and mark oop
duke@435 2384 saveDisplacedHeader(mark);
duke@435 2385 trackOop->setDisplacedMark();
duke@435 2386 } else {
duke@435 2387 // we'd like to assert something like the following:
duke@435 2388 // assert(mark == markOopDesc::prototype(), "consistency check");
duke@435 2389 // ... but the above won't work because the age bits have not (yet) been
duke@435 2390 // cleared. The remainder of the check would be identical to the
duke@435 2391 // condition checked in must_be_preserved() above, so we don't really
duke@435 2392 // have anything useful to check here!
duke@435 2393 }
duke@435 2394 if (_promoTail != NULL) {
duke@435 2395 assert(_promoHead != NULL, "List consistency");
duke@435 2396 _promoTail->setNext(trackOop);
duke@435 2397 _promoTail = trackOop;
duke@435 2398 } else {
duke@435 2399 assert(_promoHead == NULL, "List consistency");
duke@435 2400 _promoHead = _promoTail = trackOop;
duke@435 2401 }
duke@435 2402 // Mark as newly promoted, so we can skip over such objects
duke@435 2403 // when scanning dirty cards
duke@435 2404 assert(!trackOop->hasPromotedMark(), "Should not have been marked");
duke@435 2405 trackOop->setPromotedMark();
duke@435 2406 }
duke@435 2407
duke@435 2408 // Save the given displaced header, incrementing the pointer and
duke@435 2409 // obtaining more spool area as necessary.
duke@435 2410 void PromotionInfo::saveDisplacedHeader(markOop hdr) {
duke@435 2411 assert(_spoolHead != NULL && _spoolTail != NULL,
duke@435 2412 "promotionInfo inconsistency");
duke@435 2413 assert(_spoolTail->bufferSize > _nextIndex, "Off by one error at tail?");
duke@435 2414 _spoolTail->displacedHdr[_nextIndex] = hdr;
duke@435 2415 // Spool forward
duke@435 2416 if (++_nextIndex == _spoolTail->bufferSize) { // last location in this block
duke@435 2417 // get a new spooling block
duke@435 2418 assert(_spoolTail->nextSpoolBlock == NULL, "tail should terminate spool list");
duke@435 2419 _splice_point = _spoolTail; // save for splicing
duke@435 2420 _spoolTail->nextSpoolBlock = getSpoolBlock(); // might fail
duke@435 2421 _spoolTail = _spoolTail->nextSpoolBlock; // might become NULL ...
duke@435 2422 // ... but will attempt filling before next promotion attempt
duke@435 2423 _nextIndex = 1;
duke@435 2424 }
duke@435 2425 }
duke@435 2426
duke@435 2427 // Ensure that spooling space exists. Return false if spooling space
duke@435 2428 // could not be obtained.
duke@435 2429 bool PromotionInfo::ensure_spooling_space_work() {
duke@435 2430 assert(!has_spooling_space(), "Only call when there is no spooling space");
duke@435 2431 // Try and obtain more spooling space
duke@435 2432 SpoolBlock* newSpool = getSpoolBlock();
duke@435 2433 assert(newSpool == NULL ||
duke@435 2434 (newSpool->bufferSize != 0 && newSpool->nextSpoolBlock == NULL),
duke@435 2435 "getSpoolBlock() sanity check");
duke@435 2436 if (newSpool == NULL) {
duke@435 2437 return false;
duke@435 2438 }
duke@435 2439 _nextIndex = 1;
duke@435 2440 if (_spoolTail == NULL) {
duke@435 2441 _spoolTail = newSpool;
duke@435 2442 if (_spoolHead == NULL) {
duke@435 2443 _spoolHead = newSpool;
duke@435 2444 _firstIndex = 1;
duke@435 2445 } else {
duke@435 2446 assert(_splice_point != NULL && _splice_point->nextSpoolBlock == NULL,
duke@435 2447 "Splice point invariant");
duke@435 2448 // Extra check that _splice_point is connected to list
duke@435 2449 #ifdef ASSERT
duke@435 2450 {
duke@435 2451 SpoolBlock* blk = _spoolHead;
duke@435 2452 for (; blk->nextSpoolBlock != NULL;
duke@435 2453 blk = blk->nextSpoolBlock);
duke@435 2454 assert(blk != NULL && blk == _splice_point,
duke@435 2455 "Splice point incorrect");
duke@435 2456 }
duke@435 2457 #endif // ASSERT
duke@435 2458 _splice_point->nextSpoolBlock = newSpool;
duke@435 2459 }
duke@435 2460 } else {
duke@435 2461 assert(_spoolHead != NULL, "spool list consistency");
duke@435 2462 _spoolTail->nextSpoolBlock = newSpool;
duke@435 2463 _spoolTail = newSpool;
duke@435 2464 }
duke@435 2465 return true;
duke@435 2466 }
duke@435 2467
duke@435 2468 // Get a free spool buffer from the free pool, getting a new block
duke@435 2469 // from the heap if necessary.
duke@435 2470 SpoolBlock* PromotionInfo::getSpoolBlock() {
duke@435 2471 SpoolBlock* res;
duke@435 2472 if ((res = _spareSpool) != NULL) {
duke@435 2473 _spareSpool = _spareSpool->nextSpoolBlock;
duke@435 2474 res->nextSpoolBlock = NULL;
duke@435 2475 } else { // spare spool exhausted, get some from heap
duke@435 2476 res = (SpoolBlock*)(space()->allocateScratch(refillSize()));
duke@435 2477 if (res != NULL) {
duke@435 2478 res->init();
duke@435 2479 }
duke@435 2480 }
duke@435 2481 assert(res == NULL || res->nextSpoolBlock == NULL, "postcondition");
duke@435 2482 return res;
duke@435 2483 }
duke@435 2484
duke@435 2485 void PromotionInfo::startTrackingPromotions() {
duke@435 2486 assert(_spoolHead == _spoolTail && _firstIndex == _nextIndex,
duke@435 2487 "spooling inconsistency?");
duke@435 2488 _firstIndex = _nextIndex = 1;
duke@435 2489 _tracking = true;
duke@435 2490 }
duke@435 2491
duke@435 2492 void PromotionInfo::stopTrackingPromotions() {
duke@435 2493 assert(_spoolHead == _spoolTail && _firstIndex == _nextIndex,
duke@435 2494 "spooling inconsistency?");
duke@435 2495 _firstIndex = _nextIndex = 1;
duke@435 2496 _tracking = false;
duke@435 2497 }
duke@435 2498
duke@435 2499 // When _spoolTail is not NULL, then the slot <_spoolTail, _nextIndex>
duke@435 2500 // points to the next slot available for filling.
duke@435 2501 // The set of slots holding displaced headers are then all those in the
duke@435 2502 // right-open interval denoted by:
duke@435 2503 //
duke@435 2504 // [ <_spoolHead, _firstIndex>, <_spoolTail, _nextIndex> )
duke@435 2505 //
duke@435 2506 // When _spoolTail is NULL, then the set of slots with displaced headers
duke@435 2507 // is all those starting at the slot <_spoolHead, _firstIndex> and
duke@435 2508 // going up to the last slot of the last block in the linked list.
duke@435 2509 // In this latter case, _splice_point points to the tail block of
duke@435 2510 // this linked list of blocks holding displaced headers.
duke@435 2511 void PromotionInfo::verify() const {
duke@435 2512 // Verify the following:
duke@435 2513 // 1. the number of displaced headers matches the number of promoted
duke@435 2514 // objects that have displaced headers
duke@435 2515 // 2. each promoted object lies in this space
duke@435 2516 debug_only(
duke@435 2517 PromotedObject* junk = NULL;
duke@435 2518 assert(junk->next_addr() == (void*)(oop(junk)->mark_addr()),
duke@435 2519 "Offset of PromotedObject::_next is expected to align with "
duke@435 2520 " the OopDesc::_mark within OopDesc");
duke@435 2521 )
duke@435 2522 // FIXME: guarantee????
duke@435 2523 guarantee(_spoolHead == NULL || _spoolTail != NULL ||
duke@435 2524 _splice_point != NULL, "list consistency");
duke@435 2525 guarantee(_promoHead == NULL || _promoTail != NULL, "list consistency");
duke@435 2526 // count the number of objects with displaced headers
duke@435 2527 size_t numObjsWithDisplacedHdrs = 0;
duke@435 2528 for (PromotedObject* curObj = _promoHead; curObj != NULL; curObj = curObj->next()) {
duke@435 2529 guarantee(space()->is_in_reserved((HeapWord*)curObj), "Containment");
duke@435 2530 // the last promoted object may fail the mark() != NULL test of is_oop().
duke@435 2531 guarantee(curObj->next() == NULL || oop(curObj)->is_oop(), "must be an oop");
duke@435 2532 if (curObj->hasDisplacedMark()) {
duke@435 2533 numObjsWithDisplacedHdrs++;
duke@435 2534 }
duke@435 2535 }
duke@435 2536 // Count the number of displaced headers
duke@435 2537 size_t numDisplacedHdrs = 0;
duke@435 2538 for (SpoolBlock* curSpool = _spoolHead;
duke@435 2539 curSpool != _spoolTail && curSpool != NULL;
duke@435 2540 curSpool = curSpool->nextSpoolBlock) {
duke@435 2541 // the first entry is just a self-pointer; indices 1 through
duke@435 2542 // bufferSize - 1 are occupied (thus, bufferSize - 1 slots).
duke@435 2543 guarantee((void*)curSpool->displacedHdr == (void*)&curSpool->displacedHdr,
duke@435 2544 "first entry of displacedHdr should be self-referential");
duke@435 2545 numDisplacedHdrs += curSpool->bufferSize - 1;
duke@435 2546 }
duke@435 2547 guarantee((_spoolHead == _spoolTail) == (numDisplacedHdrs == 0),
duke@435 2548 "internal consistency");
duke@435 2549 guarantee(_spoolTail != NULL || _nextIndex == 1,
duke@435 2550 "Inconsistency between _spoolTail and _nextIndex");
duke@435 2551 // We overcounted (_firstIndex-1) worth of slots in block
duke@435 2552 // _spoolHead and we undercounted (_nextIndex-1) worth of
duke@435 2553 // slots in block _spoolTail. We make an appropriate
duke@435 2554 // adjustment by subtracting the first and adding the
duke@435 2555 // second: - (_firstIndex - 1) + (_nextIndex - 1)
duke@435 2556 numDisplacedHdrs += (_nextIndex - _firstIndex);
duke@435 2557 guarantee(numDisplacedHdrs == numObjsWithDisplacedHdrs, "Displaced hdr count");
duke@435 2558 }
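
// A small worked sketch of the slot accounting in verify() above, using
// hypothetical names: each spool block contributes (buffer_size - 1) usable
// slots (index 0 is the self-referential header), the head block's first
// (first_index - 1) slots have already been consumed, and the tail block is
// filled only up to (next_index - 1). Assumes at least one block was counted.
static inline size_t displaced_hdr_count_sketch(size_t counted_blocks, // head up to, not including, tail
                                                size_t buffer_size,
                                                size_t first_index,
                                                size_t next_index) {
  size_t count = counted_blocks * (buffer_size - 1);
  // - (first_index - 1) + (next_index - 1) == next_index - first_index,
  // which is exactly the adjustment made at the end of verify().
  return count - (first_index - 1) + (next_index - 1);
}
// For example: buffer_size == 4, two blocks ahead of the tail, first_index == 3,
// next_index == 2  =>  2*3 - 2 + 1 == 5 occupied slots.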
duke@435 2559
duke@435 2560
duke@435 2561 CFLS_LAB::CFLS_LAB(CompactibleFreeListSpace* cfls) :
duke@435 2562 _cfls(cfls)
duke@435 2563 {
duke@435 2564 _blocks_to_claim = CMSParPromoteBlocksToClaim;
duke@435 2565 for (size_t i = CompactibleFreeListSpace::IndexSetStart;
duke@435 2566 i < CompactibleFreeListSpace::IndexSetSize;
duke@435 2567 i += CompactibleFreeListSpace::IndexSetStride) {
duke@435 2568 _indexedFreeList[i].set_size(i);
duke@435 2569 }
duke@435 2570 }
duke@435 2571
duke@435 2572 HeapWord* CFLS_LAB::alloc(size_t word_sz) {
duke@435 2573 FreeChunk* res;
duke@435 2574 word_sz = _cfls->adjustObjectSize(word_sz);
duke@435 2575 if (word_sz >= CompactibleFreeListSpace::IndexSetSize) {
duke@435 2576 // This locking manages sync with other large object allocations.
duke@435 2577 MutexLockerEx x(_cfls->parDictionaryAllocLock(),
duke@435 2578 Mutex::_no_safepoint_check_flag);
duke@435 2579 res = _cfls->getChunkFromDictionaryExact(word_sz);
duke@435 2580 if (res == NULL) return NULL;
duke@435 2581 } else {
duke@435 2582 FreeList* fl = &_indexedFreeList[word_sz];
duke@435 2583 bool filled = false; //TRAP
duke@435 2584 if (fl->count() == 0) {
duke@435 2585 filled = true; //TRAP
duke@435 2586 // Attempt to refill this local free list.
duke@435 2587 _cfls->par_get_chunk_of_blocks(word_sz, _blocks_to_claim, fl);
duke@435 2588 // If it didn't work, give up.
duke@435 2589 if (fl->count() == 0) return NULL;
duke@435 2590 }
duke@435 2591 res = fl->getChunkAtHead();
duke@435 2592 assert(res != NULL, "Why was count non-zero?");
duke@435 2593 }
duke@435 2594 res->markNotFree();
duke@435 2595 assert(!res->isFree(), "shouldn't be marked free");
duke@435 2596 assert(oop(res)->klass() == NULL, "should look uninitialized");
duke@435 2597 // mangle a just allocated object with a distinct pattern.
duke@435 2598 debug_only(res->mangleAllocated(word_sz));
duke@435 2599 return (HeapWord*)res;
duke@435 2600 }
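
// A minimal sketch of the routing done in CFLS_LAB::alloc() above: requests at
// or above the indexed-set limit go to the shared dictionary under a lock,
// smaller requests are served from a per-thread list that is refilled in bulk
// the first time it is found empty. The tiny cache type below is hypothetical
// and stands in for the per-size FreeList plus par_get_chunk_of_blocks().
struct LabCacheSketch {
  size_t count;                           // chunks currently cached for one size class
  size_t refill(size_t blocks_to_claim) { // stand-in for par_get_chunk_of_blocks()
    count += blocks_to_claim;             // pretend the shared space supplied them all
    return count;
  }
  bool pop() {                            // stand-in for getChunkAtHead()
    if (count == 0) return false;
    count--;
    return true;
  }
};

static bool lab_alloc_small_sketch(LabCacheSketch& cache, size_t blocks_to_claim) {
  if (cache.count == 0 && cache.refill(blocks_to_claim) == 0) {
    return false;                         // refill failed: the allocation gives up
  }
  return cache.pop();                     // fast path: no lock on the per-thread cache
}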
duke@435 2601
duke@435 2602 void CFLS_LAB::retire() {
duke@435 2603 for (size_t i = CompactibleFreeListSpace::IndexSetStart;
duke@435 2604 i < CompactibleFreeListSpace::IndexSetSize;
duke@435 2605 i += CompactibleFreeListSpace::IndexSetStride) {
duke@435 2606 if (_indexedFreeList[i].count() > 0) {
duke@435 2607 MutexLockerEx x(_cfls->_indexedFreeListParLocks[i],
duke@435 2608 Mutex::_no_safepoint_check_flag);
duke@435 2609 _cfls->_indexedFreeList[i].prepend(&_indexedFreeList[i]);
duke@435 2610 // Reset this list.
duke@435 2611 _indexedFreeList[i] = FreeList();
duke@435 2612 _indexedFreeList[i].set_size(i);
duke@435 2613 }
duke@435 2614 }
duke@435 2615 }
duke@435 2616
duke@435 2617 void
duke@435 2618 CompactibleFreeListSpace::
duke@435 2619 par_get_chunk_of_blocks(size_t word_sz, size_t n, FreeList* fl) {
duke@435 2620 assert(fl->count() == 0, "Precondition.");
duke@435 2621 assert(word_sz < CompactibleFreeListSpace::IndexSetSize,
duke@435 2622 "Precondition");
duke@435 2623
duke@435 2624 // We'll try word_sz itself in the indexed set (the loop condition k == 1
duke@435 2625 // limits us to that single multiple), then try getting a big chunk and splitting it.
duke@435 2626 int k = 1;
duke@435 2627 size_t cur_sz = k * word_sz;
duke@435 2628 bool found = false;
duke@435 2629 while (cur_sz < CompactibleFreeListSpace::IndexSetSize && k == 1) {
duke@435 2630 FreeList* gfl = &_indexedFreeList[cur_sz];
duke@435 2631 FreeList fl_for_cur_sz; // Empty.
duke@435 2632 fl_for_cur_sz.set_size(cur_sz);
duke@435 2633 {
duke@435 2634 MutexLockerEx x(_indexedFreeListParLocks[cur_sz],
duke@435 2635 Mutex::_no_safepoint_check_flag);
duke@435 2636 if (gfl->count() != 0) {
duke@435 2637 size_t nn = MAX2(n/k, (size_t)1);
duke@435 2638 gfl->getFirstNChunksFromList(nn, &fl_for_cur_sz);
duke@435 2639 found = true;
duke@435 2640 }
duke@435 2641 }
duke@435 2642 // Now transfer fl_for_cur_sz to fl. With the loop bound above, k is 1 here.
duke@435 2643 if (found) {
duke@435 2644 if (k == 1) {
duke@435 2645 fl->prepend(&fl_for_cur_sz);
duke@435 2646 } else {
duke@435 2647 // Divide each block on fl_for_cur_sz up k ways.
duke@435 2648 FreeChunk* fc;
duke@435 2649 while ((fc = fl_for_cur_sz.getChunkAtHead()) != NULL) {
duke@435 2650 // Must do this in reverse order, so that anybody attempting to
duke@435 2651 // access the main chunk sees it as a single free block until we
duke@435 2652 // change it.
duke@435 2653 size_t fc_size = fc->size();
duke@435 2654 for (int i = k-1; i >= 0; i--) {
duke@435 2655 FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz);
duke@435 2656 ffc->setSize(word_sz);
duke@435 2657 ffc->linkNext(NULL);
duke@435 2658 ffc->linkPrev(NULL); // Mark as a free block for other (parallel) GC threads.
duke@435 2659 // Above must occur before BOT is updated below.
duke@435 2660 // splitting from the right; at this point fc_size == (i + 1) * word_sz
duke@435 2661 _bt.mark_block((HeapWord*)ffc, word_sz);
duke@435 2662 fc_size -= word_sz;
duke@435 2663 _bt.verify_not_unallocated((HeapWord*)ffc, ffc->size());
duke@435 2664 _bt.verify_single_block((HeapWord*)fc, fc_size);
duke@435 2665 _bt.verify_single_block((HeapWord*)ffc, ffc->size());
duke@435 2666 // Push this on "fl".
duke@435 2667 fl->returnChunkAtHead(ffc);
duke@435 2668 }
duke@435 2669 // TRAP
duke@435 2670 assert(fl->tail()->next() == NULL, "List invariant.");
duke@435 2671 }
duke@435 2672 }
duke@435 2673 return;
duke@435 2674 }
duke@435 2675 k++; cur_sz = k * word_sz;
duke@435 2676 }
duke@435 2677 // Otherwise, we'll split a block from the dictionary.
duke@435 2678 FreeChunk* fc = NULL;
duke@435 2679 FreeChunk* rem_fc = NULL;
duke@435 2680 size_t rem;
duke@435 2681 {
duke@435 2682 MutexLockerEx x(parDictionaryAllocLock(),
duke@435 2683 Mutex::_no_safepoint_check_flag);
duke@435 2684 while (n > 0) {
duke@435 2685 fc = dictionary()->getChunk(MAX2(n * word_sz,
duke@435 2686 _dictionary->minSize()),
duke@435 2687 FreeBlockDictionary::atLeast);
duke@435 2688 if (fc != NULL) {
duke@435 2689 _bt.allocated((HeapWord*)fc, fc->size()); // update _unallocated_blk
duke@435 2690 dictionary()->dictCensusUpdate(fc->size(),
duke@435 2691 true /*split*/,
duke@435 2692 false /*birth*/);
duke@435 2693 break;
duke@435 2694 } else {
duke@435 2695 n--;
duke@435 2696 }
duke@435 2697 }
duke@435 2698 if (fc == NULL) return;
duke@435 2699 // Otherwise, split up that block.
duke@435 2700 size_t nn = fc->size() / word_sz;
duke@435 2701 n = MIN2(nn, n);
duke@435 2702 rem = fc->size() - n * word_sz;
duke@435 2703 // If there is a remainder, and it's too small, allocate one fewer.
duke@435 2704 if (rem > 0 && rem < MinChunkSize) {
duke@435 2705 n--; rem += word_sz;
duke@435 2706 }
duke@435 2707 // First return the remainder, if any.
duke@435 2708 // Note that we hold the lock until we decide if we're going to give
duke@435 2709 // back the remainder to the dictionary, since a contending allocator
duke@435 2710 // may otherwise see the heap as empty. (We're willing to take that
duke@435 2711 // hit if the block is a small block.)
duke@435 2712 if (rem > 0) {
duke@435 2713 size_t prefix_size = n * word_sz;
duke@435 2714 rem_fc = (FreeChunk*)((HeapWord*)fc + prefix_size);
duke@435 2715 rem_fc->setSize(rem);
duke@435 2716 rem_fc->linkNext(NULL);
duke@435 2717 rem_fc->linkPrev(NULL); // Mark as a free block for other (parallel) GC threads.
duke@435 2718 // Above must occur before BOT is updated below.
duke@435 2719 _bt.split_block((HeapWord*)fc, fc->size(), prefix_size);
duke@435 2720 if (rem >= IndexSetSize) {
duke@435 2721 returnChunkToDictionary(rem_fc);
duke@435 2722 dictionary()->dictCensusUpdate(fc->size(),
duke@435 2723 true /*split*/,
duke@435 2724 true /*birth*/);
duke@435 2725 rem_fc = NULL;
duke@435 2726 }
duke@435 2727 // Otherwise, return it to the small list below.
duke@435 2728 }
duke@435 2729 }
duke@435 2730 //
duke@435 2731 if (rem_fc != NULL) {
duke@435 2732 MutexLockerEx x(_indexedFreeListParLocks[rem],
duke@435 2733 Mutex::_no_safepoint_check_flag);
duke@435 2734 _bt.verify_not_unallocated((HeapWord*)rem_fc, rem_fc->size());
duke@435 2735 _indexedFreeList[rem].returnChunkAtHead(rem_fc);
duke@435 2736 smallSplitBirth(rem);
duke@435 2737 }
duke@435 2738
duke@435 2739 // Now do the splitting up.
duke@435 2740 // Must do this in reverse order, so that anybody attempting to
duke@435 2741 // access the main chunk sees it as a single free block until we
duke@435 2742 // change it.
duke@435 2743 size_t fc_size = n * word_sz;
duke@435 2744 // All but first chunk in this loop
duke@435 2745 for (ssize_t i = n-1; i > 0; i--) {
duke@435 2746 FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz);
duke@435 2747 ffc->setSize(word_sz);
duke@435 2748 ffc->linkNext(NULL);
duke@435 2749 ffc->linkPrev(NULL); // Mark as a free block for other (parallel) GC threads.
duke@435 2750 // Above must occur before BOT is updated below.
duke@435 2751 // splitting from the right; at this point fc_size == (i + 1) * word_sz
duke@435 2752 _bt.mark_block((HeapWord*)ffc, word_sz);
duke@435 2753 fc_size -= word_sz;
duke@435 2754 _bt.verify_not_unallocated((HeapWord*)ffc, ffc->size());
duke@435 2755 _bt.verify_single_block((HeapWord*)ffc, ffc->size());
duke@435 2756 _bt.verify_single_block((HeapWord*)fc, fc_size);
duke@435 2757 // Push this on "fl".
duke@435 2758 fl->returnChunkAtHead(ffc);
duke@435 2759 }
duke@435 2760 // First chunk
duke@435 2761 fc->setSize(word_sz);
duke@435 2762 fc->linkNext(NULL);
duke@435 2763 fc->linkPrev(NULL);
duke@435 2764 _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
duke@435 2765 _bt.verify_single_block((HeapWord*)fc, fc->size());
duke@435 2766 fl->returnChunkAtHead(fc);
duke@435 2767
duke@435 2768 {
duke@435 2769 MutexLockerEx x(_indexedFreeListParLocks[word_sz],
duke@435 2770 Mutex::_no_safepoint_check_flag);
duke@435 2771 ssize_t new_births = _indexedFreeList[word_sz].splitBirths() + n;
duke@435 2772 _indexedFreeList[word_sz].set_splitBirths(new_births);
duke@435 2773 ssize_t new_surplus = _indexedFreeList[word_sz].surplus() + n;
duke@435 2774 _indexedFreeList[word_sz].set_surplus(new_surplus);
duke@435 2775 }
duke@435 2776
duke@435 2777 // TRAP
duke@435 2778 assert(fl->tail()->next() == NULL, "List invariant.");
duke@435 2779 }
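
// A standalone sketch of the sizing arithmetic used above when a dictionary
// chunk of chunk_sz words is carved into blocks of word_sz words: take as many
// whole blocks as fit (and were asked for), and if the leftover would be a
// sliver smaller than min_chunk, fold one block back into the remainder so the
// remainder stays a legal free chunk. The carving itself is then done right to
// left so the parent chunk remains one valid free block until the last write.
// All names here are hypothetical.
static inline void split_plan_sketch(size_t chunk_sz, size_t word_sz,
                                     size_t requested_n, size_t min_chunk,
                                     size_t* n_out, size_t* rem_out) {
  size_t n = chunk_sz / word_sz;            // blocks that fit in the chunk
  if (requested_n < n) n = requested_n;     // never hand out more than requested
  size_t rem = chunk_sz - n * word_sz;      // words left over after the prefix
  if (rem > 0 && rem < min_chunk) {
    n--;                                    // give up one block ...
    rem += word_sz;                         // ... so the remainder is big enough
  }
  *n_out = n;
  *rem_out = rem;
}
// E.g. chunk_sz = 103, word_sz = 10, requested_n = 16, min_chunk = 5:
// n starts at 10, rem = 3 < 5, so the plan returns n = 9, rem = 13.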
duke@435 2780
duke@435 2781 // Set up the space's par_seq_tasks structure for work claiming
duke@435 2782 // for parallel rescan. See CMSParRemarkTask where this is currently used.
duke@435 2783 // XXX Need to suitably abstract and generalize this and the next
duke@435 2784 // method into one.
duke@435 2785 void
duke@435 2786 CompactibleFreeListSpace::
duke@435 2787 initialize_sequential_subtasks_for_rescan(int n_threads) {
duke@435 2788 // The "size" of each task is fixed according to rescan_task_size.
duke@435 2789 assert(n_threads > 0, "Unexpected n_threads argument");
duke@435 2790 const size_t task_size = rescan_task_size();
duke@435 2791 size_t n_tasks = (used_region().word_size() + task_size - 1)/task_size;
duke@435 2792 assert((used_region().start() + (n_tasks - 1)*task_size <
duke@435 2793 used_region().end()) &&
duke@435 2794 (used_region().start() + n_tasks*task_size >=
duke@435 2795 used_region().end()), "n_task calculation incorrect");
duke@435 2796 SequentialSubTasksDone* pst = conc_par_seq_tasks();
duke@435 2797 assert(!pst->valid(), "Clobbering existing data?");
duke@435 2798 pst->set_par_threads(n_threads);
duke@435 2799 pst->set_n_tasks((int)n_tasks);
duke@435 2800 }
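
// A quick sketch of the task sizing above: the used region is cut into
// fixed-size strides and the task count is the ceiling of region/task size,
// so the final task covers the (possibly short) tail. Illustrative only.
static inline size_t n_rescan_tasks_sketch(size_t region_words, size_t task_words) {
  size_t n_tasks = (region_words + task_words - 1) / task_words; // ceiling division
  // The asserted invariant above, in symbols (for a non-empty region):
  //   (n_tasks - 1) * task_words < region_words <= n_tasks * task_words
  return n_tasks;
}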
duke@435 2801
duke@435 2802 // Set up the space's par_seq_tasks structure for work claiming
duke@435 2803 // for parallel concurrent marking. See CMSConcMarkTask where this is currently used.
duke@435 2804 void
duke@435 2805 CompactibleFreeListSpace::
duke@435 2806 initialize_sequential_subtasks_for_marking(int n_threads,
duke@435 2807 HeapWord* low) {
duke@435 2808 // The "size" of each task is fixed according to marking_task_size.
duke@435 2809 assert(n_threads > 0, "Unexpected n_threads argument");
duke@435 2810 const size_t task_size = marking_task_size();
duke@435 2811 assert(task_size > CardTableModRefBS::card_size_in_words &&
duke@435 2812 (task_size % CardTableModRefBS::card_size_in_words == 0),
duke@435 2813 "Otherwise arithmetic below would be incorrect");
duke@435 2814 MemRegion span = _gen->reserved();
duke@435 2815 if (low != NULL) {
duke@435 2816 if (span.contains(low)) {
duke@435 2817 // Align low down to a card boundary so that
duke@435 2818 // we can use block_offset_careful() on span boundaries.
duke@435 2819 HeapWord* aligned_low = (HeapWord*)align_size_down((uintptr_t)low,
duke@435 2820 CardTableModRefBS::card_size);
duke@435 2821 // Clip span prefix at aligned_low
duke@435 2822 span = span.intersection(MemRegion(aligned_low, span.end()));
duke@435 2823 } else if (low > span.end()) {
duke@435 2824 span = MemRegion(low, low); // Null region
duke@435 2825 } // else use entire span
duke@435 2826 }
duke@435 2827 assert(span.is_empty() ||
duke@435 2828 ((uintptr_t)span.start() % CardTableModRefBS::card_size == 0),
duke@435 2829 "span should start at a card boundary");
duke@435 2830 size_t n_tasks = (span.word_size() + task_size - 1)/task_size;
duke@435 2831 assert((n_tasks == 0) == span.is_empty(), "Inconsistency");
duke@435 2832 assert(n_tasks == 0 ||
duke@435 2833 ((span.start() + (n_tasks - 1)*task_size < span.end()) &&
duke@435 2834 (span.start() + n_tasks*task_size >= span.end())),
duke@435 2835 "n_task calculation incorrect");
duke@435 2836 SequentialSubTasksDone* pst = conc_par_seq_tasks();
duke@435 2837 assert(!pst->valid(), "Clobbering existing data?");
duke@435 2838 pst->set_par_threads(n_threads);
duke@435 2839 pst->set_n_tasks((int)n_tasks);
duke@435 2840 }
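
// A small sketch of the alignment applied to 'low' above: round the address
// down to a card boundary before clipping the span, so every task boundary is
// card-aligned and block_offset_careful() can be used at span edges. This
// relies on card_size being a power of two; the value 512 below is only an
// example, and the helper name is hypothetical.
static inline char* align_down_to_card_sketch(char* addr, uintptr_t card_size) {
  return (char*)((uintptr_t)addr & ~(card_size - 1));
}
// E.g. with card_size == 512, an address ending in 0x2f3 maps down to 0x200.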
