src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp

author:      ysr
date:        Tue, 25 Oct 2011 20:15:41 -0700
changeset:   3220 (c08412904149)
parent:      2958 (1e3493ac2d11)
child:       3264 (5a5ed80bea5b)
permissions: -rw-r--r--

7099817: CMS: +FLSVerifyLists +FLSVerifyIndexTable asserts: odd slot non-empty, chunk not on free list
Summary: Suitably weaken asserts that were in each case a tad too strong; fix up some loose uses of parameters in code related to size-indexed free list table.
Reviewed-by: jmasa, brutisso, stefank

duke@435 1 /*
trims@2708 2 * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
duke@435 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
duke@435 4 *
duke@435 5 * This code is free software; you can redistribute it and/or modify it
duke@435 6 * under the terms of the GNU General Public License version 2 only, as
duke@435 7 * published by the Free Software Foundation.
duke@435 8 *
duke@435 9 * This code is distributed in the hope that it will be useful, but WITHOUT
duke@435 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
duke@435 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
duke@435 12 * version 2 for more details (a copy is included in the LICENSE file that
duke@435 13 * accompanied this code).
duke@435 14 *
duke@435 15 * You should have received a copy of the GNU General Public License version
duke@435 16 * 2 along with this work; if not, write to the Free Software Foundation,
duke@435 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
duke@435 18 *
trims@1907 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
trims@1907 20 * or visit www.oracle.com if you need additional information or have any
trims@1907 21 * questions.
duke@435 22 *
duke@435 23 */
duke@435 24
stefank@2314 25 #include "precompiled.hpp"
stefank@2314 26 #include "gc_implementation/concurrentMarkSweep/cmsLockVerifier.hpp"
stefank@2314 27 #include "gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp"
stefank@2314 28 #include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.inline.hpp"
stefank@2314 29 #include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.hpp"
stefank@2314 30 #include "gc_implementation/shared/liveRange.hpp"
stefank@2314 31 #include "gc_implementation/shared/spaceDecorator.hpp"
stefank@2314 32 #include "gc_interface/collectedHeap.hpp"
stefank@2314 33 #include "memory/allocation.inline.hpp"
stefank@2314 34 #include "memory/blockOffsetTable.inline.hpp"
stefank@2314 35 #include "memory/resourceArea.hpp"
stefank@2314 36 #include "memory/universe.inline.hpp"
stefank@2314 37 #include "oops/oop.inline.hpp"
stefank@2314 38 #include "runtime/globals.hpp"
stefank@2314 39 #include "runtime/handles.inline.hpp"
stefank@2314 40 #include "runtime/init.hpp"
stefank@2314 41 #include "runtime/java.hpp"
stefank@2314 42 #include "runtime/vmThread.hpp"
stefank@2314 43 #include "utilities/copy.hpp"
duke@435 44
duke@435 45 /////////////////////////////////////////////////////////////////////////
duke@435 46 //// CompactibleFreeListSpace
duke@435 47 /////////////////////////////////////////////////////////////////////////
duke@435 48
duke@435 49 // highest ranked free list lock rank
duke@435 50 int CompactibleFreeListSpace::_lockRank = Mutex::leaf + 3;
duke@435 51
kvn@1926 52 // Defaults are 0 so things will break badly if incorrectly initialized.
kvn@1926 53 int CompactibleFreeListSpace::IndexSetStart = 0;
kvn@1926 54 int CompactibleFreeListSpace::IndexSetStride = 0;
kvn@1926 55
kvn@1926 56 size_t MinChunkSize = 0;
kvn@1926 57
kvn@1926 58 void CompactibleFreeListSpace::set_cms_values() {
kvn@1926 59 // Set CMS global values
kvn@1926 60 assert(MinChunkSize == 0, "already set");
kvn@1926 61 #define numQuanta(x,y) ((x+y-1)/y)
kvn@1926 62 MinChunkSize = numQuanta(sizeof(FreeChunk), MinObjAlignmentInBytes) * MinObjAlignment;
kvn@1926 63
kvn@1926 64 assert(IndexSetStart == 0 && IndexSetStride == 0, "already set");
ysr@3220 65 IndexSetStart = (int) MinChunkSize;
kvn@1926 66 IndexSetStride = MinObjAlignment;
kvn@1926 67 }
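// Illustrative arithmetic only (hypothetical values, not asserting actual
// platform sizes): numQuanta(x, y) computes ceil(x/y), so MinChunkSize above
// is sizeof(FreeChunk) rounded up to the object alignment, expressed in heap
// words. For example, if sizeof(FreeChunk) were 24 bytes with
// MinObjAlignmentInBytes == 8 and MinObjAlignment == 1 word, then
// MinChunkSize = numQuanta(24, 8) * 1 = 3 words.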
kvn@1926 68
duke@435 69 // Constructor
duke@435 70 CompactibleFreeListSpace::CompactibleFreeListSpace(BlockOffsetSharedArray* bs,
duke@435 71 MemRegion mr, bool use_adaptive_freelists,
duke@435 72 FreeBlockDictionary::DictionaryChoice dictionaryChoice) :
duke@435 73 _dictionaryChoice(dictionaryChoice),
duke@435 74 _adaptive_freelists(use_adaptive_freelists),
duke@435 75 _bt(bs, mr),
duke@435 76 // free list locks are in the range of values taken by _lockRank
duke@435 77 // This range currently is [_leaf+2, _leaf+3]
duke@435 78 // Note: this requires that CFLspace c'tors
duke@435 79 // are called serially in the order in which the locks are
duke@435 80 // acquired in the program text. This is true today.
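// (Concretely: _lockRank starts at Mutex::leaf + 3 and is post-decremented
// below, so the first CFLS constructed gets rank leaf+3 and the next leaf+2.)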
duke@435 81 _freelistLock(_lockRank--, "CompactibleFreeListSpace._lock", true),
duke@435 82 _parDictionaryAllocLock(Mutex::leaf - 1, // == rank(ExpandHeap_lock) - 1
duke@435 83 "CompactibleFreeListSpace._dict_par_lock", true),
duke@435 84 _rescan_task_size(CardTableModRefBS::card_size_in_words * BitsPerWord *
duke@435 85 CMSRescanMultiple),
duke@435 86 _marking_task_size(CardTableModRefBS::card_size_in_words * BitsPerWord *
duke@435 87 CMSConcMarkMultiple),
duke@435 88 _collector(NULL)
duke@435 89 {
duke@435 90 _bt.set_space(this);
jmasa@698 91 initialize(mr, SpaceDecorator::Clear, SpaceDecorator::Mangle);
duke@435 92 // We have all of "mr", all of which we place in the dictionary
duke@435 93 // as one big chunk. We'll need to decide here which of several
duke@435 94 // possible alternative dictionary implementations to use. For
duke@435 95 // now the choice is easy, since we have only one working
duke@435 96 // implementation, namely, the simple binary tree (splaying
duke@435 97 // temporarily disabled).
duke@435 98 switch (dictionaryChoice) {
duke@435 99 case FreeBlockDictionary::dictionarySplayTree:
duke@435 100 case FreeBlockDictionary::dictionarySkipList:
duke@435 101 default:
duke@435 102 warning("dictionaryChoice: selected option not understood; using"
duke@435 103 " default BinaryTreeDictionary implementation instead.");
ysr@1580 104 case FreeBlockDictionary::dictionaryBinaryTree:
duke@435 105 _dictionary = new BinaryTreeDictionary(mr);
duke@435 106 break;
duke@435 107 }
duke@435 108 assert(_dictionary != NULL, "CMS dictionary initialization");
duke@435 109 // The indexed free lists are initially all empty and are lazily
duke@435 110 // filled in on demand. Initialize the array elements to NULL.
duke@435 111 initializeIndexedFreeListArray();
duke@435 112
duke@435 113 // Not using adaptive free lists assumes that allocation is first
duke@435 114 // from the linAB's. Also a cms perm gen which can be compacted
duke@435 115 // has to have the klass's klassKlass allocated at a lower
duke@435 116 // address in the heap than the klass so that the klassKlass is
duke@435 117 // moved to its new location before the klass is moved.
duke@435 118 // Set the _refillSize for the linear allocation blocks
duke@435 119 if (!use_adaptive_freelists) {
duke@435 120 FreeChunk* fc = _dictionary->getChunk(mr.word_size());
duke@435 121 // The small linAB initially has all the space and will allocate
duke@435 122 // a chunk of any size.
duke@435 123 HeapWord* addr = (HeapWord*) fc;
duke@435 124 _smallLinearAllocBlock.set(addr, fc->size() ,
duke@435 125 1024*SmallForLinearAlloc, fc->size());
duke@435 126 // Note that _unallocated_block is not updated here.
duke@435 127 // Allocations from the linear allocation block should
duke@435 128 // update it.
duke@435 129 } else {
duke@435 130 _smallLinearAllocBlock.set(0, 0, 1024*SmallForLinearAlloc,
duke@435 131 SmallForLinearAlloc);
duke@435 132 }
duke@435 133 // CMSIndexedFreeListReplenish should be at least 1
duke@435 134 CMSIndexedFreeListReplenish = MAX2((uintx)1, CMSIndexedFreeListReplenish);
duke@435 135 _promoInfo.setSpace(this);
duke@435 136 if (UseCMSBestFit) {
duke@435 137 _fitStrategy = FreeBlockBestFitFirst;
duke@435 138 } else {
duke@435 139 _fitStrategy = FreeBlockStrategyNone;
duke@435 140 }
ysr@3220 141 check_free_list_consistency();
duke@435 142
duke@435 143 // Initialize locks for parallel case.
jmasa@2188 144
jmasa@2188 145 if (CollectedHeap::use_parallel_gc_threads()) {
duke@435 146 for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
duke@435 147 _indexedFreeListParLocks[i] = new Mutex(Mutex::leaf - 1, // == ExpandHeap_lock - 1
duke@435 148 "a freelist par lock",
duke@435 149 true);
duke@435 150 if (_indexedFreeListParLocks[i] == NULL)
duke@435 151 vm_exit_during_initialization("Could not allocate a par lock");
duke@435 152 DEBUG_ONLY(
duke@435 153 _indexedFreeList[i].set_protecting_lock(_indexedFreeListParLocks[i]);
duke@435 154 )
duke@435 155 }
duke@435 156 _dictionary->set_par_lock(&_parDictionaryAllocLock);
duke@435 157 }
duke@435 158 }
duke@435 159
duke@435 160 // Like CompactibleSpace forward() but always calls cross_threshold() to
duke@435 161 // update the block offset table. Removed initialize_threshold call because
duke@435 162 // CFLS does not use a block offset array for contiguous spaces.
duke@435 163 HeapWord* CompactibleFreeListSpace::forward(oop q, size_t size,
duke@435 164 CompactPoint* cp, HeapWord* compact_top) {
duke@435 165 // q is alive
duke@435 166 // First check if we should switch compaction space
duke@435 167 assert(this == cp->space, "'this' should be current compaction space.");
duke@435 168 size_t compaction_max_size = pointer_delta(end(), compact_top);
duke@435 169 assert(adjustObjectSize(size) == cp->space->adjust_object_size_v(size),
duke@435 170 "virtual adjustObjectSize_v() method is not correct");
duke@435 171 size_t adjusted_size = adjustObjectSize(size);
duke@435 172 assert(compaction_max_size >= MinChunkSize || compaction_max_size == 0,
duke@435 173 "no small fragments allowed");
duke@435 174 assert(minimum_free_block_size() == MinChunkSize,
duke@435 175 "for de-virtualized reference below");
duke@435 176 // Can't leave a nonzero size, residual fragment smaller than MinChunkSize
duke@435 177 if (adjusted_size + MinChunkSize > compaction_max_size &&
duke@435 178 adjusted_size != compaction_max_size) {
duke@435 179 do {
duke@435 180 // switch to next compaction space
duke@435 181 cp->space->set_compaction_top(compact_top);
duke@435 182 cp->space = cp->space->next_compaction_space();
duke@435 183 if (cp->space == NULL) {
duke@435 184 cp->gen = GenCollectedHeap::heap()->prev_gen(cp->gen);
duke@435 185 assert(cp->gen != NULL, "compaction must succeed");
duke@435 186 cp->space = cp->gen->first_compaction_space();
duke@435 187 assert(cp->space != NULL, "generation must have a first compaction space");
duke@435 188 }
duke@435 189 compact_top = cp->space->bottom();
duke@435 190 cp->space->set_compaction_top(compact_top);
duke@435 191 // The correct adjusted_size may not be the same as that for this method
duke@435 192 // (i.e., cp->space may no longer be "this", so adjust the size again).
duke@435 193 // Use the virtual method here; above, the non-virtual method was used
duke@435 194 // to save the virtual dispatch.
duke@435 195 adjusted_size = cp->space->adjust_object_size_v(size);
duke@435 196 compaction_max_size = pointer_delta(cp->space->end(), compact_top);
duke@435 197 assert(cp->space->minimum_free_block_size() == 0, "just checking");
duke@435 198 } while (adjusted_size > compaction_max_size);
duke@435 199 }
duke@435 200
duke@435 201 // store the forwarding pointer into the mark word
duke@435 202 if ((HeapWord*)q != compact_top) {
duke@435 203 q->forward_to(oop(compact_top));
duke@435 204 assert(q->is_gc_marked(), "encoding the pointer should preserve the mark");
duke@435 205 } else {
duke@435 206 // if the object isn't moving we can just set the mark to the default
duke@435 207 // mark and handle it specially later on.
duke@435 208 q->init_mark();
duke@435 209 assert(q->forwardee() == NULL, "should be forwarded to NULL");
duke@435 210 }
duke@435 211
coleenp@548 212 VALIDATE_MARK_SWEEP_ONLY(MarkSweep::register_live_oop(q, adjusted_size));
duke@435 213 compact_top += adjusted_size;
duke@435 214
duke@435 215 // we need to update the offset table so that the beginnings of objects can be
duke@435 216 // found during scavenge. Note that we are updating the offset table based on
duke@435 217 // where the object will be once the compaction phase finishes.
duke@435 218
duke@435 219 // Always call cross_threshold(). A contiguous space calls it only when
duke@435 220 // the compaction_top exceeds the current threshold, but that restriction
duke@435 221 // does not apply to a non-contiguous space.
duke@435 222 cp->threshold =
duke@435 223 cp->space->cross_threshold(compact_top - adjusted_size, compact_top);
duke@435 224 return compact_top;
duke@435 225 }
duke@435 226
duke@435 227 // A modified copy of OffsetTableContigSpace::cross_threshold() with _offsets -> _bt
duke@435 228 // and use of single_block instead of alloc_block. The name here is not really
duke@435 229 // appropriate - maybe a more general name could be invented for both the
duke@435 230 // contiguous and noncontiguous spaces.
duke@435 231
duke@435 232 HeapWord* CompactibleFreeListSpace::cross_threshold(HeapWord* start, HeapWord* the_end) {
duke@435 233 _bt.single_block(start, the_end);
duke@435 234 return end();
duke@435 235 }
duke@435 236
duke@435 237 // Initialize them to NULL.
duke@435 238 void CompactibleFreeListSpace::initializeIndexedFreeListArray() {
duke@435 239 for (size_t i = 0; i < IndexSetSize; i++) {
duke@435 240 // Note that on platforms where objects are double word aligned,
duke@435 241 // the odd array elements are not used. It is convenient, however,
duke@435 242 // to map directly from the object size to the array element.
duke@435 243 _indexedFreeList[i].reset(IndexSetSize);
duke@435 244 _indexedFreeList[i].set_size(i);
duke@435 245 assert(_indexedFreeList[i].count() == 0, "reset check failed");
duke@435 246 assert(_indexedFreeList[i].head() == NULL, "reset check failed");
duke@435 247 assert(_indexedFreeList[i].tail() == NULL, "reset check failed");
duke@435 248 assert(_indexedFreeList[i].hint() == IndexSetSize, "reset check failed");
duke@435 249 }
duke@435 250 }
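// The size-to-slot mapping is direct: _indexedFreeList[k] holds free chunks
// of exactly k heap words, for IndexSetStart <= k < IndexSetSize; larger
// chunks are kept in the dictionary. Only every IndexSetStride-th slot
// starting at IndexSetStart is actually populated (see the note above about
// odd slots on platforms with double-word object alignment).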
duke@435 251
duke@435 252 void CompactibleFreeListSpace::resetIndexedFreeListArray() {
duke@435 253 for (int i = 1; i < IndexSetSize; i++) {
duke@435 254 assert(_indexedFreeList[i].size() == (size_t) i,
duke@435 255 "Indexed free list sizes are incorrect");
duke@435 256 _indexedFreeList[i].reset(IndexSetSize);
duke@435 257 assert(_indexedFreeList[i].count() == 0, "reset check failed");
duke@435 258 assert(_indexedFreeList[i].head() == NULL, "reset check failed");
duke@435 259 assert(_indexedFreeList[i].tail() == NULL, "reset check failed");
duke@435 260 assert(_indexedFreeList[i].hint() == IndexSetSize, "reset check failed");
duke@435 261 }
duke@435 262 }
duke@435 263
duke@435 264 void CompactibleFreeListSpace::reset(MemRegion mr) {
duke@435 265 resetIndexedFreeListArray();
duke@435 266 dictionary()->reset();
duke@435 267 if (BlockOffsetArrayUseUnallocatedBlock) {
duke@435 268 assert(end() == mr.end(), "We are compacting to the bottom of CMS gen");
duke@435 269 // Everything's allocated until proven otherwise.
duke@435 270 _bt.set_unallocated_block(end());
duke@435 271 }
duke@435 272 if (!mr.is_empty()) {
duke@435 273 assert(mr.word_size() >= MinChunkSize, "Chunk size is too small");
duke@435 274 _bt.single_block(mr.start(), mr.word_size());
duke@435 275 FreeChunk* fc = (FreeChunk*) mr.start();
duke@435 276 fc->setSize(mr.word_size());
duke@435 277 if (mr.word_size() >= IndexSetSize ) {
duke@435 278 returnChunkToDictionary(fc);
duke@435 279 } else {
duke@435 280 _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
duke@435 281 _indexedFreeList[mr.word_size()].returnChunkAtHead(fc);
duke@435 282 }
duke@435 283 }
duke@435 284 _promoInfo.reset();
duke@435 285 _smallLinearAllocBlock._ptr = NULL;
duke@435 286 _smallLinearAllocBlock._word_size = 0;
duke@435 287 }
duke@435 288
duke@435 289 void CompactibleFreeListSpace::reset_after_compaction() {
duke@435 290 // Reset the space to the new reality - one free chunk.
duke@435 291 MemRegion mr(compaction_top(), end());
duke@435 292 reset(mr);
duke@435 293 // Now refill the linear allocation block(s) if possible.
duke@435 294 if (_adaptive_freelists) {
duke@435 295 refillLinearAllocBlocksIfNeeded();
duke@435 296 } else {
duke@435 297 // Place as much of mr in the linAB as we can get,
duke@435 298 // provided it was big enough to go into the dictionary.
duke@435 299 FreeChunk* fc = dictionary()->findLargestDict();
duke@435 300 if (fc != NULL) {
duke@435 301 assert(fc->size() == mr.word_size(),
duke@435 302 "Why was the chunk broken up?");
duke@435 303 removeChunkFromDictionary(fc);
duke@435 304 HeapWord* addr = (HeapWord*) fc;
duke@435 305 _smallLinearAllocBlock.set(addr, fc->size() ,
duke@435 306 1024*SmallForLinearAlloc, fc->size());
duke@435 307 // Note that _unallocated_block is not updated here.
duke@435 308 }
duke@435 309 }
duke@435 310 }
duke@435 311
duke@435 312 // Walks the entire dictionary, returning a coterminal
duke@435 313 // chunk, if it exists. Use with caution since it involves
duke@435 314 // a potentially complete walk of a potentially large tree.
duke@435 315 FreeChunk* CompactibleFreeListSpace::find_chunk_at_end() {
duke@435 316
duke@435 317 assert_lock_strong(&_freelistLock);
duke@435 318
duke@435 319 return dictionary()->find_chunk_ends_at(end());
duke@435 320 }
duke@435 321
duke@435 322
duke@435 323 #ifndef PRODUCT
duke@435 324 void CompactibleFreeListSpace::initializeIndexedFreeListArrayReturnedBytes() {
duke@435 325 for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
duke@435 326 _indexedFreeList[i].allocation_stats()->set_returnedBytes(0);
duke@435 327 }
duke@435 328 }
duke@435 329
duke@435 330 size_t CompactibleFreeListSpace::sumIndexedFreeListArrayReturnedBytes() {
duke@435 331 size_t sum = 0;
duke@435 332 for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
duke@435 333 sum += _indexedFreeList[i].allocation_stats()->returnedBytes();
duke@435 334 }
duke@435 335 return sum;
duke@435 336 }
duke@435 337
duke@435 338 size_t CompactibleFreeListSpace::totalCountInIndexedFreeLists() const {
duke@435 339 size_t count = 0;
kvn@1926 340 for (int i = (int)MinChunkSize; i < IndexSetSize; i++) {
duke@435 341 debug_only(
duke@435 342 ssize_t total_list_count = 0;
duke@435 343 for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
duke@435 344 fc = fc->next()) {
duke@435 345 total_list_count++;
duke@435 346 }
duke@435 347 assert(total_list_count == _indexedFreeList[i].count(),
duke@435 348 "Count in list is incorrect");
duke@435 349 )
duke@435 350 count += _indexedFreeList[i].count();
duke@435 351 }
duke@435 352 return count;
duke@435 353 }
duke@435 354
duke@435 355 size_t CompactibleFreeListSpace::totalCount() {
duke@435 356 size_t num = totalCountInIndexedFreeLists();
duke@435 357 num += dictionary()->totalCount();
duke@435 358 if (_smallLinearAllocBlock._word_size != 0) {
duke@435 359 num++;
duke@435 360 }
duke@435 361 return num;
duke@435 362 }
duke@435 363 #endif
duke@435 364
duke@435 365 bool CompactibleFreeListSpace::is_free_block(const HeapWord* p) const {
duke@435 366 FreeChunk* fc = (FreeChunk*) p;
duke@435 367 return fc->isFree();
duke@435 368 }
duke@435 369
duke@435 370 size_t CompactibleFreeListSpace::used() const {
duke@435 371 return capacity() - free();
duke@435 372 }
duke@435 373
duke@435 374 size_t CompactibleFreeListSpace::free() const {
duke@435 375 // "MT-safe, but not MT-precise"(TM), if you will: i.e.
duke@435 376 // if you do this while the structures are in flux you
duke@435 377 // may get an approximate answer only; for instance
duke@435 378 // because there is concurrent allocation either
duke@435 379 // directly by mutators or for promotion during a GC.
duke@435 380 // It's "MT-safe", however, in the sense that you are guaranteed
duke@435 381 // not to crash and burn, for instance, because of walking
duke@435 382 // pointers that could disappear as you were walking them.
duke@435 383 // The approximation is because the various components
duke@435 384 // that are read below are not read atomically (and
duke@435 385 // further the computation of totalSizeInIndexedFreeLists()
duke@435 386 // is itself a non-atomic computation). The normal use of
duke@435 387 // this is during a resize operation at the end of GC
duke@435 388 // and at that time you are guaranteed to get the
duke@435 389 // correct actual value. However, for instance, this is
duke@435 390 // also read completely asynchronously by the "perf-sampler"
duke@435 391 // that supports jvmstat, and you are apt to see the values
duke@435 392 // flicker in such cases.
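// (The value returned below is in bytes: the dictionary, the indexed free
// lists and the linAB each track free space in heap words, hence the
// multiplication by HeapWordSize.)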
duke@435 393 assert(_dictionary != NULL, "No _dictionary?");
duke@435 394 return (_dictionary->totalChunkSize(DEBUG_ONLY(freelistLock())) +
duke@435 395 totalSizeInIndexedFreeLists() +
duke@435 396 _smallLinearAllocBlock._word_size) * HeapWordSize;
duke@435 397 }
duke@435 398
duke@435 399 size_t CompactibleFreeListSpace::max_alloc_in_words() const {
duke@435 400 assert(_dictionary != NULL, "No _dictionary?");
duke@435 401 assert_locked();
duke@435 402 size_t res = _dictionary->maxChunkSize();
duke@435 403 res = MAX2(res, MIN2(_smallLinearAllocBlock._word_size,
duke@435 404 (size_t) SmallForLinearAlloc - 1));
duke@435 405 // XXX the following could potentially be pretty slow;
duke@435 406 // should one, pessimally for the rare cases when res
duke@435 407 // calculated above is less than IndexSetSize,
duke@435 408 // just return res calculated above? My reasoning was that
duke@435 409 // those cases will be so rare that the extra time spent doesn't
duke@435 410 // really matter....
duke@435 411 // Note: do not change the loop test i >= res + IndexSetStride
duke@435 412 // to i > res below, because i is unsigned and res may be zero.
duke@435 413 for (size_t i = IndexSetSize - 1; i >= res + IndexSetStride;
duke@435 414 i -= IndexSetStride) {
duke@435 415 if (_indexedFreeList[i].head() != NULL) {
duke@435 416 assert(_indexedFreeList[i].count() != 0, "Inconsistent FreeList");
duke@435 417 return i;
duke@435 418 }
duke@435 419 }
duke@435 420 return res;
duke@435 421 }
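// Illustration with hypothetical values: if res computed above came to 2
// words but the largest non-empty indexed free list held chunks of, say,
// 100 words, the loop returns 100, since it scans the indexed lists from
// the largest size downwards and stops at the first non-empty one.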
duke@435 422
ysr@2071 423 void LinearAllocBlock::print_on(outputStream* st) const {
ysr@2071 424 st->print_cr(" LinearAllocBlock: ptr = " PTR_FORMAT ", word_size = " SIZE_FORMAT
ysr@2071 425 ", refillsize = " SIZE_FORMAT ", allocation_size_limit = " SIZE_FORMAT,
ysr@2071 426 _ptr, _word_size, _refillSize, _allocation_size_limit);
ysr@2071 427 }
ysr@2071 428
ysr@2071 429 void CompactibleFreeListSpace::print_on(outputStream* st) const {
ysr@2071 430 st->print_cr("COMPACTIBLE FREELIST SPACE");
ysr@2071 431 st->print_cr(" Space:");
ysr@2071 432 Space::print_on(st);
ysr@2071 433
ysr@2071 434 st->print_cr("promoInfo:");
ysr@2071 435 _promoInfo.print_on(st);
ysr@2071 436
ysr@2071 437 st->print_cr("_smallLinearAllocBlock");
ysr@2071 438 _smallLinearAllocBlock.print_on(st);
ysr@2071 439
ysr@2071 440 // dump_memory_block(_smallLinearAllocBlock->_ptr, 128);
ysr@2071 441
ysr@2071 442 st->print_cr(" _fitStrategy = %s, _adaptive_freelists = %s",
ysr@2071 443 _fitStrategy?"true":"false", _adaptive_freelists?"true":"false");
ysr@2071 444 }
ysr@2071 445
ysr@1580 446 void CompactibleFreeListSpace::print_indexed_free_lists(outputStream* st)
ysr@1580 447 const {
ysr@1580 448 reportIndexedFreeListStatistics();
ysr@1580 449 gclog_or_tty->print_cr("Layout of Indexed Freelists");
ysr@1580 450 gclog_or_tty->print_cr("---------------------------");
ysr@1580 451 FreeList::print_labels_on(st, "size");
ysr@1580 452 for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
ysr@1580 453 _indexedFreeList[i].print_on(gclog_or_tty);
ysr@1580 454 for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
ysr@1580 455 fc = fc->next()) {
ysr@1580 456 gclog_or_tty->print_cr("\t[" PTR_FORMAT "," PTR_FORMAT ") %s",
ysr@1580 457 fc, (HeapWord*)fc + i,
ysr@1580 458 fc->cantCoalesce() ? "\t CC" : "");
ysr@1580 459 }
ysr@1580 460 }
ysr@1580 461 }
ysr@1580 462
ysr@1580 463 void CompactibleFreeListSpace::print_promo_info_blocks(outputStream* st)
ysr@1580 464 const {
ysr@1580 465 _promoInfo.print_on(st);
ysr@1580 466 }
ysr@1580 467
ysr@1580 468 void CompactibleFreeListSpace::print_dictionary_free_lists(outputStream* st)
ysr@1580 469 const {
ysr@1580 470 _dictionary->reportStatistics();
ysr@1580 471 st->print_cr("Layout of Freelists in Tree");
ysr@1580 472 st->print_cr("---------------------------");
ysr@1580 473 _dictionary->print_free_lists(st);
ysr@1580 474 }
ysr@1580 475
ysr@1580 476 class BlkPrintingClosure: public BlkClosure {
ysr@1580 477 const CMSCollector* _collector;
ysr@1580 478 const CompactibleFreeListSpace* _sp;
ysr@1580 479 const CMSBitMap* _live_bit_map;
ysr@1580 480 const bool _post_remark;
ysr@1580 481 outputStream* _st;
ysr@1580 482 public:
ysr@1580 483 BlkPrintingClosure(const CMSCollector* collector,
ysr@1580 484 const CompactibleFreeListSpace* sp,
ysr@1580 485 const CMSBitMap* live_bit_map,
ysr@1580 486 outputStream* st):
ysr@1580 487 _collector(collector),
ysr@1580 488 _sp(sp),
ysr@1580 489 _live_bit_map(live_bit_map),
ysr@1580 490 _post_remark(collector->abstract_state() > CMSCollector::FinalMarking),
ysr@1580 491 _st(st) { }
ysr@1580 492 size_t do_blk(HeapWord* addr);
ysr@1580 493 };
ysr@1580 494
ysr@1580 495 size_t BlkPrintingClosure::do_blk(HeapWord* addr) {
ysr@1580 496 size_t sz = _sp->block_size_no_stall(addr, _collector);
ysr@1580 497 assert(sz != 0, "Should always be able to compute a size");
ysr@1580 498 if (_sp->block_is_obj(addr)) {
ysr@1580 499 const bool dead = _post_remark && !_live_bit_map->isMarked(addr);
ysr@1580 500 _st->print_cr(PTR_FORMAT ": %s object of size " SIZE_FORMAT "%s",
ysr@1580 501 addr,
ysr@1580 502 dead ? "dead" : "live",
ysr@1580 503 sz,
ysr@1580 504 (!dead && CMSPrintObjectsInDump) ? ":" : ".");
ysr@1580 505 if (CMSPrintObjectsInDump && !dead) {
ysr@1580 506 oop(addr)->print_on(_st);
ysr@1580 507 _st->print_cr("--------------------------------------");
ysr@1580 508 }
ysr@1580 509 } else { // free block
ysr@1580 510 _st->print_cr(PTR_FORMAT ": free block of size " SIZE_FORMAT "%s",
ysr@1580 511 addr, sz, CMSPrintChunksInDump ? ":" : ".");
ysr@1580 512 if (CMSPrintChunksInDump) {
ysr@1580 513 ((FreeChunk*)addr)->print_on(_st);
ysr@1580 514 _st->print_cr("--------------------------------------");
ysr@1580 515 }
ysr@1580 516 }
ysr@1580 517 return sz;
ysr@1580 518 }
ysr@1580 519
ysr@1580 520 void CompactibleFreeListSpace::dump_at_safepoint_with_locks(CMSCollector* c,
ysr@1580 521 outputStream* st) {
ysr@1580 522 st->print_cr("\n=========================");
ysr@1580 523 st->print_cr("Block layout in CMS Heap:");
ysr@1580 524 st->print_cr("=========================");
ysr@1580 525 BlkPrintingClosure bpcl(c, this, c->markBitMap(), st);
ysr@1580 526 blk_iterate(&bpcl);
ysr@1580 527
ysr@1580 528 st->print_cr("\n=======================================");
ysr@1580 529 st->print_cr("Order & Layout of Promotion Info Blocks");
ysr@1580 530 st->print_cr("=======================================");
ysr@1580 531 print_promo_info_blocks(st);
ysr@1580 532
ysr@1580 533 st->print_cr("\n===========================");
ysr@1580 534 st->print_cr("Order of Indexed Free Lists");
ysr@1580 535 st->print_cr("===========================");
ysr@1580 536 print_indexed_free_lists(st);
ysr@1580 537
ysr@1580 538 st->print_cr("\n=================================");
ysr@1580 539 st->print_cr("Order of Free Lists in Dictionary");
ysr@1580 540 st->print_cr("=================================");
ysr@1580 541 print_dictionary_free_lists(st);
ysr@1580 542 }
ysr@1580 543
ysr@1580 544
duke@435 545 void CompactibleFreeListSpace::reportFreeListStatistics() const {
duke@435 546 assert_lock_strong(&_freelistLock);
duke@435 547 assert(PrintFLSStatistics != 0, "Reporting error");
duke@435 548 _dictionary->reportStatistics();
duke@435 549 if (PrintFLSStatistics > 1) {
duke@435 550 reportIndexedFreeListStatistics();
duke@435 551 size_t totalSize = totalSizeInIndexedFreeLists() +
duke@435 552 _dictionary->totalChunkSize(DEBUG_ONLY(freelistLock()));
duke@435 553 gclog_or_tty->print(" free=" SIZE_FORMAT " frag=%1.4f\n", totalSize, flsFrag());
duke@435 554 }
duke@435 555 }
duke@435 556
duke@435 557 void CompactibleFreeListSpace::reportIndexedFreeListStatistics() const {
duke@435 558 assert_lock_strong(&_freelistLock);
duke@435 559 gclog_or_tty->print("Statistics for IndexedFreeLists:\n"
duke@435 560 "--------------------------------\n");
duke@435 561 size_t totalSize = totalSizeInIndexedFreeLists();
duke@435 562 size_t freeBlocks = numFreeBlocksInIndexedFreeLists();
duke@435 563 gclog_or_tty->print("Total Free Space: " SIZE_FORMAT "\n", totalSize);
duke@435 564 gclog_or_tty->print("Max Chunk Size: " SIZE_FORMAT "\n", maxChunkSizeInIndexedFreeLists());
duke@435 565 gclog_or_tty->print("Number of Blocks: " SIZE_FORMAT "\n", freeBlocks);
duke@435 566 if (freeBlocks != 0) {
duke@435 567 gclog_or_tty->print("Av. Block Size: " SIZE_FORMAT "\n", totalSize/freeBlocks);
duke@435 568 }
duke@435 569 }
duke@435 570
duke@435 571 size_t CompactibleFreeListSpace::numFreeBlocksInIndexedFreeLists() const {
duke@435 572 size_t res = 0;
duke@435 573 for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
duke@435 574 debug_only(
duke@435 575 ssize_t recount = 0;
duke@435 576 for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
duke@435 577 fc = fc->next()) {
duke@435 578 recount += 1;
duke@435 579 }
duke@435 580 assert(recount == _indexedFreeList[i].count(),
duke@435 581 "Incorrect count in list");
duke@435 582 )
duke@435 583 res += _indexedFreeList[i].count();
duke@435 584 }
duke@435 585 return res;
duke@435 586 }
duke@435 587
duke@435 588 size_t CompactibleFreeListSpace::maxChunkSizeInIndexedFreeLists() const {
duke@435 589 for (size_t i = IndexSetSize - 1; i != 0; i -= IndexSetStride) {
duke@435 590 if (_indexedFreeList[i].head() != NULL) {
duke@435 591 assert(_indexedFreeList[i].count() != 0, "Inconsistent FreeList");
duke@435 592 return (size_t)i;
duke@435 593 }
duke@435 594 }
duke@435 595 return 0;
duke@435 596 }
duke@435 597
duke@435 598 void CompactibleFreeListSpace::set_end(HeapWord* value) {
duke@435 599 HeapWord* prevEnd = end();
duke@435 600 assert(prevEnd != value, "unnecessary set_end call");
ysr@2071 601 assert(prevEnd == NULL || !BlockOffsetArrayUseUnallocatedBlock || value >= unallocated_block(),
ysr@2071 602 "New end is below unallocated block");
duke@435 603 _end = value;
duke@435 604 if (prevEnd != NULL) {
duke@435 605 // Resize the underlying block offset table.
duke@435 606 _bt.resize(pointer_delta(value, bottom()));
ysr@1580 607 if (value <= prevEnd) {
ysr@2071 608 assert(!BlockOffsetArrayUseUnallocatedBlock || value >= unallocated_block(),
ysr@2071 609 "New end is below unallocated block");
ysr@1580 610 } else {
ysr@1580 611 // Now, take this new chunk and add it to the free blocks.
ysr@1580 612 // Note that the BOT has not yet been updated for this block.
ysr@1580 613 size_t newFcSize = pointer_delta(value, prevEnd);
ysr@1580 614 // XXX This is REALLY UGLY and should be fixed up. XXX
ysr@1580 615 if (!_adaptive_freelists && _smallLinearAllocBlock._ptr == NULL) {
ysr@1580 616 // Mark the boundary of the new block in BOT
ysr@1580 617 _bt.mark_block(prevEnd, value);
ysr@1580 618 // put it all in the linAB
ysr@1580 619 if (ParallelGCThreads == 0) {
ysr@1580 620 _smallLinearAllocBlock._ptr = prevEnd;
ysr@1580 621 _smallLinearAllocBlock._word_size = newFcSize;
ysr@1580 622 repairLinearAllocBlock(&_smallLinearAllocBlock);
ysr@1580 623 } else { // ParallelGCThreads > 0
ysr@1580 624 MutexLockerEx x(parDictionaryAllocLock(),
ysr@1580 625 Mutex::_no_safepoint_check_flag);
ysr@1580 626 _smallLinearAllocBlock._ptr = prevEnd;
ysr@1580 627 _smallLinearAllocBlock._word_size = newFcSize;
ysr@1580 628 repairLinearAllocBlock(&_smallLinearAllocBlock);
ysr@1580 629 }
ysr@1580 630 // Births of chunks put into a LinAB are not recorded. Births
ysr@1580 631 // of chunks as they are allocated out of a LinAB are.
ysr@1580 632 } else {
ysr@1580 633 // Add the block to the free lists, if possible coalescing it
ysr@1580 634 // with the last free block, and update the BOT and census data.
ysr@1580 635 addChunkToFreeListsAtEndRecordingStats(prevEnd, newFcSize);
duke@435 636 }
duke@435 637 }
duke@435 638 }
duke@435 639 }
duke@435 640
duke@435 641 class FreeListSpace_DCTOC : public Filtering_DCTOC {
duke@435 642 CompactibleFreeListSpace* _cfls;
duke@435 643 CMSCollector* _collector;
duke@435 644 protected:
duke@435 645 // Override.
duke@435 646 #define walk_mem_region_with_cl_DECL(ClosureType) \
duke@435 647 virtual void walk_mem_region_with_cl(MemRegion mr, \
duke@435 648 HeapWord* bottom, HeapWord* top, \
duke@435 649 ClosureType* cl); \
duke@435 650 void walk_mem_region_with_cl_par(MemRegion mr, \
duke@435 651 HeapWord* bottom, HeapWord* top, \
duke@435 652 ClosureType* cl); \
duke@435 653 void walk_mem_region_with_cl_nopar(MemRegion mr, \
duke@435 654 HeapWord* bottom, HeapWord* top, \
duke@435 655 ClosureType* cl)
duke@435 656 walk_mem_region_with_cl_DECL(OopClosure);
duke@435 657 walk_mem_region_with_cl_DECL(FilteringClosure);
duke@435 658
duke@435 659 public:
duke@435 660 FreeListSpace_DCTOC(CompactibleFreeListSpace* sp,
duke@435 661 CMSCollector* collector,
duke@435 662 OopClosure* cl,
duke@435 663 CardTableModRefBS::PrecisionStyle precision,
duke@435 664 HeapWord* boundary) :
duke@435 665 Filtering_DCTOC(sp, cl, precision, boundary),
duke@435 666 _cfls(sp), _collector(collector) {}
duke@435 667 };
duke@435 668
duke@435 669 // We de-virtualize the block-related calls below, since we know that our
duke@435 670 // space is a CompactibleFreeListSpace.
duke@435 671 #define FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(ClosureType) \
duke@435 672 void FreeListSpace_DCTOC::walk_mem_region_with_cl(MemRegion mr, \
duke@435 673 HeapWord* bottom, \
duke@435 674 HeapWord* top, \
duke@435 675 ClosureType* cl) { \
duke@435 676 if (SharedHeap::heap()->n_par_threads() > 0) { \
duke@435 677 walk_mem_region_with_cl_par(mr, bottom, top, cl); \
duke@435 678 } else { \
duke@435 679 walk_mem_region_with_cl_nopar(mr, bottom, top, cl); \
duke@435 680 } \
duke@435 681 } \
duke@435 682 void FreeListSpace_DCTOC::walk_mem_region_with_cl_par(MemRegion mr, \
duke@435 683 HeapWord* bottom, \
duke@435 684 HeapWord* top, \
duke@435 685 ClosureType* cl) { \
duke@435 686 /* Skip parts that are before "mr", in case "block_start" sent us \
duke@435 687 back too far. */ \
duke@435 688 HeapWord* mr_start = mr.start(); \
duke@435 689 size_t bot_size = _cfls->CompactibleFreeListSpace::block_size(bottom); \
duke@435 690 HeapWord* next = bottom + bot_size; \
duke@435 691 while (next < mr_start) { \
duke@435 692 bottom = next; \
duke@435 693 bot_size = _cfls->CompactibleFreeListSpace::block_size(bottom); \
duke@435 694 next = bottom + bot_size; \
duke@435 695 } \
duke@435 696 \
duke@435 697 while (bottom < top) { \
duke@435 698 if (_cfls->CompactibleFreeListSpace::block_is_obj(bottom) && \
duke@435 699 !_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks( \
duke@435 700 oop(bottom)) && \
duke@435 701 !_collector->CMSCollector::is_dead_obj(oop(bottom))) { \
duke@435 702 size_t word_sz = oop(bottom)->oop_iterate(cl, mr); \
duke@435 703 bottom += _cfls->adjustObjectSize(word_sz); \
duke@435 704 } else { \
duke@435 705 bottom += _cfls->CompactibleFreeListSpace::block_size(bottom); \
duke@435 706 } \
duke@435 707 } \
duke@435 708 } \
duke@435 709 void FreeListSpace_DCTOC::walk_mem_region_with_cl_nopar(MemRegion mr, \
duke@435 710 HeapWord* bottom, \
duke@435 711 HeapWord* top, \
duke@435 712 ClosureType* cl) { \
duke@435 713 /* Skip parts that are before "mr", in case "block_start" sent us \
duke@435 714 back too far. */ \
duke@435 715 HeapWord* mr_start = mr.start(); \
duke@435 716 size_t bot_size = _cfls->CompactibleFreeListSpace::block_size_nopar(bottom); \
duke@435 717 HeapWord* next = bottom + bot_size; \
duke@435 718 while (next < mr_start) { \
duke@435 719 bottom = next; \
duke@435 720 bot_size = _cfls->CompactibleFreeListSpace::block_size_nopar(bottom); \
duke@435 721 next = bottom + bot_size; \
duke@435 722 } \
duke@435 723 \
duke@435 724 while (bottom < top) { \
duke@435 725 if (_cfls->CompactibleFreeListSpace::block_is_obj_nopar(bottom) && \
duke@435 726 !_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks( \
duke@435 727 oop(bottom)) && \
duke@435 728 !_collector->CMSCollector::is_dead_obj(oop(bottom))) { \
duke@435 729 size_t word_sz = oop(bottom)->oop_iterate(cl, mr); \
duke@435 730 bottom += _cfls->adjustObjectSize(word_sz); \
duke@435 731 } else { \
duke@435 732 bottom += _cfls->CompactibleFreeListSpace::block_size_nopar(bottom); \
duke@435 733 } \
duke@435 734 } \
duke@435 735 }
duke@435 736
duke@435 737 // (There are only two of these, rather than N, because the split is due
duke@435 738 // only to the introduction of the FilteringClosure, a local part of the
duke@435 739 // impl of this abstraction.)
duke@435 740 FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(OopClosure)
duke@435 741 FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(FilteringClosure)
duke@435 742
duke@435 743 DirtyCardToOopClosure*
duke@435 744 CompactibleFreeListSpace::new_dcto_cl(OopClosure* cl,
duke@435 745 CardTableModRefBS::PrecisionStyle precision,
duke@435 746 HeapWord* boundary) {
duke@435 747 return new FreeListSpace_DCTOC(this, _collector, cl, precision, boundary);
duke@435 748 }
duke@435 749
duke@435 750
duke@435 751 // Note on locking for the space iteration functions:
duke@435 752 // since the collector's iteration activities are concurrent with
duke@435 753 // allocation activities by mutators, absent a suitable mutual exclusion
duke@435 754 // mechanism the iterators may go awry. For instace a block being iterated
duke@435 755 // may suddenly be allocated or divided up and part of it allocated and
duke@435 756 // so on.
duke@435 757
duke@435 758 // Apply the given closure to each block in the space.
duke@435 759 void CompactibleFreeListSpace::blk_iterate_careful(BlkClosureCareful* cl) {
duke@435 760 assert_lock_strong(freelistLock());
duke@435 761 HeapWord *cur, *limit;
duke@435 762 for (cur = bottom(), limit = end(); cur < limit;
duke@435 763 cur += cl->do_blk_careful(cur));
duke@435 764 }
duke@435 765
duke@435 766 // Apply the given closure to each block in the space.
duke@435 767 void CompactibleFreeListSpace::blk_iterate(BlkClosure* cl) {
duke@435 768 assert_lock_strong(freelistLock());
duke@435 769 HeapWord *cur, *limit;
duke@435 770 for (cur = bottom(), limit = end(); cur < limit;
duke@435 771 cur += cl->do_blk(cur));
duke@435 772 }
duke@435 773
duke@435 774 // Apply the given closure to each oop in the space.
duke@435 775 void CompactibleFreeListSpace::oop_iterate(OopClosure* cl) {
duke@435 776 assert_lock_strong(freelistLock());
duke@435 777 HeapWord *cur, *limit;
duke@435 778 size_t curSize;
duke@435 779 for (cur = bottom(), limit = end(); cur < limit;
duke@435 780 cur += curSize) {
duke@435 781 curSize = block_size(cur);
duke@435 782 if (block_is_obj(cur)) {
duke@435 783 oop(cur)->oop_iterate(cl);
duke@435 784 }
duke@435 785 }
duke@435 786 }
duke@435 787
duke@435 788 // Apply the given closure to each oop in the space \intersect memory region.
duke@435 789 void CompactibleFreeListSpace::oop_iterate(MemRegion mr, OopClosure* cl) {
duke@435 790 assert_lock_strong(freelistLock());
duke@435 791 if (is_empty()) {
duke@435 792 return;
duke@435 793 }
duke@435 794 MemRegion cur = MemRegion(bottom(), end());
duke@435 795 mr = mr.intersection(cur);
duke@435 796 if (mr.is_empty()) {
duke@435 797 return;
duke@435 798 }
duke@435 799 if (mr.equals(cur)) {
duke@435 800 oop_iterate(cl);
duke@435 801 return;
duke@435 802 }
duke@435 803 assert(mr.end() <= end(), "just took an intersection above");
duke@435 804 HeapWord* obj_addr = block_start(mr.start());
duke@435 805 HeapWord* t = mr.end();
duke@435 806
duke@435 807 SpaceMemRegionOopsIterClosure smr_blk(cl, mr);
duke@435 808 if (block_is_obj(obj_addr)) {
duke@435 809 // Handle first object specially.
duke@435 810 oop obj = oop(obj_addr);
duke@435 811 obj_addr += adjustObjectSize(obj->oop_iterate(&smr_blk));
duke@435 812 } else {
duke@435 813 FreeChunk* fc = (FreeChunk*)obj_addr;
duke@435 814 obj_addr += fc->size();
duke@435 815 }
duke@435 816 while (obj_addr < t) {
duke@435 817 HeapWord* obj = obj_addr;
duke@435 818 obj_addr += block_size(obj_addr);
duke@435 819 // If "obj_addr" is not greater than top, then the
duke@435 820 // entire object "obj" is within the region.
duke@435 821 if (obj_addr <= t) {
duke@435 822 if (block_is_obj(obj)) {
duke@435 823 oop(obj)->oop_iterate(cl);
duke@435 824 }
duke@435 825 } else {
duke@435 826 // "obj" extends beyond end of region
duke@435 827 if (block_is_obj(obj)) {
duke@435 828 oop(obj)->oop_iterate(&smr_blk);
duke@435 829 }
duke@435 830 break;
duke@435 831 }
duke@435 832 }
duke@435 833 }
duke@435 834
duke@435 835 // NOTE: In the following methods, in order to safely be able to
duke@435 836 // apply the closure to an object, we need to be sure that the
duke@435 837 // object has been initialized. We are guaranteed that an object
duke@435 838 // is initialized if we are holding the Heap_lock with the
duke@435 839 // world stopped.
duke@435 840 void CompactibleFreeListSpace::verify_objects_initialized() const {
duke@435 841 if (is_init_completed()) {
duke@435 842 assert_locked_or_safepoint(Heap_lock);
duke@435 843 if (Universe::is_fully_initialized()) {
duke@435 844 guarantee(SafepointSynchronize::is_at_safepoint(),
duke@435 845 "Required for objects to be initialized");
duke@435 846 }
duke@435 847 } // else make a concession at vm start-up
duke@435 848 }
duke@435 849
duke@435 850 // Apply the given closure to each object in the space
duke@435 851 void CompactibleFreeListSpace::object_iterate(ObjectClosure* blk) {
duke@435 852 assert_lock_strong(freelistLock());
duke@435 853 NOT_PRODUCT(verify_objects_initialized());
duke@435 854 HeapWord *cur, *limit;
duke@435 855 size_t curSize;
duke@435 856 for (cur = bottom(), limit = end(); cur < limit;
duke@435 857 cur += curSize) {
duke@435 858 curSize = block_size(cur);
duke@435 859 if (block_is_obj(cur)) {
duke@435 860 blk->do_object(oop(cur));
duke@435 861 }
duke@435 862 }
duke@435 863 }
duke@435 864
jmasa@952 865 // Apply the given closure to each live object in the space
jmasa@952 866 // The usage of CompactibleFreeListSpace
jmasa@952 867 // by the ConcurrentMarkSweepGeneration for concurrent GC's allows
jmasa@952 868 // objects in the space to hold references to objects that are no longer
jmasa@952 869 // valid. For example, an object may reference another object
jmasa@952 870 // that has already been swept up (collected). This method uses
jmasa@952 871 // obj_is_alive() to determine whether it is safe to apply the closure to
jmasa@952 872 // an object. See obj_is_alive() for details on how liveness of an
jmasa@952 873 // object is decided.
jmasa@952 874
jmasa@952 875 void CompactibleFreeListSpace::safe_object_iterate(ObjectClosure* blk) {
jmasa@952 876 assert_lock_strong(freelistLock());
jmasa@952 877 NOT_PRODUCT(verify_objects_initialized());
jmasa@952 878 HeapWord *cur, *limit;
jmasa@952 879 size_t curSize;
jmasa@952 880 for (cur = bottom(), limit = end(); cur < limit;
jmasa@952 881 cur += curSize) {
jmasa@952 882 curSize = block_size(cur);
jmasa@952 883 if (block_is_obj(cur) && obj_is_alive(cur)) {
jmasa@952 884 blk->do_object(oop(cur));
jmasa@952 885 }
jmasa@952 886 }
jmasa@952 887 }
jmasa@952 888
duke@435 889 void CompactibleFreeListSpace::object_iterate_mem(MemRegion mr,
duke@435 890 UpwardsObjectClosure* cl) {
ysr@1580 891 assert_locked(freelistLock());
duke@435 892 NOT_PRODUCT(verify_objects_initialized());
duke@435 893 Space::object_iterate_mem(mr, cl);
duke@435 894 }
duke@435 895
duke@435 896 // Callers of this iterator beware: The closure application should
duke@435 897 // be robust in the face of uninitialized objects and should (always)
duke@435 898 // return a correct size so that the next addr + size below gives us a
duke@435 899 // valid block boundary. [See for instance,
duke@435 900 // ScanMarkedObjectsAgainCarefullyClosure::do_object_careful()
duke@435 901 // in ConcurrentMarkSweepGeneration.cpp.]
duke@435 902 HeapWord*
duke@435 903 CompactibleFreeListSpace::object_iterate_careful(ObjectClosureCareful* cl) {
duke@435 904 assert_lock_strong(freelistLock());
duke@435 905 HeapWord *addr, *last;
duke@435 906 size_t size;
duke@435 907 for (addr = bottom(), last = end();
duke@435 908 addr < last; addr += size) {
duke@435 909 FreeChunk* fc = (FreeChunk*)addr;
duke@435 910 if (fc->isFree()) {
duke@435 911 // Since we hold the free list lock, which protects direct
duke@435 912 // allocation in this generation by mutators, a free object
duke@435 913 // will remain free throughout this iteration code.
duke@435 914 size = fc->size();
duke@435 915 } else {
duke@435 916 // Note that the object need not necessarily be initialized,
duke@435 917 // because (for instance) the free list lock does NOT protect
duke@435 918 // object initialization. The closure application below must
duke@435 919 // therefore be correct in the face of uninitialized objects.
duke@435 920 size = cl->do_object_careful(oop(addr));
duke@435 921 if (size == 0) {
duke@435 922 // An unparsable object found. Signal early termination.
duke@435 923 return addr;
duke@435 924 }
duke@435 925 }
duke@435 926 }
duke@435 927 return NULL;
duke@435 928 }
duke@435 929
duke@435 930 // Callers of this iterator beware: The closure application should
duke@435 931 // be robust in the face of uninitialized objects and should (always)
duke@435 932 // return a correct size so that the next addr + size below gives us a
duke@435 933 // valid block boundary. [See for instance,
duke@435 934 // ScanMarkedObjectsAgainCarefullyClosure::do_object_careful()
duke@435 935 // in ConcurrentMarkSweepGeneration.cpp.]
duke@435 936 HeapWord*
duke@435 937 CompactibleFreeListSpace::object_iterate_careful_m(MemRegion mr,
duke@435 938 ObjectClosureCareful* cl) {
duke@435 939 assert_lock_strong(freelistLock());
duke@435 940 // Can't use used_region() below because it may not necessarily
duke@435 941 // be the same as [bottom(),end()); although we could
duke@435 942 // use [used_region().start(),round_to(used_region().end(),CardSize)),
duke@435 943 // that appears too cumbersome, so we just do the simpler check
duke@435 944 // in the assertion below.
duke@435 945 assert(!mr.is_empty() && MemRegion(bottom(),end()).contains(mr),
duke@435 946 "mr should be non-empty and within used space");
duke@435 947 HeapWord *addr, *end;
duke@435 948 size_t size;
duke@435 949 for (addr = block_start_careful(mr.start()), end = mr.end();
duke@435 950 addr < end; addr += size) {
duke@435 951 FreeChunk* fc = (FreeChunk*)addr;
duke@435 952 if (fc->isFree()) {
duke@435 953 // Since we hold the free list lock, which protects direct
duke@435 954 // allocation in this generation by mutators, a free object
duke@435 955 // will remain free throughout this iteration code.
duke@435 956 size = fc->size();
duke@435 957 } else {
duke@435 958 // Note that the object need not necessarily be initialized,
duke@435 959 // because (for instance) the free list lock does NOT protect
duke@435 960 // object initialization. The closure application below must
duke@435 961 // therefore be correct in the face of uninitialized objects.
duke@435 962 size = cl->do_object_careful_m(oop(addr), mr);
duke@435 963 if (size == 0) {
duke@435 964 // An unparsable object found. Signal early termination.
duke@435 965 return addr;
duke@435 966 }
duke@435 967 }
duke@435 968 }
duke@435 969 return NULL;
duke@435 970 }
duke@435 971
duke@435 972
ysr@777 973 HeapWord* CompactibleFreeListSpace::block_start_const(const void* p) const {
duke@435 974 NOT_PRODUCT(verify_objects_initialized());
duke@435 975 return _bt.block_start(p);
duke@435 976 }
duke@435 977
duke@435 978 HeapWord* CompactibleFreeListSpace::block_start_careful(const void* p) const {
duke@435 979 return _bt.block_start_careful(p);
duke@435 980 }
duke@435 981
duke@435 982 size_t CompactibleFreeListSpace::block_size(const HeapWord* p) const {
duke@435 983 NOT_PRODUCT(verify_objects_initialized());
duke@435 984 // This must be volatile, or else there is a danger that the compiler
duke@435 985 // will compile the code below into a sometimes-infinite loop, by keeping
duke@435 986 // the value read the first time in a register.
duke@435 987 while (true) {
duke@435 988 // We must do this until we get a consistent view of the object.
coleenp@622 989 if (FreeChunk::indicatesFreeChunk(p)) {
coleenp@622 990 volatile FreeChunk* fc = (volatile FreeChunk*)p;
coleenp@622 991 size_t res = fc->size();
coleenp@622 992 // If the object is still a free chunk, return the size, else it
coleenp@622 993 // has been allocated so try again.
coleenp@622 994 if (FreeChunk::indicatesFreeChunk(p)) {
duke@435 995 assert(res != 0, "Block size should not be 0");
duke@435 996 return res;
duke@435 997 }
coleenp@622 998 } else {
coleenp@622 999 // must read from what 'p' points to in each loop.
coleenp@622 1000 klassOop k = ((volatile oopDesc*)p)->klass_or_null();
coleenp@622 1001 if (k != NULL) {
ysr@2071 1002 assert(k->is_oop(true /* ignore mark word */), "Should be klass oop");
coleenp@622 1003 oop o = (oop)p;
coleenp@622 1004 assert(o->is_parsable(), "Should be parsable");
coleenp@622 1005 assert(o->is_oop(true /* ignore mark word */), "Should be an oop.");
coleenp@622 1006 size_t res = o->size_given_klass(k->klass_part());
coleenp@622 1007 res = adjustObjectSize(res);
coleenp@622 1008 assert(res != 0, "Block size should not be 0");
coleenp@622 1009 return res;
coleenp@622 1010 }
duke@435 1011 }
duke@435 1012 }
duke@435 1013 }
duke@435 1014
duke@435 1015 // A variant of the above that uses the Printezis bits for
duke@435 1016 // unparsable but allocated objects. This avoids any possible
duke@435 1017 // stalls waiting for mutators to initialize objects, and is
duke@435 1018 // thus potentially faster than the variant above. However,
duke@435 1019 // this variant may return a zero size for a block that is
duke@435 1020 // under mutation and for which a consistent size cannot be
duke@435 1021 // inferred without stalling; see CMSCollector::block_size_if_printezis_bits().
duke@435 1022 size_t CompactibleFreeListSpace::block_size_no_stall(HeapWord* p,
duke@435 1023 const CMSCollector* c)
duke@435 1024 const {
duke@435 1025 assert(MemRegion(bottom(), end()).contains(p), "p not in space");
duke@435 1026 // This must be volatile, or else there is a danger that the compiler
duke@435 1027 // will compile the code below into a sometimes-infinite loop, by keeping
duke@435 1028 // the value read the first time in a register.
duke@435 1029 DEBUG_ONLY(uint loops = 0;)
duke@435 1030 while (true) {
duke@435 1031 // We must do this until we get a consistent view of the object.
coleenp@622 1032 if (FreeChunk::indicatesFreeChunk(p)) {
coleenp@622 1033 volatile FreeChunk* fc = (volatile FreeChunk*)p;
coleenp@622 1034 size_t res = fc->size();
coleenp@622 1035 if (FreeChunk::indicatesFreeChunk(p)) {
duke@435 1036 assert(res != 0, "Block size should not be 0");
duke@435 1037 assert(loops == 0, "Should be 0");
duke@435 1038 return res;
duke@435 1039 }
duke@435 1040 } else {
coleenp@622 1041 // must read from what 'p' points to in each loop.
coleenp@622 1042 klassOop k = ((volatile oopDesc*)p)->klass_or_null();
ysr@2533 1043 // We trust the size of any object that has a non-NULL
ysr@2533 1044 // klass and (for those in the perm gen) is parsable
ysr@2533 1045 // -- irrespective of its conc_safe-ty.
ysr@2533 1046 if (k != NULL && ((oopDesc*)p)->is_parsable()) {
coleenp@622 1047 assert(k->is_oop(), "Should really be klass oop.");
coleenp@622 1048 oop o = (oop)p;
coleenp@622 1049 assert(o->is_oop(), "Should be an oop");
coleenp@622 1050 size_t res = o->size_given_klass(k->klass_part());
coleenp@622 1051 res = adjustObjectSize(res);
coleenp@622 1052 assert(res != 0, "Block size should not be 0");
coleenp@622 1053 return res;
coleenp@622 1054 } else {
ysr@2533 1055 // May return 0 if P-bits not present.
coleenp@622 1056 return c->block_size_if_printezis_bits(p);
coleenp@622 1057 }
duke@435 1058 }
duke@435 1059 assert(loops == 0, "Can loop at most once");
duke@435 1060 DEBUG_ONLY(loops++;)
duke@435 1061 }
duke@435 1062 }
duke@435 1063
duke@435 1064 size_t CompactibleFreeListSpace::block_size_nopar(const HeapWord* p) const {
duke@435 1065 NOT_PRODUCT(verify_objects_initialized());
duke@435 1066 assert(MemRegion(bottom(), end()).contains(p), "p not in space");
duke@435 1067 FreeChunk* fc = (FreeChunk*)p;
duke@435 1068 if (fc->isFree()) {
duke@435 1069 return fc->size();
duke@435 1070 } else {
duke@435 1071 // Ignore mark word because this may be a recently promoted
duke@435 1072 // object whose mark word is used to chain together grey
duke@435 1073 // objects (the last one would have a null value).
duke@435 1074 assert(oop(p)->is_oop(true), "Should be an oop");
duke@435 1075 return adjustObjectSize(oop(p)->size());
duke@435 1076 }
duke@435 1077 }
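// (A reading of the code: block_size_nopar() skips the retry loop used in
// block_size() above, so it is appropriate only when no parallel allocation
// can be racing with the caller; hence the "nopar" suffix.)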
duke@435 1078
duke@435 1079 // This implementation assumes that the property of "being an object" is
duke@435 1080 // stable. But being a free chunk may not be (because of parallel
duke@435 1081 // promotion.)
duke@435 1082 bool CompactibleFreeListSpace::block_is_obj(const HeapWord* p) const {
duke@435 1083 FreeChunk* fc = (FreeChunk*)p;
duke@435 1084 assert(is_in_reserved(p), "Should be in space");
duke@435 1085 // When doing a mark-sweep-compact of the CMS generation, this
duke@435 1086 // assertion may fail because prepare_for_compaction() uses
duke@435 1087 // space that is garbage to maintain information on ranges of
duke@435 1088 // live objects so that these live ranges can be moved as a whole.
duke@435 1089 // Comment out this assertion until that problem can be solved
duke@435 1090 // (i.e., that the block start calculation may look at objects
duke@435 1091 // at addresses below "p" in finding the object that contains "p",
duke@435 1092 // and those objects (if garbage) may have been modified to hold
duke@435 1093 // live range information.)
jmasa@2188 1094 // assert(CollectedHeap::use_parallel_gc_threads() || _bt.block_start(p) == p,
jmasa@2188 1095 // "Should be a block boundary");
coleenp@622 1096 if (FreeChunk::indicatesFreeChunk(p)) return false;
coleenp@622 1097 klassOop k = oop(p)->klass_or_null();
duke@435 1098 if (k != NULL) {
duke@435 1099 // Ignore mark word because it may have been used to
duke@435 1100 // chain together promoted objects (the last one
duke@435 1101 // would have a null value).
duke@435 1102 assert(oop(p)->is_oop(true), "Should be an oop");
duke@435 1103 return true;
duke@435 1104 } else {
duke@435 1105 return false; // Was not an object at the start of collection.
duke@435 1106 }
duke@435 1107 }
duke@435 1108
duke@435 1109 // Check if the object is alive. This fact is checked either by consulting
duke@435 1110 // the main marking bitmap in the sweeping phase or, if it's a permanent
duke@435 1111 // generation and we're not in the sweeping phase, by checking the
duke@435 1112 // perm_gen_verify_bit_map where we store the "deadness" information if
duke@435 1113 // we did not sweep the perm gen in the most recent previous GC cycle.
duke@435 1114 bool CompactibleFreeListSpace::obj_is_alive(const HeapWord* p) const {
ysr@2301 1115 assert(SafepointSynchronize::is_at_safepoint() || !is_init_completed(),
ysr@2301 1116 "Else races are possible");
ysr@2293 1117 assert(block_is_obj(p), "The address should point to an object");
duke@435 1118
duke@435 1119 // If we're sweeping, we use object liveness information from the main bit map
duke@435 1120 // for both perm gen and old gen.
duke@435 1121 // We don't need to lock the bitmap (live_map or dead_map below), because
duke@435 1122 // EITHER we are in the middle of the sweeping phase, and the
duke@435 1123 // main marking bit map (live_map below) is locked,
duke@435 1124 // OR we're in other phases and perm_gen_verify_bit_map (dead_map below)
duke@435 1125 // is stable, because it's mutated only in the sweeping phase.
ysr@2293 1126 // NOTE: This method is also used by jmap where, if class unloading is
ysr@2293 1127 // off, the results can return "false" for legitimate perm objects,
ysr@2293 1128 // when we are not in the midst of a sweeping phase, which can result
ysr@2293 1129 // in jmap not reporting certain perm gen objects. This will be moot
ysr@2293 1130 // if/when the perm gen goes away in the future.
duke@435 1131 if (_collector->abstract_state() == CMSCollector::Sweeping) {
duke@435 1132 CMSBitMap* live_map = _collector->markBitMap();
ysr@2293 1133 return live_map->par_isMarked((HeapWord*) p);
duke@435 1134 } else {
duke@435 1135 // If we're not currently sweeping and we haven't swept the perm gen in
duke@435 1136 // the previous concurrent cycle then we may have dead but unswept objects
duke@435 1137 // in the perm gen. In this case, we use the "deadness" information
duke@435 1138 // that we had saved in perm_gen_verify_bit_map at the last sweep.
duke@435 1139 if (!CMSClassUnloadingEnabled && _collector->_permGen->reserved().contains(p)) {
duke@435 1140 if (_collector->verifying()) {
duke@435 1141 CMSBitMap* dead_map = _collector->perm_gen_verify_bit_map();
duke@435 1142 // Object is marked in the dead_map bitmap at the previous sweep
duke@435 1143 // when we know that it's dead; if the bitmap is not allocated then
duke@435 1144 // the object is alive.
duke@435 1145 return (dead_map->sizeInBits() == 0) // bit_map has not been allocated
duke@435 1146 || !dead_map->par_isMarked((HeapWord*) p);
duke@435 1147 } else {
duke@435 1148 return false; // We can't say for sure if it's live, so we say that it's dead.
duke@435 1149 }
duke@435 1150 }
duke@435 1151 }
duke@435 1152 return true;
duke@435 1153 }
duke@435 1154
duke@435 1155 bool CompactibleFreeListSpace::block_is_obj_nopar(const HeapWord* p) const {
duke@435 1156 FreeChunk* fc = (FreeChunk*)p;
duke@435 1157 assert(is_in_reserved(p), "Should be in space");
duke@435 1158 assert(_bt.block_start(p) == p, "Should be a block boundary");
duke@435 1159 if (!fc->isFree()) {
duke@435 1160 // Ignore mark word because it may have been used to
duke@435 1161 // chain together promoted objects (the last one
duke@435 1162 // would have a null value).
duke@435 1163 assert(oop(p)->is_oop(true), "Should be an oop");
duke@435 1164 return true;
duke@435 1165 }
duke@435 1166 return false;
duke@435 1167 }
duke@435 1168
duke@435 1169 // "MT-safe but not guaranteed MT-precise" (TM); you may get an
duke@435 1170 // approximate answer if you don't hold the freelistLock when you call this.
duke@435 1171 size_t CompactibleFreeListSpace::totalSizeInIndexedFreeLists() const {
duke@435 1172 size_t size = 0;
duke@435 1173 for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
duke@435 1174 debug_only(
duke@435 1175 // We may be calling here without the lock in which case we
duke@435 1176 // won't do this modest sanity check.
duke@435 1177 if (freelistLock()->owned_by_self()) {
duke@435 1178 size_t total_list_size = 0;
duke@435 1179 for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
duke@435 1180 fc = fc->next()) {
duke@435 1181 total_list_size += i;
duke@435 1182 }
duke@435 1183 assert(total_list_size == i * _indexedFreeList[i].count(),
duke@435 1184 "Count in list is incorrect");
duke@435 1185 }
duke@435 1186 )
duke@435 1187 size += i * _indexedFreeList[i].count();
duke@435 1188 }
duke@435 1189 return size;
duke@435 1190 }
duke@435 1191
duke@435 1192 HeapWord* CompactibleFreeListSpace::par_allocate(size_t size) {
duke@435 1193 MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
duke@435 1194 return allocate(size);
duke@435 1195 }
duke@435 1196
duke@435 1197 HeapWord*
duke@435 1198 CompactibleFreeListSpace::getChunkFromSmallLinearAllocBlockRemainder(size_t size) {
duke@435 1199 return getChunkFromLinearAllocBlockRemainder(&_smallLinearAllocBlock, size);
duke@435 1200 }
duke@435 1201
duke@435 1202 HeapWord* CompactibleFreeListSpace::allocate(size_t size) {
duke@435 1203 assert_lock_strong(freelistLock());
duke@435 1204 HeapWord* res = NULL;
duke@435 1205 assert(size == adjustObjectSize(size),
duke@435 1206 "use adjustObjectSize() before calling into allocate()");
duke@435 1207
duke@435 1208 if (_adaptive_freelists) {
duke@435 1209 res = allocate_adaptive_freelists(size);
duke@435 1210 } else { // non-adaptive free lists
duke@435 1211 res = allocate_non_adaptive_freelists(size);
duke@435 1212 }
duke@435 1213
duke@435 1214 if (res != NULL) {
duke@435 1215 // check that res does lie in this space!
duke@435 1216 assert(is_in_reserved(res), "Not in this space!");
duke@435 1217 assert(is_aligned((void*)res), "alignment check");
duke@435 1218
duke@435 1219 FreeChunk* fc = (FreeChunk*)res;
duke@435 1220 fc->markNotFree();
duke@435 1221 assert(!fc->isFree(), "shouldn't be marked free");
coleenp@622 1222 assert(oop(fc)->klass_or_null() == NULL, "should look uninitialized");
duke@435 1223 // Verify that the block offset table shows this to
duke@435 1224 // be a single block, but not one which is unallocated.
duke@435 1225 _bt.verify_single_block(res, size);
duke@435 1226 _bt.verify_not_unallocated(res, size);
duke@435 1227 // mangle a just allocated object with a distinct pattern.
duke@435 1228 debug_only(fc->mangleAllocated(size));
duke@435 1229 }
duke@435 1230
duke@435 1231 return res;
duke@435 1232 }
duke@435 1233
duke@435 1234 HeapWord* CompactibleFreeListSpace::allocate_non_adaptive_freelists(size_t size) {
duke@435 1235 HeapWord* res = NULL;
duke@435 1236 // try and use linear allocation for smaller blocks
duke@435 1237 if (size < _smallLinearAllocBlock._allocation_size_limit) {
duke@435 1238 // if successful, the following also adjusts block offset table
duke@435 1239 res = getChunkFromSmallLinearAllocBlock(size);
duke@435 1240 }
duke@435 1241 // Else triage to indexed lists for smaller sizes
duke@435 1242 if (res == NULL) {
duke@435 1243 if (size < SmallForDictionary) {
duke@435 1244 res = (HeapWord*) getChunkFromIndexedFreeList(size);
duke@435 1245 } else {
duke@435 1246 // else get it from the big dictionary; if even this doesn't
duke@435 1247 // work we are out of luck.
duke@435 1248 res = (HeapWord*)getChunkFromDictionaryExact(size);
duke@435 1249 }
duke@435 1250 }
duke@435 1251
duke@435 1252 return res;
duke@435 1253 }
duke@435 1254
duke@435 1255 HeapWord* CompactibleFreeListSpace::allocate_adaptive_freelists(size_t size) {
duke@435 1256 assert_lock_strong(freelistLock());
duke@435 1257 HeapWord* res = NULL;
duke@435 1258 assert(size == adjustObjectSize(size),
duke@435 1259 "use adjustObjectSize() before calling into allocate()");
duke@435 1260
duke@435 1261 // Strategy
duke@435 1262 // if small
duke@435 1263 // exact size from small object indexed list if small
duke@435 1264 // small or large linear allocation block (linAB) as appropriate
duke@435 1265 // take from lists of greater sized chunks
duke@435 1266 // else
duke@435 1267 // dictionary
duke@435 1268 // small or large linear allocation block if it has the space
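// For illustration (sizes are hypothetical): a small request of, say, 20 words
// first tries the exact-size indexed list, then the small linAB, and then the
// larger indexed lists; a request of IndexSetSize words or more goes straight
// to the dictionary and, failing that, falls back to the small linAB remainder.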
duke@435 1269 // Try allocating exact size from indexTable first
duke@435 1270 if (size < IndexSetSize) {
duke@435 1271 res = (HeapWord*) getChunkFromIndexedFreeList(size);
duke@435 1272 if (res != NULL) {
duke@435 1273 assert(res != (HeapWord*)_indexedFreeList[size].head(),
duke@435 1274 "Not removed from free list");
duke@435 1275 // no block offset table adjustment is necessary on blocks in
duke@435 1276 // the indexed lists.
duke@435 1277
duke@435 1278 // Try allocating from the small LinAB
duke@435 1279 } else if (size < _smallLinearAllocBlock._allocation_size_limit &&
duke@435 1280 (res = getChunkFromSmallLinearAllocBlock(size)) != NULL) {
duke@435 1281 // if successful, the above also adjusts block offset table
duke@435 1282 // Note that this call will refill the LinAB to
duke@435 1283 // satisfy the request. This is different than
duke@435 1284 // evm.
duke@435 1285 // Don't record chunk off a LinAB? smallSplitBirth(size);
duke@435 1286 } else {
duke@435 1287 // Raid the exact free lists larger than size, even if they are not
duke@435 1288 // overpopulated.
duke@435 1289 res = (HeapWord*) getChunkFromGreater(size);
duke@435 1290 }
duke@435 1291 } else {
duke@435 1292 // Big objects get allocated directly from the dictionary.
duke@435 1293 res = (HeapWord*) getChunkFromDictionaryExact(size);
duke@435 1294 if (res == NULL) {
duke@435 1295 // Try hard not to fail since an allocation failure will likely
duke@435 1296 // trigger a synchronous GC. Try to get the space from the
duke@435 1297 // allocation blocks.
duke@435 1298 res = getChunkFromSmallLinearAllocBlockRemainder(size);
duke@435 1299 }
duke@435 1300 }
duke@435 1301
duke@435 1302 return res;
duke@435 1303 }
duke@435 1304
duke@435 1305 // A worst-case estimate of the space required (in HeapWords) to expand the heap
duke@435 1306 // when promoting obj.
duke@435 1307 size_t CompactibleFreeListSpace::expansionSpaceRequired(size_t obj_size) const {
duke@435 1308 // Depending on the object size, expansion may require refilling either a
duke@435 1309 // bigLAB or a smallLAB plus refilling a PromotionInfo object. MinChunkSize
duke@435 1310 // is added because the dictionary may over-allocate to avoid fragmentation.
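  // For illustration only (the numbers are hypothetical, not this VM's actual
  // constants): with obj_size = 10 words, _promoInfo.refillSize() = 128 words
  // and MinChunkSize = 4 words, the adaptive-freelists case returns
  // 10 + 128 + 2*4 = 146 words; with adaptive freelists off and a linAB
  // refill size of 256 words, it returns MAX2(10, 256) + 128 + 2*4 = 392 words.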
duke@435 1311 size_t space = obj_size;
duke@435 1312 if (!_adaptive_freelists) {
duke@435 1313 space = MAX2(space, _smallLinearAllocBlock._refillSize);
duke@435 1314 }
duke@435 1315 space += _promoInfo.refillSize() + 2 * MinChunkSize;
duke@435 1316 return space;
duke@435 1317 }
duke@435 1318
duke@435 1319 FreeChunk* CompactibleFreeListSpace::getChunkFromGreater(size_t numWords) {
duke@435 1320 FreeChunk* ret;
duke@435 1321
duke@435 1322 assert(numWords >= MinChunkSize, "Size is less than minimum");
duke@435 1323 assert(linearAllocationWouldFail() || bestFitFirst(),
duke@435 1324 "Should not be here");
duke@435 1325
duke@435 1326 size_t i;
duke@435 1327 size_t currSize = numWords + MinChunkSize;
duke@435 1328 assert(currSize % MinObjAlignment == 0, "currSize should be aligned");
duke@435 1329 for (i = currSize; i < IndexSetSize; i += IndexSetStride) {
duke@435 1330 FreeList* fl = &_indexedFreeList[i];
duke@435 1331 if (fl->head()) {
duke@435 1332 ret = getFromListGreater(fl, numWords);
duke@435 1333 assert(ret == NULL || ret->isFree(), "Should be returning a free chunk");
duke@435 1334 return ret;
duke@435 1335 }
duke@435 1336 }
duke@435 1337
duke@435 1338 currSize = MAX2((size_t)SmallForDictionary,
duke@435 1339 (size_t)(numWords + MinChunkSize));
duke@435 1340
duke@435 1341 /* Try to get a chunk that satisfies request, while avoiding
duke@435 1342 fragmentation that can't be handled. */
duke@435 1343 {
duke@435 1344 ret = dictionary()->getChunk(currSize);
duke@435 1345 if (ret != NULL) {
duke@435 1346 assert(ret->size() - numWords >= MinChunkSize,
duke@435 1347 "Chunk is too small");
duke@435 1348 _bt.allocated((HeapWord*)ret, ret->size());
duke@435 1349 /* Carve returned chunk. */
duke@435 1350 (void) splitChunkAndReturnRemainder(ret, numWords);
duke@435 1351 /* Label this as no longer a free chunk. */
duke@435 1352 assert(ret->isFree(), "This chunk should be free");
duke@435 1353 ret->linkPrev(NULL);
duke@435 1354 }
duke@435 1355 assert(ret == NULL || ret->isFree(), "Should be returning a free chunk");
duke@435 1356 return ret;
duke@435 1357 }
duke@435 1358 ShouldNotReachHere();
duke@435 1359 }
duke@435 1360
ysr@3220 1361 bool CompactibleFreeListSpace::verifyChunkInIndexedFreeLists(FreeChunk* fc) const {
duke@435 1362 assert(fc->size() < IndexSetSize, "Size of chunk is too large");
duke@435 1363 return _indexedFreeList[fc->size()].verifyChunkInFreeLists(fc);
duke@435 1364 }
duke@435 1365
ysr@3220 1366 bool CompactibleFreeListSpace::verify_chunk_is_linear_alloc_block(FreeChunk* fc) const {
ysr@3220 1367 assert((_smallLinearAllocBlock._ptr != (HeapWord*)fc) ||
ysr@3220 1368 (_smallLinearAllocBlock._word_size == fc->size()),
ysr@3220 1369 "Linear allocation block shows incorrect size");
ysr@3220 1370 return ((_smallLinearAllocBlock._ptr == (HeapWord*)fc) &&
ysr@3220 1371 (_smallLinearAllocBlock._word_size == fc->size()));
ysr@3220 1372 }
ysr@3220 1373
ysr@3220 1374 // Check if the purported free chunk is present either as a linear
ysr@3220 1375 // allocation block, the size-indexed table of (smaller) free blocks,
ysr@3220 1376 // or the larger free blocks kept in the binary tree dictionary.
duke@435 1377 bool CompactibleFreeListSpace::verifyChunkInFreeLists(FreeChunk* fc) const {
ysr@3220 1378 if (verify_chunk_is_linear_alloc_block(fc)) {
ysr@3220 1379 return true;
ysr@3220 1380 } else if (fc->size() < IndexSetSize) {
ysr@3220 1381 return verifyChunkInIndexedFreeLists(fc);
ysr@3220 1382 } else {
duke@435 1383 return dictionary()->verifyChunkInFreeLists(fc);
duke@435 1384 }
duke@435 1385 }
duke@435 1386
duke@435 1387 #ifndef PRODUCT
duke@435 1388 void CompactibleFreeListSpace::assert_locked() const {
duke@435 1389 CMSLockVerifier::assert_locked(freelistLock(), parDictionaryAllocLock());
duke@435 1390 }
ysr@1580 1391
ysr@1580 1392 void CompactibleFreeListSpace::assert_locked(const Mutex* lock) const {
ysr@1580 1393 CMSLockVerifier::assert_locked(lock);
ysr@1580 1394 }
duke@435 1395 #endif
duke@435 1396
duke@435 1397 FreeChunk* CompactibleFreeListSpace::allocateScratch(size_t size) {
duke@435 1398 // In the parallel case, the main thread holds the free list lock
duke@435 1399 // on behalf of the parallel threads.
duke@435 1400 FreeChunk* fc;
duke@435 1401 {
duke@435 1402 // If GC is parallel, this might be called by several threads.
duke@435 1403 // This should be rare enough that the locking overhead won't affect
duke@435 1404 // the sequential code.
duke@435 1405 MutexLockerEx x(parDictionaryAllocLock(),
duke@435 1406 Mutex::_no_safepoint_check_flag);
duke@435 1407 fc = getChunkFromDictionary(size);
duke@435 1408 }
duke@435 1409 if (fc != NULL) {
duke@435 1410 fc->dontCoalesce();
duke@435 1411 assert(fc->isFree(), "Should be free, but not coalescable");
duke@435 1412 // Verify that the block offset table shows this to
duke@435 1413 // be a single block, but not one which is unallocated.
duke@435 1414 _bt.verify_single_block((HeapWord*)fc, fc->size());
duke@435 1415 _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
duke@435 1416 }
duke@435 1417 return fc;
duke@435 1418 }
duke@435 1419
coleenp@548 1420 oop CompactibleFreeListSpace::promote(oop obj, size_t obj_size) {
duke@435 1421 assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
duke@435 1422 assert_locked();
duke@435 1423
duke@435 1424 // If we are tracking promotions, then first ensure space for
duke@435 1425 // promotion (including spooling space for saving the header if necessary),
duke@435 1426 // then allocate and copy, then track promoted info if needed.
duke@435 1427 // When tracking (see PromotionInfo::track()), the mark word may
duke@435 1428 // be displaced and in this case restoration of the mark word
duke@435 1429 // occurs in the (oop_since_save_marks_)iterate phase.
duke@435 1430 if (_promoInfo.tracking() && !_promoInfo.ensure_spooling_space()) {
duke@435 1431 return NULL;
duke@435 1432 }
duke@435 1433 // Call the allocate(size_t, bool) form directly to avoid the
duke@435 1434 // additional call through the allocate(size_t) form. Having
duke@435 1435 // the compiler inline the call is problematic because allocate(size_t)
duke@435 1436 // is a virtual method.
duke@435 1437 HeapWord* res = allocate(adjustObjectSize(obj_size));
duke@435 1438 if (res != NULL) {
duke@435 1439 Copy::aligned_disjoint_words((HeapWord*)obj, res, obj_size);
duke@435 1440 // if we should be tracking promotions, do so.
duke@435 1441 if (_promoInfo.tracking()) {
duke@435 1442 _promoInfo.track((PromotedObject*)res);
duke@435 1443 }
duke@435 1444 }
duke@435 1445 return oop(res);
duke@435 1446 }
duke@435 1447
duke@435 1448 HeapWord*
duke@435 1449 CompactibleFreeListSpace::getChunkFromSmallLinearAllocBlock(size_t size) {
duke@435 1450 assert_locked();
duke@435 1451 assert(size >= MinChunkSize, "minimum chunk size");
duke@435 1452 assert(size < _smallLinearAllocBlock._allocation_size_limit,
duke@435 1453 "maximum from smallLinearAllocBlock");
duke@435 1454 return getChunkFromLinearAllocBlock(&_smallLinearAllocBlock, size);
duke@435 1455 }
duke@435 1456
duke@435 1457 HeapWord*
duke@435 1458 CompactibleFreeListSpace::getChunkFromLinearAllocBlock(LinearAllocBlock *blk,
duke@435 1459 size_t size) {
duke@435 1460 assert_locked();
duke@435 1461 assert(size >= MinChunkSize, "too small");
duke@435 1462 HeapWord* res = NULL;
duke@435 1463 // Try to do linear allocation from blk, making sure that the block has not been exhausted.
duke@435 1464 if (blk->_word_size == 0) {
duke@435 1465 // We have probably been unable to fill this either in the prologue or
duke@435 1466 // when it was exhausted at the last linear allocation. Bail out until
duke@435 1467 // next time.
duke@435 1468 assert(blk->_ptr == NULL, "consistency check");
duke@435 1469 return NULL;
duke@435 1470 }
duke@435 1471 assert(blk->_word_size != 0 && blk->_ptr != NULL, "consistency check");
duke@435 1472 res = getChunkFromLinearAllocBlockRemainder(blk, size);
duke@435 1473 if (res != NULL) return res;
duke@435 1474
duke@435 1475 // about to exhaust this linear allocation block
duke@435 1476 if (blk->_word_size == size) { // exactly satisfied
duke@435 1477 res = blk->_ptr;
duke@435 1478 _bt.allocated(res, blk->_word_size);
duke@435 1479 } else if (size + MinChunkSize <= blk->_refillSize) {
ysr@1580 1480 size_t sz = blk->_word_size;
duke@435 1481 // Update _unallocated_block if the size is such that chunk would be
duke@435 1482 // returned to the indexed free list. All other chunks in the indexed
duke@435 1483 // free lists are allocated from the dictionary so that _unallocated_block
duke@435 1484 // has already been adjusted for them. Do it here so that the cost
duke@435 1485 // is accounted for uniformly for all chunks added back to the indexed free lists.
ysr@1580 1486 if (sz < SmallForDictionary) {
ysr@1580 1487 _bt.allocated(blk->_ptr, sz);
duke@435 1488 }
duke@435 1489 // Return the chunk that isn't big enough, and then refill below.
ysr@1580 1490 addChunkToFreeLists(blk->_ptr, sz);
ysr@1580 1491 splitBirth(sz);
duke@435 1492 // Don't keep statistics on adding back a chunk from a LinAB.
duke@435 1493 } else {
duke@435 1494 // A refilled block would not satisfy the request.
duke@435 1495 return NULL;
duke@435 1496 }
duke@435 1497
duke@435 1498 blk->_ptr = NULL; blk->_word_size = 0;
duke@435 1499 refillLinearAllocBlock(blk);
duke@435 1500 assert(blk->_ptr == NULL || blk->_word_size >= size + MinChunkSize,
duke@435 1501 "block was replenished");
duke@435 1502 if (res != NULL) {
duke@435 1503 splitBirth(size);
duke@435 1504 repairLinearAllocBlock(blk);
duke@435 1505 } else if (blk->_ptr != NULL) {
duke@435 1506 res = blk->_ptr;
duke@435 1507 size_t blk_size = blk->_word_size;
duke@435 1508 blk->_word_size -= size;
duke@435 1509 blk->_ptr += size;
duke@435 1510 splitBirth(size);
duke@435 1511 repairLinearAllocBlock(blk);
duke@435 1512 // Update BOT last so that other (parallel) GC threads see a consistent
duke@435 1513 // view of the BOT and free blocks.
duke@435 1514 // Above must occur before BOT is updated below.
ysr@2071 1515 OrderAccess::storestore();
duke@435 1516 _bt.split_block(res, blk_size, size); // adjust block offset table
duke@435 1517 }
duke@435 1518 return res;
duke@435 1519 }
duke@435 1520
duke@435 1521 HeapWord* CompactibleFreeListSpace::getChunkFromLinearAllocBlockRemainder(
duke@435 1522 LinearAllocBlock* blk,
duke@435 1523 size_t size) {
duke@435 1524 assert_locked();
duke@435 1525 assert(size >= MinChunkSize, "too small");
duke@435 1526
duke@435 1527 HeapWord* res = NULL;
duke@435 1528 // This is the common case. Keep it simple.
duke@435 1529 if (blk->_word_size >= size + MinChunkSize) {
duke@435 1530 assert(blk->_ptr != NULL, "consistency check");
duke@435 1531 res = blk->_ptr;
duke@435 1532 // Note that the BOT is up-to-date for the linAB before allocation. It
duke@435 1533 // indicates the start of the linAB. The split_block() updates the
duke@435 1534 // BOT for the linAB after the allocation (indicates the start of the
duke@435 1535 // next chunk to be allocated).
duke@435 1536 size_t blk_size = blk->_word_size;
duke@435 1537 blk->_word_size -= size;
duke@435 1538 blk->_ptr += size;
duke@435 1539 splitBirth(size);
duke@435 1540 repairLinearAllocBlock(blk);
duke@435 1541 // Update BOT last so that other (parallel) GC threads see a consistent
duke@435 1542 // view of the BOT and free blocks.
duke@435 1543 // Above must occur before BOT is updated below.
ysr@2071 1544 OrderAccess::storestore();
duke@435 1545 _bt.split_block(res, blk_size, size); // adjust block offset table
duke@435 1546 _bt.allocated(res, size);
duke@435 1547 }
duke@435 1548 return res;
duke@435 1549 }
duke@435 1550
duke@435 1551 FreeChunk*
duke@435 1552 CompactibleFreeListSpace::getChunkFromIndexedFreeList(size_t size) {
duke@435 1553 assert_locked();
duke@435 1554 assert(size < SmallForDictionary, "just checking");
duke@435 1555 FreeChunk* res;
duke@435 1556 res = _indexedFreeList[size].getChunkAtHead();
duke@435 1557 if (res == NULL) {
duke@435 1558 res = getChunkFromIndexedFreeListHelper(size);
duke@435 1559 }
duke@435 1560 _bt.verify_not_unallocated((HeapWord*) res, size);
ysr@1580 1561 assert(res == NULL || res->size() == size, "Incorrect block size");
duke@435 1562 return res;
duke@435 1563 }
duke@435 1564
duke@435 1565 FreeChunk*
ysr@1580 1566 CompactibleFreeListSpace::getChunkFromIndexedFreeListHelper(size_t size,
ysr@1580 1567 bool replenish) {
duke@435 1568 assert_locked();
duke@435 1569 FreeChunk* fc = NULL;
duke@435 1570 if (size < SmallForDictionary) {
duke@435 1571 assert(_indexedFreeList[size].head() == NULL ||
duke@435 1572 _indexedFreeList[size].surplus() <= 0,
duke@435 1573 "List for this size should be empty or under populated");
duke@435 1574 // Try best fit in exact lists before replenishing the list
duke@435 1575 if (!bestFitFirst() || (fc = bestFitSmall(size)) == NULL) {
duke@435 1576 // Replenish list.
duke@435 1577 //
duke@435 1578 // Things tried that failed.
duke@435 1579 // Tried allocating out of the two LinAB's first before
duke@435 1580 // replenishing lists.
duke@435 1581 // Tried small linAB of size 256 (size in indexed list)
duke@435 1582 // and replenishing indexed lists from the small linAB.
duke@435 1583 //
duke@435 1584 FreeChunk* newFc = NULL;
ysr@1580 1585 const size_t replenish_size = CMSIndexedFreeListReplenish * size;
duke@435 1586 if (replenish_size < SmallForDictionary) {
duke@435 1587 // Do not replenish from an underpopulated size.
duke@435 1588 if (_indexedFreeList[replenish_size].surplus() > 0 &&
duke@435 1589 _indexedFreeList[replenish_size].head() != NULL) {
ysr@1580 1590 newFc = _indexedFreeList[replenish_size].getChunkAtHead();
ysr@1580 1591 } else if (bestFitFirst()) {
duke@435 1592 newFc = bestFitSmall(replenish_size);
duke@435 1593 }
duke@435 1594 }
ysr@1580 1595 if (newFc == NULL && replenish_size > size) {
ysr@1580 1596 assert(CMSIndexedFreeListReplenish > 1, "ctl pt invariant");
ysr@1580 1597 newFc = getChunkFromIndexedFreeListHelper(replenish_size, false);
ysr@1580 1598 }
ysr@1580 1599 // Note: The stats update re the split-death of the block obtained above
ysr@1580 1600 // will be recorded below precisely when we know we are going to
ysr@1580 1601 // be actually splitting it into more than one piece.
duke@435 1602 if (newFc != NULL) {
ysr@1580 1603 if (replenish || CMSReplenishIntermediate) {
ysr@1580 1604 // Replenish this list and return one block to caller.
ysr@1580 1605 size_t i;
ysr@1580 1606 FreeChunk *curFc, *nextFc;
ysr@1580 1607 size_t num_blk = newFc->size() / size;
ysr@1580 1608 assert(num_blk >= 1, "Smaller than requested?");
ysr@1580 1609 assert(newFc->size() % size == 0, "Should be integral multiple of request");
ysr@1580 1610 if (num_blk > 1) {
ysr@1580 1611 // we are sure we will be splitting the block just obtained
ysr@1580 1612 // into multiple pieces; record the split-death of the original
ysr@1580 1613 splitDeath(replenish_size);
ysr@1580 1614 }
ysr@1580 1615 // carve up and link blocks 0, ..., num_blk - 2
ysr@1580 1616 // The last chunk is not added to the lists but is returned as the
ysr@1580 1617 // free chunk.
ysr@1580 1618 for (curFc = newFc, nextFc = (FreeChunk*)((HeapWord*)curFc + size),
ysr@1580 1619 i = 0;
ysr@1580 1620 i < (num_blk - 1);
ysr@1580 1621 curFc = nextFc, nextFc = (FreeChunk*)((HeapWord*)nextFc + size),
ysr@1580 1622 i++) {
ysr@1580 1623 curFc->setSize(size);
ysr@1580 1624 // Don't record this as a return in order to try and
ysr@1580 1625 // determine the "returns" from a GC.
ysr@1580 1626 _bt.verify_not_unallocated((HeapWord*) curFc, size);
ysr@1580 1627 _indexedFreeList[size].returnChunkAtTail(curFc, false);
ysr@1580 1628 _bt.mark_block((HeapWord*)curFc, size);
ysr@1580 1629 splitBirth(size);
ysr@1580 1630 // Don't record the initial population of the indexed list
ysr@1580 1631 // as a split birth.
ysr@1580 1632 }
ysr@1580 1633
ysr@1580 1634 // check that the arithmetic was OK above
ysr@1580 1635 assert((HeapWord*)nextFc == (HeapWord*)newFc + num_blk*size,
ysr@1580 1636 "inconsistency in carving newFc");
duke@435 1637 curFc->setSize(size);
duke@435 1638 _bt.mark_block((HeapWord*)curFc, size);
duke@435 1639 splitBirth(size);
ysr@1580 1640 fc = curFc;
ysr@1580 1641 } else {
ysr@1580 1642 // Return entire block to caller
ysr@1580 1643 fc = newFc;
duke@435 1644 }
duke@435 1645 }
duke@435 1646 }
duke@435 1647 } else {
duke@435 1648 // Get a free chunk from the free chunk dictionary to be returned to
duke@435 1649 // replenish the indexed free list.
duke@435 1650 fc = getChunkFromDictionaryExact(size);
duke@435 1651 }
ysr@1580 1652 // assert(fc == NULL || fc->isFree(), "Should be returning a free chunk");
duke@435 1653 return fc;
duke@435 1654 }
duke@435 1655
duke@435 1656 FreeChunk*
duke@435 1657 CompactibleFreeListSpace::getChunkFromDictionary(size_t size) {
duke@435 1658 assert_locked();
duke@435 1659 FreeChunk* fc = _dictionary->getChunk(size);
duke@435 1660 if (fc == NULL) {
duke@435 1661 return NULL;
duke@435 1662 }
duke@435 1663 _bt.allocated((HeapWord*)fc, fc->size());
duke@435 1664 if (fc->size() >= size + MinChunkSize) {
duke@435 1665 fc = splitChunkAndReturnRemainder(fc, size);
duke@435 1666 }
duke@435 1667 assert(fc->size() >= size, "chunk too small");
duke@435 1668 assert(fc->size() < size + MinChunkSize, "chunk too big");
duke@435 1669 _bt.verify_single_block((HeapWord*)fc, fc->size());
duke@435 1670 return fc;
duke@435 1671 }
duke@435 1672
duke@435 1673 FreeChunk*
duke@435 1674 CompactibleFreeListSpace::getChunkFromDictionaryExact(size_t size) {
duke@435 1675 assert_locked();
duke@435 1676 FreeChunk* fc = _dictionary->getChunk(size);
duke@435 1677 if (fc == NULL) {
duke@435 1678 return fc;
duke@435 1679 }
duke@435 1680 _bt.allocated((HeapWord*)fc, fc->size());
duke@435 1681 if (fc->size() == size) {
duke@435 1682 _bt.verify_single_block((HeapWord*)fc, size);
duke@435 1683 return fc;
duke@435 1684 }
duke@435 1685 assert(fc->size() > size, "getChunk() guarantee");
duke@435 1686 if (fc->size() < size + MinChunkSize) {
duke@435 1687 // Return the chunk to the dictionary and go get a bigger one.
duke@435 1688 returnChunkToDictionary(fc);
duke@435 1689 fc = _dictionary->getChunk(size + MinChunkSize);
duke@435 1690 if (fc == NULL) {
duke@435 1691 return NULL;
duke@435 1692 }
duke@435 1693 _bt.allocated((HeapWord*)fc, fc->size());
duke@435 1694 }
duke@435 1695 assert(fc->size() >= size + MinChunkSize, "tautology");
duke@435 1696 fc = splitChunkAndReturnRemainder(fc, size);
duke@435 1697 assert(fc->size() == size, "chunk is wrong size");
duke@435 1698 _bt.verify_single_block((HeapWord*)fc, size);
duke@435 1699 return fc;
duke@435 1700 }
duke@435 1701
duke@435 1702 void
duke@435 1703 CompactibleFreeListSpace::returnChunkToDictionary(FreeChunk* chunk) {
duke@435 1704 assert_locked();
duke@435 1705
duke@435 1706 size_t size = chunk->size();
duke@435 1707 _bt.verify_single_block((HeapWord*)chunk, size);
duke@435 1708 // adjust _unallocated_block downward, as necessary
duke@435 1709 _bt.freed((HeapWord*)chunk, size);
duke@435 1710 _dictionary->returnChunk(chunk);
ysr@1580 1711 #ifndef PRODUCT
ysr@1580 1712 if (CMSCollector::abstract_state() != CMSCollector::Sweeping) {
ysr@1580 1713 TreeChunk::as_TreeChunk(chunk)->list()->verify_stats();
ysr@1580 1714 }
ysr@1580 1715 #endif // PRODUCT
duke@435 1716 }
duke@435 1717
duke@435 1718 void
duke@435 1719 CompactibleFreeListSpace::returnChunkToFreeList(FreeChunk* fc) {
duke@435 1720 assert_locked();
duke@435 1721 size_t size = fc->size();
duke@435 1722 _bt.verify_single_block((HeapWord*) fc, size);
duke@435 1723 _bt.verify_not_unallocated((HeapWord*) fc, size);
duke@435 1724 if (_adaptive_freelists) {
duke@435 1725 _indexedFreeList[size].returnChunkAtTail(fc);
duke@435 1726 } else {
duke@435 1727 _indexedFreeList[size].returnChunkAtHead(fc);
duke@435 1728 }
ysr@1580 1729 #ifndef PRODUCT
ysr@1580 1730 if (CMSCollector::abstract_state() != CMSCollector::Sweeping) {
ysr@1580 1731 _indexedFreeList[size].verify_stats();
ysr@1580 1732 }
ysr@1580 1733 #endif // PRODUCT
duke@435 1734 }
duke@435 1735
duke@435 1736 // Add chunk to end of last block -- if it's the largest
duke@435 1737 // block -- and update BOT and census data. We would
duke@435 1738 // of course have preferred to coalesce it with the
duke@435 1739 // last block, but it's currently less expensive to find the
duke@435 1740 // largest block than it is to find the last.
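// For illustration (addresses and sizes are hypothetical): if the largest
// dictionary block is 100 words and ends exactly where "chunk" begins, the
// code below records a coalesce-death for the 100-word block, removes it from
// the dictionary, merges it with the incoming chunk, and finally records a
// coalesce-birth for the combined size.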
duke@435 1741 void
duke@435 1742 CompactibleFreeListSpace::addChunkToFreeListsAtEndRecordingStats(
duke@435 1743 HeapWord* chunk, size_t size) {
duke@435 1744 // check that the chunk does lie in this space!
duke@435 1745 assert(chunk != NULL && is_in_reserved(chunk), "Not in this space!");
duke@435 1746 // One of the parallel gc task threads may be here
duke@435 1747 // whilst others are allocating.
duke@435 1748 Mutex* lock = NULL;
duke@435 1749 if (ParallelGCThreads != 0) {
duke@435 1750 lock = &_parDictionaryAllocLock;
duke@435 1751 }
duke@435 1752 FreeChunk* ec;
duke@435 1753 {
duke@435 1754 MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
duke@435 1755 ec = dictionary()->findLargestDict(); // get largest block
duke@435 1756 if (ec != NULL && ec->end() == chunk) {
duke@435 1757 // It's a coterminal block - we can coalesce.
duke@435 1758 size_t old_size = ec->size();
duke@435 1759 coalDeath(old_size);
duke@435 1760 removeChunkFromDictionary(ec);
duke@435 1761 size += old_size;
duke@435 1762 } else {
duke@435 1763 ec = (FreeChunk*)chunk;
duke@435 1764 }
duke@435 1765 }
duke@435 1766 ec->setSize(size);
duke@435 1767 debug_only(ec->mangleFreed(size));
duke@435 1768 if (size < SmallForDictionary) {
duke@435 1769 lock = _indexedFreeListParLocks[size];
duke@435 1770 }
duke@435 1771 MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
duke@435 1772 addChunkAndRepairOffsetTable((HeapWord*)ec, size, true);
duke@435 1773 // Record the birth under the lock, since the recording involves
duke@435 1774 // manipulation of the list on which the chunk lives and,
duke@435 1775 // if the chunk is allocated and is the last on the list,
duke@435 1776 // the list can go away.
duke@435 1777 coalBirth(size);
duke@435 1778 }
duke@435 1779
duke@435 1780 void
duke@435 1781 CompactibleFreeListSpace::addChunkToFreeLists(HeapWord* chunk,
duke@435 1782 size_t size) {
duke@435 1783 // check that the chunk does lie in this space!
duke@435 1784 assert(chunk != NULL && is_in_reserved(chunk), "Not in this space!");
duke@435 1785 assert_locked();
duke@435 1786 _bt.verify_single_block(chunk, size);
duke@435 1787
duke@435 1788 FreeChunk* fc = (FreeChunk*) chunk;
duke@435 1789 fc->setSize(size);
duke@435 1790 debug_only(fc->mangleFreed(size));
duke@435 1791 if (size < SmallForDictionary) {
duke@435 1792 returnChunkToFreeList(fc);
duke@435 1793 } else {
duke@435 1794 returnChunkToDictionary(fc);
duke@435 1795 }
duke@435 1796 }
duke@435 1797
duke@435 1798 void
duke@435 1799 CompactibleFreeListSpace::addChunkAndRepairOffsetTable(HeapWord* chunk,
duke@435 1800 size_t size, bool coalesced) {
duke@435 1801 assert_locked();
duke@435 1802 assert(chunk != NULL, "null chunk");
duke@435 1803 if (coalesced) {
duke@435 1804 // repair BOT
duke@435 1805 _bt.single_block(chunk, size);
duke@435 1806 }
duke@435 1807 addChunkToFreeLists(chunk, size);
duke@435 1808 }
duke@435 1809
duke@435 1810 // We _must_ find the purported chunk on our free lists;
duke@435 1811 // we assert if we don't.
duke@435 1812 void
duke@435 1813 CompactibleFreeListSpace::removeFreeChunkFromFreeLists(FreeChunk* fc) {
duke@435 1814 size_t size = fc->size();
duke@435 1815 assert_locked();
duke@435 1816 debug_only(verifyFreeLists());
duke@435 1817 if (size < SmallForDictionary) {
duke@435 1818 removeChunkFromIndexedFreeList(fc);
duke@435 1819 } else {
duke@435 1820 removeChunkFromDictionary(fc);
duke@435 1821 }
duke@435 1822 _bt.verify_single_block((HeapWord*)fc, size);
duke@435 1823 debug_only(verifyFreeLists());
duke@435 1824 }
duke@435 1825
duke@435 1826 void
duke@435 1827 CompactibleFreeListSpace::removeChunkFromDictionary(FreeChunk* fc) {
duke@435 1828 size_t size = fc->size();
duke@435 1829 assert_locked();
duke@435 1830 assert(fc != NULL, "null chunk");
duke@435 1831 _bt.verify_single_block((HeapWord*)fc, size);
duke@435 1832 _dictionary->removeChunk(fc);
duke@435 1833 // adjust _unallocated_block upward, as necessary
duke@435 1834 _bt.allocated((HeapWord*)fc, size);
duke@435 1835 }
duke@435 1836
duke@435 1837 void
duke@435 1838 CompactibleFreeListSpace::removeChunkFromIndexedFreeList(FreeChunk* fc) {
duke@435 1839 assert_locked();
duke@435 1840 size_t size = fc->size();
duke@435 1841 _bt.verify_single_block((HeapWord*)fc, size);
duke@435 1842 NOT_PRODUCT(
duke@435 1843 if (FLSVerifyIndexTable) {
duke@435 1844 verifyIndexedFreeList(size);
duke@435 1845 }
duke@435 1846 )
duke@435 1847 _indexedFreeList[size].removeChunk(fc);
duke@435 1848 NOT_PRODUCT(
duke@435 1849 if (FLSVerifyIndexTable) {
duke@435 1850 verifyIndexedFreeList(size);
duke@435 1851 }
duke@435 1852 )
duke@435 1853 }
duke@435 1854
duke@435 1855 FreeChunk* CompactibleFreeListSpace::bestFitSmall(size_t numWords) {
duke@435 1856 /* A hint is the next larger size that has a surplus.
duke@435 1857 Start search at a size large enough to guarantee that
duke@435 1858 the excess is >= MIN_CHUNK. */
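/* For illustration (numbers are hypothetical): a request for 10 words with
   MinChunkSize = 4 starts from the hint recorded on the (aligned) size-14
   list and follows hint() links to successively larger lists, stopping at
   the first list that has both a surplus and a non-empty head; if none is
   found below IndexSetSize, the hint is reset and NULL is returned. */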
duke@435 1859 size_t start = align_object_size(numWords + MinChunkSize);
duke@435 1860 if (start < IndexSetSize) {
duke@435 1861 FreeList* it = _indexedFreeList;
duke@435 1862 size_t hint = _indexedFreeList[start].hint();
duke@435 1863 while (hint < IndexSetSize) {
duke@435 1864 assert(hint % MinObjAlignment == 0, "hint should be aligned");
duke@435 1865 FreeList *fl = &_indexedFreeList[hint];
duke@435 1866 if (fl->surplus() > 0 && fl->head() != NULL) {
duke@435 1867 // Found a list with surplus, reset original hint
duke@435 1868 // and split out a free chunk which is returned.
duke@435 1869 _indexedFreeList[start].set_hint(hint);
duke@435 1870 FreeChunk* res = getFromListGreater(fl, numWords);
duke@435 1871 assert(res == NULL || res->isFree(),
duke@435 1872 "Should be returning a free chunk");
duke@435 1873 return res;
duke@435 1874 }
duke@435 1875 hint = fl->hint(); /* keep looking */
duke@435 1876 }
duke@435 1877 /* None found. */
duke@435 1878 it[start].set_hint(IndexSetSize);
duke@435 1879 }
duke@435 1880 return NULL;
duke@435 1881 }
duke@435 1882
duke@435 1883 /* Requires fl->size >= numWords + MinChunkSize */
duke@435 1884 FreeChunk* CompactibleFreeListSpace::getFromListGreater(FreeList* fl,
duke@435 1885 size_t numWords) {
duke@435 1886 FreeChunk *curr = fl->head();
duke@435 1887 assert(curr != NULL, "List is empty");
duke@435 1888 assert(numWords >= MinChunkSize, "Word size is too small");
duke@435 1889 size_t oldNumWords = curr->size();
duke@435 1890 assert(oldNumWords >= numWords + MinChunkSize,
duke@435 1891 "Size of chunks in the list is too small");
duke@435 1892
duke@435 1893 fl->removeChunk(curr);
duke@435 1894 // recorded indirectly by splitChunkAndReturnRemainder -
duke@435 1895 // smallSplit(oldNumWords, numWords);
duke@435 1896 FreeChunk* new_chunk = splitChunkAndReturnRemainder(curr, numWords);
duke@435 1897 // Does anything have to be done for the remainder in terms of
duke@435 1898 // fixing the card table?
duke@435 1899 assert(new_chunk == NULL || new_chunk->isFree(),
duke@435 1900 "Should be returning a free chunk");
duke@435 1901 return new_chunk;
duke@435 1902 }
duke@435 1903
duke@435 1904 FreeChunk*
duke@435 1905 CompactibleFreeListSpace::splitChunkAndReturnRemainder(FreeChunk* chunk,
duke@435 1906 size_t new_size) {
duke@435 1907 assert_locked();
duke@435 1908 size_t size = chunk->size();
duke@435 1909 assert(size > new_size, "Split from a smaller block?");
duke@435 1910 assert(is_aligned(chunk), "alignment problem");
duke@435 1911 assert(size == adjustObjectSize(size), "alignment problem");
duke@435 1912 size_t rem_size = size - new_size;
duke@435 1913 assert(rem_size == adjustObjectSize(rem_size), "alignment problem");
duke@435 1914 assert(rem_size >= MinChunkSize, "Free chunk smaller than minimum");
duke@435 1915 FreeChunk* ffc = (FreeChunk*)((HeapWord*)chunk + new_size);
duke@435 1916 assert(is_aligned(ffc), "alignment problem");
duke@435 1917 ffc->setSize(rem_size);
duke@435 1918 ffc->linkNext(NULL);
duke@435 1919 ffc->linkPrev(NULL); // Mark as a free block for other (parallel) GC threads.
duke@435 1920 // Above must occur before BOT is updated below.
duke@435 1921 // adjust block offset table
ysr@2071 1922 OrderAccess::storestore();
ysr@2071 1923 assert(chunk->isFree() && ffc->isFree(), "Error");
duke@435 1924 _bt.split_block((HeapWord*)chunk, chunk->size(), new_size);
duke@435 1925 if (rem_size < SmallForDictionary) {
duke@435 1926 bool is_par = (SharedHeap::heap()->n_par_threads() > 0);
duke@435 1927 if (is_par) _indexedFreeListParLocks[rem_size]->lock();
duke@435 1928 returnChunkToFreeList(ffc);
duke@435 1929 split(size, rem_size);
duke@435 1930 if (is_par) _indexedFreeListParLocks[rem_size]->unlock();
duke@435 1931 } else {
duke@435 1932 returnChunkToDictionary(ffc);
duke@435 1933 split(size, rem_size);
duke@435 1934 }
duke@435 1935 chunk->setSize(new_size);
duke@435 1936 return chunk;
duke@435 1937 }
duke@435 1938
duke@435 1939 void
duke@435 1940 CompactibleFreeListSpace::sweep_completed() {
duke@435 1941 // Now that space is probably plentiful, refill linear
duke@435 1942 // allocation blocks as needed.
duke@435 1943 refillLinearAllocBlocksIfNeeded();
duke@435 1944 }
duke@435 1945
duke@435 1946 void
duke@435 1947 CompactibleFreeListSpace::gc_prologue() {
duke@435 1948 assert_locked();
duke@435 1949 if (PrintFLSStatistics != 0) {
duke@435 1950 gclog_or_tty->print("Before GC:\n");
duke@435 1951 reportFreeListStatistics();
duke@435 1952 }
duke@435 1953 refillLinearAllocBlocksIfNeeded();
duke@435 1954 }
duke@435 1955
duke@435 1956 void
duke@435 1957 CompactibleFreeListSpace::gc_epilogue() {
duke@435 1958 assert_locked();
duke@435 1959 if (PrintGCDetails && Verbose && !_adaptive_freelists) {
duke@435 1960 if (_smallLinearAllocBlock._word_size == 0)
duke@435 1961 warning("CompactibleFreeListSpace(epilogue):: Linear allocation failure");
duke@435 1962 }
duke@435 1963 assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");
duke@435 1964 _promoInfo.stopTrackingPromotions();
duke@435 1965 repairLinearAllocationBlocks();
duke@435 1966 // Print Space's stats
duke@435 1967 if (PrintFLSStatistics != 0) {
duke@435 1968 gclog_or_tty->print("After GC:\n");
duke@435 1969 reportFreeListStatistics();
duke@435 1970 }
duke@435 1971 }
duke@435 1972
duke@435 1973 // Iteration support, mostly delegated from a CMS generation
duke@435 1974
duke@435 1975 void CompactibleFreeListSpace::save_marks() {
ysr@2825 1976 assert(Thread::current()->is_VM_thread(),
ysr@2825 1977 "Global variable should only be set when single-threaded");
ysr@2825 1978 // Mark the "end" of the used space at the time of this call;
duke@435 1979 // note, however, that promoted objects from this point
duke@435 1980 // on are tracked in the _promoInfo below.
ysr@2071 1981 set_saved_mark_word(unallocated_block());
ysr@2825 1982 #ifdef ASSERT
ysr@2825 1983 // Check the sanity of save_marks() etc.
ysr@2825 1984 MemRegion ur = used_region();
ysr@2825 1985 MemRegion urasm = used_region_at_save_marks();
ysr@2825 1986 assert(ur.contains(urasm),
ysr@2825 1987 err_msg(" Error at save_marks(): [" PTR_FORMAT "," PTR_FORMAT ")"
ysr@2825 1988 " should contain [" PTR_FORMAT "," PTR_FORMAT ")",
ysr@2825 1989 ur.start(), ur.end(), urasm.start(), urasm.end()));
ysr@2825 1990 #endif
duke@435 1991 // inform allocator that promotions should be tracked.
duke@435 1992 assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");
duke@435 1993 _promoInfo.startTrackingPromotions();
duke@435 1994 }
duke@435 1995
duke@435 1996 bool CompactibleFreeListSpace::no_allocs_since_save_marks() {
duke@435 1997 assert(_promoInfo.tracking(), "No preceding save_marks?");
ysr@2132 1998 assert(SharedHeap::heap()->n_par_threads() == 0,
ysr@2132 1999 "Shouldn't be called if using parallel gc.");
duke@435 2000 return _promoInfo.noPromotions();
duke@435 2001 }
duke@435 2002
duke@435 2003 #define CFLS_OOP_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix) \
duke@435 2004 \
duke@435 2005 void CompactibleFreeListSpace:: \
duke@435 2006 oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk) { \
duke@435 2007 assert(SharedHeap::heap()->n_par_threads() == 0, \
duke@435 2008 "Shouldn't be called (yet) during parallel part of gc."); \
duke@435 2009 _promoInfo.promoted_oops_iterate##nv_suffix(blk); \
duke@435 2010 /* \
duke@435 2011 * This also restores any displaced headers and removes the elements from \
duke@435 2012 * the iteration set as they are processed, so that we have a clean slate \
duke@435 2013 * at the end of the iteration. Note, thus, that if new objects are \
duke@435 2014 * promoted as a result of the iteration they are iterated over as well. \
duke@435 2015 */ \
duke@435 2016 assert(_promoInfo.noPromotions(), "_promoInfo inconsistency"); \
duke@435 2017 }
duke@435 2018
duke@435 2019 ALL_SINCE_SAVE_MARKS_CLOSURES(CFLS_OOP_SINCE_SAVE_MARKS_DEFN)
duke@435 2020
duke@435 2021
duke@435 2022 void CompactibleFreeListSpace::object_iterate_since_last_GC(ObjectClosure* cl) {
duke@435 2023 // ugghh... how would one do this efficiently for a non-contiguous space?
duke@435 2024 guarantee(false, "NYI");
duke@435 2025 }
duke@435 2026
ysr@447 2027 bool CompactibleFreeListSpace::linearAllocationWouldFail() const {
duke@435 2028 return _smallLinearAllocBlock._word_size == 0;
duke@435 2029 }
duke@435 2030
duke@435 2031 void CompactibleFreeListSpace::repairLinearAllocationBlocks() {
duke@435 2032 // Fix up linear allocation blocks to look like free blocks
duke@435 2033 repairLinearAllocBlock(&_smallLinearAllocBlock);
duke@435 2034 }
duke@435 2035
duke@435 2036 void CompactibleFreeListSpace::repairLinearAllocBlock(LinearAllocBlock* blk) {
duke@435 2037 assert_locked();
duke@435 2038 if (blk->_ptr != NULL) {
duke@435 2039 assert(blk->_word_size != 0 && blk->_word_size >= MinChunkSize,
duke@435 2040 "Minimum block size requirement");
duke@435 2041 FreeChunk* fc = (FreeChunk*)(blk->_ptr);
duke@435 2042 fc->setSize(blk->_word_size);
duke@435 2043 fc->linkPrev(NULL); // mark as free
duke@435 2044 fc->dontCoalesce();
duke@435 2045 assert(fc->isFree(), "just marked it free");
duke@435 2046 assert(fc->cantCoalesce(), "just marked it uncoalescable");
duke@435 2047 }
duke@435 2048 }
duke@435 2049
duke@435 2050 void CompactibleFreeListSpace::refillLinearAllocBlocksIfNeeded() {
duke@435 2051 assert_locked();
duke@435 2052 if (_smallLinearAllocBlock._ptr == NULL) {
duke@435 2053 assert(_smallLinearAllocBlock._word_size == 0,
duke@435 2054 "Size of linAB should be zero if the ptr is NULL");
duke@435 2055 // Reset the linAB refill and allocation size limit.
duke@435 2056 _smallLinearAllocBlock.set(0, 0, 1024*SmallForLinearAlloc, SmallForLinearAlloc);
duke@435 2057 }
duke@435 2058 refillLinearAllocBlockIfNeeded(&_smallLinearAllocBlock);
duke@435 2059 }
duke@435 2060
duke@435 2061 void
duke@435 2062 CompactibleFreeListSpace::refillLinearAllocBlockIfNeeded(LinearAllocBlock* blk) {
duke@435 2063 assert_locked();
duke@435 2064 assert((blk->_ptr == NULL && blk->_word_size == 0) ||
duke@435 2065 (blk->_ptr != NULL && blk->_word_size >= MinChunkSize),
duke@435 2066 "blk invariant");
duke@435 2067 if (blk->_ptr == NULL) {
duke@435 2068 refillLinearAllocBlock(blk);
duke@435 2069 }
duke@435 2070 if (PrintMiscellaneous && Verbose) {
duke@435 2071 if (blk->_word_size == 0) {
duke@435 2072 warning("CompactibleFreeListSpace(prologue):: Linear allocation failure");
duke@435 2073 }
duke@435 2074 }
duke@435 2075 }
duke@435 2076
duke@435 2077 void
duke@435 2078 CompactibleFreeListSpace::refillLinearAllocBlock(LinearAllocBlock* blk) {
duke@435 2079 assert_locked();
duke@435 2080 assert(blk->_word_size == 0 && blk->_ptr == NULL,
duke@435 2081 "linear allocation block should be empty");
duke@435 2082 FreeChunk* fc;
duke@435 2083 if (blk->_refillSize < SmallForDictionary &&
duke@435 2084 (fc = getChunkFromIndexedFreeList(blk->_refillSize)) != NULL) {
duke@435 2085 // A linAB's strategy might be to use small sizes to reduce
duke@435 2086 // fragmentation but still get the benefits of allocation from a
duke@435 2087 // linAB.
duke@435 2088 } else {
duke@435 2089 fc = getChunkFromDictionary(blk->_refillSize);
duke@435 2090 }
duke@435 2091 if (fc != NULL) {
duke@435 2092 blk->_ptr = (HeapWord*)fc;
duke@435 2093 blk->_word_size = fc->size();
duke@435 2094 fc->dontCoalesce(); // to prevent sweeper from sweeping us up
duke@435 2095 }
duke@435 2096 }
duke@435 2097
ysr@447 2098 // Support for concurrent collection policy decisions.
ysr@447 2099 bool CompactibleFreeListSpace::should_concurrent_collect() const {
ysr@447 2100 // In the future we might want to add in fragmentation stats --
ysr@447 2101 // including erosion of the "mountain" into this decision as well.
ysr@447 2102 return !adaptive_freelists() && linearAllocationWouldFail();
ysr@447 2103 }
ysr@447 2104
duke@435 2105 // Support for compaction
duke@435 2106
duke@435 2107 void CompactibleFreeListSpace::prepare_for_compaction(CompactPoint* cp) {
duke@435 2108 SCAN_AND_FORWARD(cp, end, block_is_obj, block_size);
duke@435 2109 // prepare_for_compaction() uses the space between live objects
duke@435 2110 // so that the later phases can skip dead space quickly. So verification
duke@435 2111 // of the free lists doesn't work after this point.
duke@435 2112 }
duke@435 2113
duke@435 2114 #define obj_size(q) adjustObjectSize(oop(q)->size())
duke@435 2115 #define adjust_obj_size(s) adjustObjectSize(s)
duke@435 2116
duke@435 2117 void CompactibleFreeListSpace::adjust_pointers() {
duke@435 2118 // In other versions of adjust_pointers(), a bail out
duke@435 2119 // based on the amount of live data in the generation
duke@435 2120 // (i.e., if 0, bail out) may be used.
duke@435 2121 // Cannot test used() == 0 here because the free lists have already
duke@435 2122 // been mangled by the compaction.
duke@435 2123
duke@435 2124 SCAN_AND_ADJUST_POINTERS(adjust_obj_size);
duke@435 2125 // See note about verification in prepare_for_compaction().
duke@435 2126 }
duke@435 2127
duke@435 2128 void CompactibleFreeListSpace::compact() {
duke@435 2129 SCAN_AND_COMPACT(obj_size);
duke@435 2130 }
duke@435 2131
duke@435 2132 // fragmentation_metric = 1 - [sum of (fbs**2) / (sum of fbs)**2]
duke@435 2133 // where fbs are the free block sizes
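// A minimal sketch of the metric's behaviour (hypothetical helper, for
// illustration only; it is not used by this file):
//
//   double frag_of_two(size_t a, size_t b) {   // two free blocks
//     double tot = (double)(a + b);
//     return 1.0 - (double)(a*a + b*b) / (tot * tot);
//   }
//   // frag_of_two(8, 8) == 0.5, whereas a single 16-word free block
//   // gives 1.0 - 256/256 == 0.0 (no fragmentation).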
duke@435 2134 double CompactibleFreeListSpace::flsFrag() const {
duke@435 2135 size_t itabFree = totalSizeInIndexedFreeLists();
duke@435 2136 double frag = 0.0;
duke@435 2137 size_t i;
duke@435 2138
duke@435 2139 for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
duke@435 2140 double sz = i;
duke@435 2141 frag += _indexedFreeList[i].count() * (sz * sz);
duke@435 2142 }
duke@435 2143
duke@435 2144 double totFree = itabFree +
duke@435 2145 _dictionary->totalChunkSize(DEBUG_ONLY(freelistLock()));
duke@435 2146 if (totFree > 0) {
duke@435 2147 frag = ((frag + _dictionary->sum_of_squared_block_sizes()) /
duke@435 2148 (totFree * totFree));
duke@435 2149 frag = (double)1.0 - frag;
duke@435 2150 } else {
duke@435 2151 assert(frag == 0.0, "Follows from totFree == 0");
duke@435 2152 }
duke@435 2153 return frag;
duke@435 2154 }
duke@435 2155
duke@435 2156 void CompactibleFreeListSpace::beginSweepFLCensus(
duke@435 2157 float inter_sweep_current,
ysr@1580 2158 float inter_sweep_estimate,
ysr@1580 2159 float intra_sweep_estimate) {
duke@435 2160 assert_locked();
duke@435 2161 size_t i;
duke@435 2162 for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
duke@435 2163 FreeList* fl = &_indexedFreeList[i];
ysr@1580 2164 if (PrintFLSStatistics > 1) {
ysr@1580 2165 gclog_or_tty->print("size[%d] : ", i);
ysr@1580 2166 }
ysr@1580 2167 fl->compute_desired(inter_sweep_current, inter_sweep_estimate, intra_sweep_estimate);
ysr@1580 2168 fl->set_coalDesired((ssize_t)((double)fl->desired() * CMSSmallCoalSurplusPercent));
duke@435 2169 fl->set_beforeSweep(fl->count());
duke@435 2170 fl->set_bfrSurp(fl->surplus());
duke@435 2171 }
ysr@1580 2172 _dictionary->beginSweepDictCensus(CMSLargeCoalSurplusPercent,
duke@435 2173 inter_sweep_current,
ysr@1580 2174 inter_sweep_estimate,
ysr@1580 2175 intra_sweep_estimate);
duke@435 2176 }
duke@435 2177
duke@435 2178 void CompactibleFreeListSpace::setFLSurplus() {
duke@435 2179 assert_locked();
duke@435 2180 size_t i;
duke@435 2181 for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
duke@435 2182 FreeList *fl = &_indexedFreeList[i];
duke@435 2183 fl->set_surplus(fl->count() -
ysr@1580 2184 (ssize_t)((double)fl->desired() * CMSSmallSplitSurplusPercent));
duke@435 2185 }
duke@435 2186 }
duke@435 2187
duke@435 2188 void CompactibleFreeListSpace::setFLHints() {
duke@435 2189 assert_locked();
duke@435 2190 size_t i;
duke@435 2191 size_t h = IndexSetSize;
duke@435 2192 for (i = IndexSetSize - 1; i != 0; i -= IndexSetStride) {
duke@435 2193 FreeList *fl = &_indexedFreeList[i];
duke@435 2194 fl->set_hint(h);
duke@435 2195 if (fl->surplus() > 0) {
duke@435 2196 h = i;
duke@435 2197 }
duke@435 2198 }
duke@435 2199 }
duke@435 2200
duke@435 2201 void CompactibleFreeListSpace::clearFLCensus() {
duke@435 2202 assert_locked();
duke@435 2203 size_t i;
duke@435 2204 for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
duke@435 2205 FreeList *fl = &_indexedFreeList[i];
duke@435 2206 fl->set_prevSweep(fl->count());
duke@435 2207 fl->set_coalBirths(0);
duke@435 2208 fl->set_coalDeaths(0);
duke@435 2209 fl->set_splitBirths(0);
duke@435 2210 fl->set_splitDeaths(0);
duke@435 2211 }
duke@435 2212 }
duke@435 2213
ysr@447 2214 void CompactibleFreeListSpace::endSweepFLCensus(size_t sweep_count) {
ysr@1580 2215 if (PrintFLSStatistics > 0) {
ysr@1580 2216 HeapWord* largestAddr = (HeapWord*) dictionary()->findLargestDict();
ysr@1580 2217 gclog_or_tty->print_cr("CMS: Large block " PTR_FORMAT,
ysr@1580 2218 largestAddr);
ysr@1580 2219 }
duke@435 2220 setFLSurplus();
duke@435 2221 setFLHints();
duke@435 2222 if (PrintGC && PrintFLSCensus > 0) {
ysr@447 2223 printFLCensus(sweep_count);
duke@435 2224 }
duke@435 2225 clearFLCensus();
duke@435 2226 assert_locked();
ysr@1580 2227 _dictionary->endSweepDictCensus(CMSLargeSplitSurplusPercent);
duke@435 2228 }
duke@435 2229
duke@435 2230 bool CompactibleFreeListSpace::coalOverPopulated(size_t size) {
duke@435 2231 if (size < SmallForDictionary) {
duke@435 2232 FreeList *fl = &_indexedFreeList[size];
duke@435 2233 return (fl->coalDesired() < 0) ||
duke@435 2234 ((int)fl->count() > fl->coalDesired());
duke@435 2235 } else {
duke@435 2236 return dictionary()->coalDictOverPopulated(size);
duke@435 2237 }
duke@435 2238 }
duke@435 2239
duke@435 2240 void CompactibleFreeListSpace::smallCoalBirth(size_t size) {
duke@435 2241 assert(size < SmallForDictionary, "Size too large for indexed list");
duke@435 2242 FreeList *fl = &_indexedFreeList[size];
duke@435 2243 fl->increment_coalBirths();
duke@435 2244 fl->increment_surplus();
duke@435 2245 }
duke@435 2246
duke@435 2247 void CompactibleFreeListSpace::smallCoalDeath(size_t size) {
duke@435 2248 assert(size < SmallForDictionary, "Size too large for indexed list");
duke@435 2249 FreeList *fl = &_indexedFreeList[size];
duke@435 2250 fl->increment_coalDeaths();
duke@435 2251 fl->decrement_surplus();
duke@435 2252 }
duke@435 2253
duke@435 2254 void CompactibleFreeListSpace::coalBirth(size_t size) {
duke@435 2255 if (size < SmallForDictionary) {
duke@435 2256 smallCoalBirth(size);
duke@435 2257 } else {
duke@435 2258 dictionary()->dictCensusUpdate(size,
duke@435 2259 false /* split */,
duke@435 2260 true /* birth */);
duke@435 2261 }
duke@435 2262 }
duke@435 2263
duke@435 2264 void CompactibleFreeListSpace::coalDeath(size_t size) {
duke@435 2265 if (size < SmallForDictionary) {
duke@435 2266 smallCoalDeath(size);
duke@435 2267 } else {
duke@435 2268 dictionary()->dictCensusUpdate(size,
duke@435 2269 false /* split */,
duke@435 2270 false /* birth */);
duke@435 2271 }
duke@435 2272 }
duke@435 2273
duke@435 2274 void CompactibleFreeListSpace::smallSplitBirth(size_t size) {
duke@435 2275 assert(size < SmallForDictionary, "Size too large for indexed list");
duke@435 2276 FreeList *fl = &_indexedFreeList[size];
duke@435 2277 fl->increment_splitBirths();
duke@435 2278 fl->increment_surplus();
duke@435 2279 }
duke@435 2280
duke@435 2281 void CompactibleFreeListSpace::smallSplitDeath(size_t size) {
duke@435 2282 assert(size < SmallForDictionary, "Size too large for indexed list");
duke@435 2283 FreeList *fl = &_indexedFreeList[size];
duke@435 2284 fl->increment_splitDeaths();
duke@435 2285 fl->decrement_surplus();
duke@435 2286 }
duke@435 2287
duke@435 2288 void CompactibleFreeListSpace::splitBirth(size_t size) {
duke@435 2289 if (size < SmallForDictionary) {
duke@435 2290 smallSplitBirth(size);
duke@435 2291 } else {
duke@435 2292 dictionary()->dictCensusUpdate(size,
duke@435 2293 true /* split */,
duke@435 2294 true /* birth */);
duke@435 2295 }
duke@435 2296 }
duke@435 2297
duke@435 2298 void CompactibleFreeListSpace::splitDeath(size_t size) {
duke@435 2299 if (size < SmallForDictionary) {
duke@435 2300 smallSplitDeath(size);
duke@435 2301 } else {
duke@435 2302 dictionary()->dictCensusUpdate(size,
duke@435 2303 true /* split */,
duke@435 2304 false /* birth */);
duke@435 2305 }
duke@435 2306 }
duke@435 2307
duke@435 2308 void CompactibleFreeListSpace::split(size_t from, size_t to1) {
duke@435 2309 size_t to2 = from - to1;
duke@435 2310 splitDeath(from);
duke@435 2311 splitBirth(to1);
duke@435 2312 splitBirth(to2);
duke@435 2313 }
duke@435 2314
duke@435 2315 void CompactibleFreeListSpace::print() const {
ysr@2294 2316 print_on(tty);
duke@435 2317 }
duke@435 2318
duke@435 2319 void CompactibleFreeListSpace::prepare_for_verify() {
duke@435 2320 assert_locked();
duke@435 2321 repairLinearAllocationBlocks();
duke@435 2322 // Verify that the SpoolBlocks look like free blocks of
duke@435 2323 // appropriate sizes... To be done ...
duke@435 2324 }
duke@435 2325
duke@435 2326 class VerifyAllBlksClosure: public BlkClosure {
coleenp@548 2327 private:
duke@435 2328 const CompactibleFreeListSpace* _sp;
duke@435 2329 const MemRegion _span;
ysr@2071 2330 HeapWord* _last_addr;
ysr@2071 2331 size_t _last_size;
ysr@2071 2332 bool _last_was_obj;
ysr@2071 2333 bool _last_was_live;
duke@435 2334
duke@435 2335 public:
duke@435 2336 VerifyAllBlksClosure(const CompactibleFreeListSpace* sp,
ysr@2071 2337 MemRegion span) : _sp(sp), _span(span),
ysr@2071 2338 _last_addr(NULL), _last_size(0),
ysr@2071 2339 _last_was_obj(false), _last_was_live(false) { }
duke@435 2340
coleenp@548 2341 virtual size_t do_blk(HeapWord* addr) {
duke@435 2342 size_t res;
ysr@2071 2343 bool was_obj = false;
ysr@2071 2344 bool was_live = false;
duke@435 2345 if (_sp->block_is_obj(addr)) {
ysr@2071 2346 was_obj = true;
duke@435 2347 oop p = oop(addr);
duke@435 2348 guarantee(p->is_oop(), "Should be an oop");
duke@435 2349 res = _sp->adjustObjectSize(p->size());
duke@435 2350 if (_sp->obj_is_alive(addr)) {
ysr@2071 2351 was_live = true;
duke@435 2352 p->verify();
duke@435 2353 }
duke@435 2354 } else {
duke@435 2355 FreeChunk* fc = (FreeChunk*)addr;
duke@435 2356 res = fc->size();
duke@435 2357 if (FLSVerifyLists && !fc->cantCoalesce()) {
duke@435 2358 guarantee(_sp->verifyChunkInFreeLists(fc),
duke@435 2359 "Chunk should be on a free list");
duke@435 2360 }
duke@435 2361 }
ysr@2071 2362 if (res == 0) {
ysr@2071 2363 gclog_or_tty->print_cr("Livelock: no rank reduction!");
ysr@2071 2364 gclog_or_tty->print_cr(
ysr@2071 2365 " Current: addr = " PTR_FORMAT ", size = " SIZE_FORMAT ", obj = %s, live = %s \n"
ysr@2071 2366 " Previous: addr = " PTR_FORMAT ", size = " SIZE_FORMAT ", obj = %s, live = %s \n",
ysr@2071 2367 addr, res, was_obj ?"true":"false", was_live ?"true":"false",
ysr@2071 2368 _last_addr, _last_size, _last_was_obj?"true":"false", _last_was_live?"true":"false");
ysr@2071 2369 _sp->print_on(gclog_or_tty);
ysr@2071 2370 guarantee(false, "Seppuku!");
ysr@2071 2371 }
ysr@2071 2372 _last_addr = addr;
ysr@2071 2373 _last_size = res;
ysr@2071 2374 _last_was_obj = was_obj;
ysr@2071 2375 _last_was_live = was_live;
duke@435 2376 return res;
duke@435 2377 }
duke@435 2378 };
duke@435 2379
duke@435 2380 class VerifyAllOopsClosure: public OopClosure {
coleenp@548 2381 private:
duke@435 2382 const CMSCollector* _collector;
duke@435 2383 const CompactibleFreeListSpace* _sp;
duke@435 2384 const MemRegion _span;
duke@435 2385 const bool _past_remark;
duke@435 2386 const CMSBitMap* _bit_map;
duke@435 2387
coleenp@548 2388 protected:
coleenp@548 2389 void do_oop(void* p, oop obj) {
coleenp@548 2390 if (_span.contains(obj)) { // the interior oop points into CMS heap
coleenp@548 2391 if (!_span.contains(p)) { // reference from outside CMS heap
coleenp@548 2392 // Should be a valid object; the first disjunct below allows
coleenp@548 2393 // us to sidestep an assertion in block_is_obj() that insists
coleenp@548 2394 // that p be in _sp. Note that several generations (and spaces)
coleenp@548 2395 // are spanned by _span (CMS heap) above.
coleenp@548 2396 guarantee(!_sp->is_in_reserved(obj) ||
coleenp@548 2397 _sp->block_is_obj((HeapWord*)obj),
coleenp@548 2398 "Should be an object");
coleenp@548 2399 guarantee(obj->is_oop(), "Should be an oop");
coleenp@548 2400 obj->verify();
coleenp@548 2401 if (_past_remark) {
coleenp@548 2402 // Remark has been completed, the object should be marked
coleenp@548 2403           guarantee(_bit_map->isMarked((HeapWord*)obj), "Should be marked");
coleenp@548 2404 }
coleenp@548 2405 } else { // reference within CMS heap
coleenp@548 2406 if (_past_remark) {
coleenp@548 2407 // Remark has been completed -- so the referent should have
coleenp@548 2408 // been marked, if referring object is.
coleenp@548 2409 if (_bit_map->isMarked(_collector->block_start(p))) {
coleenp@548 2410 guarantee(_bit_map->isMarked((HeapWord*)obj), "Marking error?");
coleenp@548 2411 }
coleenp@548 2412 }
coleenp@548 2413 }
coleenp@548 2414 } else if (_sp->is_in_reserved(p)) {
coleenp@548 2415 // the reference is from FLS, and points out of FLS
coleenp@548 2416 guarantee(obj->is_oop(), "Should be an oop");
coleenp@548 2417 obj->verify();
coleenp@548 2418 }
coleenp@548 2419 }
coleenp@548 2420
coleenp@548 2421 template <class T> void do_oop_work(T* p) {
coleenp@548 2422 T heap_oop = oopDesc::load_heap_oop(p);
coleenp@548 2423 if (!oopDesc::is_null(heap_oop)) {
coleenp@548 2424 oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
coleenp@548 2425 do_oop(p, obj);
coleenp@548 2426 }
coleenp@548 2427 }
coleenp@548 2428
duke@435 2429 public:
duke@435 2430 VerifyAllOopsClosure(const CMSCollector* collector,
duke@435 2431 const CompactibleFreeListSpace* sp, MemRegion span,
duke@435 2432 bool past_remark, CMSBitMap* bit_map) :
duke@435 2433 OopClosure(), _collector(collector), _sp(sp), _span(span),
duke@435 2434 _past_remark(past_remark), _bit_map(bit_map) { }
duke@435 2435
coleenp@548 2436 virtual void do_oop(oop* p) { VerifyAllOopsClosure::do_oop_work(p); }
coleenp@548 2437 virtual void do_oop(narrowOop* p) { VerifyAllOopsClosure::do_oop_work(p); }
duke@435 2438 };
duke@435 2439
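// Exhaustive verification of the space, performed with the free-list lock
// held strongly: checks the promotion info and the dictionary, optionally the
// indexed free lists, every block in the space, and (under
// FLSVerifyAllHeapReferences) every heap reference into or out of this space,
// as well as the block offset table when VerifyObjectStartArray is set.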
duke@435 2440 void CompactibleFreeListSpace::verify(bool ignored) const {
duke@435 2441 assert_lock_strong(&_freelistLock);
duke@435 2442 verify_objects_initialized();
duke@435 2443 MemRegion span = _collector->_span;
duke@435 2444 bool past_remark = (_collector->abstract_state() ==
duke@435 2445 CMSCollector::Sweeping);
duke@435 2446
duke@435 2447 ResourceMark rm;
duke@435 2448 HandleMark hm;
duke@435 2449
duke@435 2450 // Check integrity of CFL data structures
duke@435 2451 _promoInfo.verify();
duke@435 2452 _dictionary->verify();
duke@435 2453 if (FLSVerifyIndexTable) {
duke@435 2454 verifyIndexedFreeLists();
duke@435 2455 }
duke@435 2456 // Check integrity of all objects and free blocks in space
duke@435 2457 {
duke@435 2458 VerifyAllBlksClosure cl(this, span);
duke@435 2459 ((CompactibleFreeListSpace*)this)->blk_iterate(&cl); // cast off const
duke@435 2460 }
duke@435 2461 // Check that all references in the heap to FLS
duke@435 2462 // are to valid objects in FLS or that references in
duke@435 2463 // FLS are to valid objects elsewhere in the heap
duke@435 2464   if (FLSVerifyAllHeapReferences) {
duke@435 2466 VerifyAllOopsClosure cl(_collector, this, span, past_remark,
duke@435 2467 _collector->markBitMap());
duke@435 2468 CollectedHeap* ch = Universe::heap();
duke@435 2469 ch->oop_iterate(&cl); // all oops in generations
duke@435 2470 ch->permanent_oop_iterate(&cl); // all oops in perm gen
duke@435 2471 }
duke@435 2472
duke@435 2473 if (VerifyObjectStartArray) {
duke@435 2474 // Verify the block offset table
duke@435 2475 _bt.verify();
duke@435 2476 }
duke@435 2477 }
duke@435 2478
duke@435 2479 #ifndef PRODUCT
duke@435 2480 void CompactibleFreeListSpace::verifyFreeLists() const {
duke@435 2481 if (FLSVerifyLists) {
duke@435 2482 _dictionary->verify();
duke@435 2483 verifyIndexedFreeLists();
duke@435 2484 } else {
duke@435 2485 if (FLSVerifyDictionary) {
duke@435 2486 _dictionary->verify();
duke@435 2487 }
duke@435 2488 if (FLSVerifyIndexTable) {
duke@435 2489 verifyIndexedFreeLists();
duke@435 2490 }
duke@435 2491 }
duke@435 2492 }
duke@435 2493 #endif
duke@435 2494
duke@435 2495 void CompactibleFreeListSpace::verifyIndexedFreeLists() const {
duke@435 2496 size_t i = 0;
duke@435 2497 for (; i < MinChunkSize; i++) {
duke@435 2498 guarantee(_indexedFreeList[i].head() == NULL, "should be NULL");
duke@435 2499 }
duke@435 2500 for (; i < IndexSetSize; i++) {
duke@435 2501 verifyIndexedFreeList(i);
duke@435 2502 }
duke@435 2503 }
duke@435 2504
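// Verify a single indexed free list: only slots of at least MinChunkSize that
// fall on an IndexSetStride boundary may be non-empty; every chunk on the
// list must have the list's size, be marked free and be correctly doubly
// linked; and the number of chunks must match the list's recorded count.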
duke@435 2505 void CompactibleFreeListSpace::verifyIndexedFreeList(size_t size) const {
ysr@1580 2506 FreeChunk* fc = _indexedFreeList[size].head();
ysr@1580 2507 FreeChunk* tail = _indexedFreeList[size].tail();
ysr@1580 2508 size_t num = _indexedFreeList[size].count();
ysr@1580 2509 size_t n = 0;
ysr@3220 2510 guarantee(((size >= MinChunkSize) && (size % IndexSetStride == 0)) || fc == NULL,
ysr@3220 2511 "Slot should have been empty");
ysr@1580 2512 for (; fc != NULL; fc = fc->next(), n++) {
duke@435 2513 guarantee(fc->size() == size, "Size inconsistency");
duke@435 2514 guarantee(fc->isFree(), "!free?");
duke@435 2515 guarantee(fc->next() == NULL || fc->next()->prev() == fc, "Broken list");
ysr@1580 2516 guarantee((fc->next() == NULL) == (fc == tail), "Incorrect tail");
duke@435 2517 }
ysr@1580 2518 guarantee(n == num, "Incorrect count");
duke@435 2519 }
duke@435 2520
duke@435 2521 #ifndef PRODUCT
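// Debug-only sanity checks on the static free-list configuration: the
// dictionary's minimum size must not exceed IndexSetSize, a TreeChunk must
// occupy exactly MIN_TREE_CHUNK_SIZE words, and IndexSetStart/IndexSetStride
// must be the expected (3,1) or (4,2) pairs so that strided loops over the
// index set are well formed.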
ysr@3220 2522 void CompactibleFreeListSpace::check_free_list_consistency() const {
duke@435 2523 assert(_dictionary->minSize() <= IndexSetSize,
duke@435 2524 "Some sizes can't be allocated without recourse to"
duke@435 2525 " linear allocation buffers");
duke@435 2526 assert(MIN_TREE_CHUNK_SIZE*HeapWordSize == sizeof(TreeChunk),
duke@435 2527 "else MIN_TREE_CHUNK_SIZE is wrong");
ysr@3220 2528 assert((IndexSetStride == 2 && IndexSetStart == 4) || // 32-bit
ysr@3220 2529 (IndexSetStride == 1 && IndexSetStart == 3), "just checking"); // 64-bit
duke@435 2530 assert((IndexSetStride != 2) || (MinChunkSize % 2 == 0),
duke@435 2531 "Some for-loops may be incorrectly initialized");
duke@435 2532 assert((IndexSetStride != 2) || (IndexSetSize % 2 == 1),
duke@435 2533 "For-loops that iterate over IndexSet with stride 2 may be wrong");
duke@435 2534 }
duke@435 2535 #endif
duke@435 2536
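// Print a per-size census of the indexed free lists at the end of a sweep,
// accumulating the per-list statistics into a grand total, then report the
// total free space in the indexed lists, the growth and deficit ratios, and
// finally the dictionary's own census.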
ysr@447 2537 void CompactibleFreeListSpace::printFLCensus(size_t sweep_count) const {
duke@435 2538 assert_lock_strong(&_freelistLock);
ysr@447 2539 FreeList total;
ysr@447 2540 gclog_or_tty->print("end sweep# " SIZE_FORMAT "\n", sweep_count);
ysr@447 2541 FreeList::print_labels_on(gclog_or_tty, "size");
duke@435 2542 size_t totalFree = 0;
duke@435 2543 for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
duke@435 2544 const FreeList *fl = &_indexedFreeList[i];
ysr@447 2545 totalFree += fl->count() * fl->size();
ysr@447 2546 if (i % (40*IndexSetStride) == 0) {
ysr@447 2547 FreeList::print_labels_on(gclog_or_tty, "size");
ysr@447 2548 }
ysr@447 2549 fl->print_on(gclog_or_tty);
ysr@447 2550 total.set_bfrSurp( total.bfrSurp() + fl->bfrSurp() );
ysr@447 2551 total.set_surplus( total.surplus() + fl->surplus() );
ysr@447 2552 total.set_desired( total.desired() + fl->desired() );
ysr@447 2553 total.set_prevSweep( total.prevSweep() + fl->prevSweep() );
ysr@447 2554 total.set_beforeSweep(total.beforeSweep() + fl->beforeSweep());
ysr@447 2555 total.set_count( total.count() + fl->count() );
ysr@447 2556 total.set_coalBirths( total.coalBirths() + fl->coalBirths() );
ysr@447 2557 total.set_coalDeaths( total.coalDeaths() + fl->coalDeaths() );
ysr@447 2558 total.set_splitBirths(total.splitBirths() + fl->splitBirths());
ysr@447 2559 total.set_splitDeaths(total.splitDeaths() + fl->splitDeaths());
duke@435 2560 }
ysr@447 2561 total.print_on(gclog_or_tty, "TOTAL");
ysr@447 2562 gclog_or_tty->print_cr("Total free in indexed lists "
ysr@447 2563 SIZE_FORMAT " words", totalFree);
duke@435 2564 gclog_or_tty->print("growth: %8.5f deficit: %8.5f\n",
ysr@447 2565 (double)(total.splitBirths()+total.coalBirths()-total.splitDeaths()-total.coalDeaths())/
ysr@447 2566 (total.prevSweep() != 0 ? (double)total.prevSweep() : 1.0),
ysr@447 2567 (double)(total.desired() - total.count())/(total.desired() != 0 ? (double)total.desired() : 1.0));
duke@435 2568 _dictionary->printDictCensus();
duke@435 2569 }
duke@435 2570
ysr@1580 2571 ///////////////////////////////////////////////////////////////////////////
ysr@1580 2572 // CFLS_LAB
ysr@1580 2573 ///////////////////////////////////////////////////////////////////////////
ysr@1580 2574
ysr@1580 2575 #define VECTOR_257(x) \
ysr@1580 2576 /* 1 2 3 4 5 6 7 8 9 1x 11 12 13 14 15 16 17 18 19 2x 21 22 23 24 25 26 27 28 29 3x 31 32 */ \
ysr@1580 2577 { x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, \
ysr@1580 2578 x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, \
ysr@1580 2579 x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, \
ysr@1580 2580 x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, \
ysr@1580 2581 x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, \
ysr@1580 2582 x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, \
ysr@1580 2583 x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, \
ysr@1580 2584 x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, \
ysr@1580 2585 x }
ysr@1580 2586
ysr@1580 2587 // Initialize with default setting of CMSParPromoteBlocksToClaim, _not_
ysr@1580 2588 // OldPLABSize, whose static default is different; if overridden at the
ysr@1580 2589 // command-line, this will get reinitialized via a call to
ysr@1580 2590 // modify_initialization() below.
ysr@1580 2591 AdaptiveWeightedAverage CFLS_LAB::_blocks_to_claim[] =
ysr@1580 2592 VECTOR_257(AdaptiveWeightedAverage(OldPLABWeight, (float)CMSParPromoteBlocksToClaim));
ysr@1580 2593 size_t CFLS_LAB::_global_num_blocks[] = VECTOR_257(0);
ysr@1580 2594 int CFLS_LAB::_global_num_workers[] = VECTOR_257(0);
duke@435 2595
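// A CFLS_LAB starts out empty: each thread-local indexed free list is given
// its size and the per-size block-usage counters are zeroed.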
duke@435 2596 CFLS_LAB::CFLS_LAB(CompactibleFreeListSpace* cfls) :
duke@435 2597 _cfls(cfls)
duke@435 2598 {
ysr@1580 2599 assert(CompactibleFreeListSpace::IndexSetSize == 257, "Modify VECTOR_257() macro above");
duke@435 2600 for (size_t i = CompactibleFreeListSpace::IndexSetStart;
duke@435 2601 i < CompactibleFreeListSpace::IndexSetSize;
duke@435 2602 i += CompactibleFreeListSpace::IndexSetStride) {
duke@435 2603 _indexedFreeList[i].set_size(i);
ysr@1580 2604 _num_blocks[i] = 0;
ysr@1580 2605 }
ysr@1580 2606 }
ysr@1580 2607
ysr@1580 2608 static bool _CFLS_LAB_modified = false;
ysr@1580 2609
ysr@1580 2610 void CFLS_LAB::modify_initialization(size_t n, unsigned wt) {
ysr@1580 2611 assert(!_CFLS_LAB_modified, "Call only once");
ysr@1580 2612 _CFLS_LAB_modified = true;
ysr@1580 2613 for (size_t i = CompactibleFreeListSpace::IndexSetStart;
ysr@1580 2614 i < CompactibleFreeListSpace::IndexSetSize;
ysr@1580 2615 i += CompactibleFreeListSpace::IndexSetStride) {
ysr@1580 2616 _blocks_to_claim[i].modify(n, wt, true /* force */);
duke@435 2617 }
duke@435 2618 }
duke@435 2619
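// Allocate a block of word_sz heap words. Requests of IndexSetSize words or
// more go to the shared dictionary under the parallel dictionary-allocation
// lock; smaller requests are served from the thread-local indexed free list,
// which is refilled from the global pool when it runs dry.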
duke@435 2620 HeapWord* CFLS_LAB::alloc(size_t word_sz) {
duke@435 2621 FreeChunk* res;
ysr@2132 2622 assert(word_sz == _cfls->adjustObjectSize(word_sz), "Error");
duke@435 2623 if (word_sz >= CompactibleFreeListSpace::IndexSetSize) {
duke@435 2624 // This locking manages sync with other large object allocations.
duke@435 2625 MutexLockerEx x(_cfls->parDictionaryAllocLock(),
duke@435 2626 Mutex::_no_safepoint_check_flag);
duke@435 2627 res = _cfls->getChunkFromDictionaryExact(word_sz);
duke@435 2628 if (res == NULL) return NULL;
duke@435 2629 } else {
duke@435 2630 FreeList* fl = &_indexedFreeList[word_sz];
duke@435 2631 if (fl->count() == 0) {
duke@435 2632 // Attempt to refill this local free list.
ysr@1580 2633 get_from_global_pool(word_sz, fl);
duke@435 2634 // If it didn't work, give up.
duke@435 2635 if (fl->count() == 0) return NULL;
duke@435 2636 }
duke@435 2637 res = fl->getChunkAtHead();
duke@435 2638 assert(res != NULL, "Why was count non-zero?");
duke@435 2639 }
duke@435 2640 res->markNotFree();
duke@435 2641 assert(!res->isFree(), "shouldn't be marked free");
coleenp@622 2642 assert(oop(res)->klass_or_null() == NULL, "should look uninitialized");
duke@435 2643 // mangle a just allocated object with a distinct pattern.
duke@435 2644 debug_only(res->mangleAllocated(word_sz));
duke@435 2645 return (HeapWord*)res;
duke@435 2646 }
duke@435 2647
ysr@1580 2648 // Get a chunk of blocks of the right size and update related
ysr@1580 2649 // book-keeping stats
ysr@1580 2650 void CFLS_LAB::get_from_global_pool(size_t word_sz, FreeList* fl) {
ysr@1580 2651 // Get the #blocks we want to claim
ysr@1580 2652 size_t n_blks = (size_t)_blocks_to_claim[word_sz].average();
ysr@1580 2653 assert(n_blks > 0, "Error");
ysr@1580 2654 assert(ResizePLAB || n_blks == OldPLABSize, "Error");
ysr@1580 2655 // In some cases, when the application has a phase change,
ysr@1580 2656 // there may be a sudden and sharp shift in the object survival
ysr@1580 2657 // profile, and updating the counts at the end of a scavenge
ysr@1580 2658 // may not be quick enough, giving rise to large scavenge pauses
ysr@1580 2659 // during these phase changes. It is beneficial to detect such
ysr@1580 2660 // changes on-the-fly during a scavenge and avoid such a phase-change
ysr@1580 2661 // pothole. The following code is a heuristic attempt to do that.
ysr@1580 2662 // It is protected by a product flag until we have gained
ysr@1580 2663 // enough experience with this heuristic and fine-tuned its behaviour.
ysr@1580 2664 // WARNING: This might increase fragmentation if we overreact to
ysr@1580 2665 // small spikes, so some kind of historical smoothing based on
ysr@1580 2666 // previous experience with the greater reactivity might be useful.
ysr@1580 2667 // Lacking sufficient experience, CMSOldPLABResizeQuicker is disabled by
ysr@1580 2668 // default.
ysr@1580 2669 if (ResizeOldPLAB && CMSOldPLABResizeQuicker) {
ysr@1580 2670 size_t multiple = _num_blocks[word_sz]/(CMSOldPLABToleranceFactor*CMSOldPLABNumRefills*n_blks);
ysr@1580 2671 n_blks += CMSOldPLABReactivityFactor*multiple*n_blks;
ysr@1580 2672 n_blks = MIN2(n_blks, CMSOldPLABMax);
ysr@1580 2673 }
ysr@1580 2674 assert(n_blks > 0, "Error");
ysr@1580 2675 _cfls->par_get_chunk_of_blocks(word_sz, n_blks, fl);
ysr@1580 2676 // Update stats table entry for this block size
ysr@1580 2677 _num_blocks[word_sz] += fl->count();
ysr@1580 2678 }
ysr@1580 2679
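// Recompute, for each block size, the desired number of blocks to claim per
// refill: sample the blocks-per-worker usage gathered during the last
// scavenge into the historical average (when ResizeOldPLAB is set) and reset
// the global counters for the next round.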
ysr@1580 2680 void CFLS_LAB::compute_desired_plab_size() {
ysr@1580 2681 for (size_t i = CompactibleFreeListSpace::IndexSetStart;
duke@435 2682 i < CompactibleFreeListSpace::IndexSetSize;
duke@435 2683 i += CompactibleFreeListSpace::IndexSetStride) {
ysr@1580 2684 assert((_global_num_workers[i] == 0) == (_global_num_blocks[i] == 0),
ysr@1580 2685 "Counter inconsistency");
ysr@1580 2686 if (_global_num_workers[i] > 0) {
ysr@1580 2687 // Need to smooth wrt historical average
ysr@1580 2688 if (ResizeOldPLAB) {
ysr@1580 2689 _blocks_to_claim[i].sample(
ysr@1580 2690 MAX2((size_t)CMSOldPLABMin,
ysr@1580 2691 MIN2((size_t)CMSOldPLABMax,
ysr@1580 2692 _global_num_blocks[i]/(_global_num_workers[i]*CMSOldPLABNumRefills))));
ysr@1580 2693 }
ysr@1580 2694 // Reset counters for next round
ysr@1580 2695 _global_num_workers[i] = 0;
ysr@1580 2696 _global_num_blocks[i] = 0;
ysr@1580 2697 if (PrintOldPLAB) {
ysr@1580 2698         gclog_or_tty->print_cr("[" SIZE_FORMAT "]: " SIZE_FORMAT, i, (size_t)_blocks_to_claim[i].average());
ysr@1580 2699 }
duke@435 2700 }
duke@435 2701 }
duke@435 2702 }
duke@435 2703
ysr@3220 2704 // If this is changed in the future to allow parallel
ysr@3220 2705 // access, one would need to take the FL locks and,
ysr@3220 2706 // depending on how it is used, stagger access from
ysr@3220 2707 // parallel threads to reduce contention.
ysr@1580 2708 void CFLS_LAB::retire(int tid) {
ysr@1580 2709 // We run this single threaded with the world stopped;
ysr@1580 2710 // so no need for locks and such.
ysr@1580 2711 NOT_PRODUCT(Thread* t = Thread::current();)
ysr@1580 2712 assert(Thread::current()->is_VM_thread(), "Error");
ysr@1580 2713 for (size_t i = CompactibleFreeListSpace::IndexSetStart;
ysr@1580 2714 i < CompactibleFreeListSpace::IndexSetSize;
ysr@1580 2715 i += CompactibleFreeListSpace::IndexSetStride) {
ysr@1580 2716 assert(_num_blocks[i] >= (size_t)_indexedFreeList[i].count(),
ysr@1580 2717 "Can't retire more than what we obtained");
ysr@1580 2718 if (_num_blocks[i] > 0) {
ysr@1580 2719 size_t num_retire = _indexedFreeList[i].count();
ysr@1580 2720 assert(_num_blocks[i] > num_retire, "Should have used at least one");
ysr@1580 2721 {
ysr@3220 2722 // MutexLockerEx x(_cfls->_indexedFreeListParLocks[i],
ysr@3220 2723 // Mutex::_no_safepoint_check_flag);
ysr@3220 2724
ysr@1580 2725 // Update globals stats for num_blocks used
ysr@1580 2726 _global_num_blocks[i] += (_num_blocks[i] - num_retire);
ysr@1580 2727 _global_num_workers[i]++;
ysr@1580 2728 assert(_global_num_workers[i] <= (ssize_t)ParallelGCThreads, "Too big");
ysr@1580 2729 if (num_retire > 0) {
ysr@1580 2730 _cfls->_indexedFreeList[i].prepend(&_indexedFreeList[i]);
ysr@1580 2731 // Reset this list.
ysr@1580 2732 _indexedFreeList[i] = FreeList();
ysr@1580 2733 _indexedFreeList[i].set_size(i);
ysr@1580 2734 }
ysr@1580 2735 }
ysr@1580 2736 if (PrintOldPLAB) {
ysr@1580 2737       gclog_or_tty->print_cr("%d[" SIZE_FORMAT "]: " SIZE_FORMAT "/" SIZE_FORMAT "/" SIZE_FORMAT,
ysr@1580 2738                              tid, i, num_retire, _num_blocks[i], (size_t)_blocks_to_claim[i].average());
ysr@1580 2739 }
ysr@1580 2740 // Reset stats for next round
ysr@1580 2741 _num_blocks[i] = 0;
ysr@1580 2742 }
ysr@1580 2743 }
ysr@1580 2744 }
ysr@1580 2745
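// Obtain n blocks of word_sz words each for a CFLS_LAB refill, preferring
// chunks (or larger multiples that can be split) from the indexed free lists
// and falling back to carving up a larger chunk from the dictionary.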
ysr@1580 2746 void CompactibleFreeListSpace::par_get_chunk_of_blocks(size_t word_sz, size_t n, FreeList* fl) {
duke@435 2747 assert(fl->count() == 0, "Precondition.");
duke@435 2748 assert(word_sz < CompactibleFreeListSpace::IndexSetSize,
duke@435 2749 "Precondition");
duke@435 2750
ysr@1580 2751   // We'll try all multiples of word_sz in the indexed set: word_sz itself
ysr@1580 2752   // first and, if CMSSplitIndexedFreeListBlocks, larger multiples as well.
ysr@1580 2753   // Failing that, we get a big chunk from the dictionary and split it up.
ysr@1580 2754 {
ysr@1580 2755 bool found;
ysr@1580 2756 int k;
ysr@1580 2757 size_t cur_sz;
ysr@1580 2758 for (k = 1, cur_sz = k * word_sz, found = false;
ysr@1580 2759 (cur_sz < CompactibleFreeListSpace::IndexSetSize) &&
ysr@1580 2760 (CMSSplitIndexedFreeListBlocks || k <= 1);
ysr@1580 2761 k++, cur_sz = k * word_sz) {
ysr@1580 2762 FreeList fl_for_cur_sz; // Empty.
ysr@1580 2763 fl_for_cur_sz.set_size(cur_sz);
ysr@1580 2764 {
ysr@1580 2765 MutexLockerEx x(_indexedFreeListParLocks[cur_sz],
ysr@1580 2766 Mutex::_no_safepoint_check_flag);
ysr@2071 2767 FreeList* gfl = &_indexedFreeList[cur_sz];
ysr@1580 2768 if (gfl->count() != 0) {
ysr@1580 2769 // nn is the number of chunks of size cur_sz that
ysr@1580 2770 // we'd need to split k-ways each, in order to create
ysr@1580 2771 // "n" chunks of size word_sz each.
ysr@1580 2772 const size_t nn = MAX2(n/k, (size_t)1);
ysr@1580 2773 gfl->getFirstNChunksFromList(nn, &fl_for_cur_sz);
ysr@1580 2774 found = true;
ysr@1580 2775 if (k > 1) {
ysr@1580 2776 // Update split death stats for the cur_sz-size blocks list:
ysr@1580 2777 // we increment the split death count by the number of blocks
ysr@1580 2778 // we just took from the cur_sz-size blocks list and which
ysr@1580 2779 // we will be splitting below.
ysr@2071 2780 ssize_t deaths = gfl->splitDeaths() +
ysr@1580 2781 fl_for_cur_sz.count();
ysr@2071 2782 gfl->set_splitDeaths(deaths);
ysr@1580 2783 }
ysr@1580 2784 }
ysr@1580 2785 }
ysr@1580 2786 // Now transfer fl_for_cur_sz to fl. Common case, we hope, is k = 1.
ysr@1580 2787 if (found) {
ysr@1580 2788 if (k == 1) {
ysr@1580 2789 fl->prepend(&fl_for_cur_sz);
ysr@1580 2790 } else {
ysr@1580 2791 // Divide each block on fl_for_cur_sz up k ways.
ysr@1580 2792 FreeChunk* fc;
ysr@1580 2793 while ((fc = fl_for_cur_sz.getChunkAtHead()) != NULL) {
ysr@1580 2794 // Must do this in reverse order, so that anybody attempting to
ysr@1580 2795 // access the main chunk sees it as a single free block until we
ysr@1580 2796 // change it.
ysr@1580 2797 size_t fc_size = fc->size();
ysr@2071 2798 assert(fc->isFree(), "Error");
ysr@1580 2799 for (int i = k-1; i >= 0; i--) {
ysr@1580 2800 FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz);
ysr@2071 2801 assert((i != 0) ||
ysr@2071 2802 ((fc == ffc) && ffc->isFree() &&
ysr@2071 2803 (ffc->size() == k*word_sz) && (fc_size == word_sz)),
ysr@2071 2804 "Counting error");
ysr@1580 2805 ffc->setSize(word_sz);
ysr@2071 2806 ffc->linkPrev(NULL); // Mark as a free block for other (parallel) GC threads.
ysr@1580 2807 ffc->linkNext(NULL);
ysr@1580 2808 // Above must occur before BOT is updated below.
ysr@2071 2809 OrderAccess::storestore();
ysr@2071 2810 // splitting from the right, fc_size == i * word_sz
ysr@2071 2811 _bt.mark_block((HeapWord*)ffc, word_sz, true /* reducing */);
ysr@1580 2812 fc_size -= word_sz;
ysr@2071 2813 assert(fc_size == i*word_sz, "Error");
ysr@2071 2814 _bt.verify_not_unallocated((HeapWord*)ffc, word_sz);
ysr@1580 2815 _bt.verify_single_block((HeapWord*)fc, fc_size);
ysr@2071 2816 _bt.verify_single_block((HeapWord*)ffc, word_sz);
ysr@1580 2817 // Push this on "fl".
ysr@1580 2818 fl->returnChunkAtHead(ffc);
ysr@1580 2819 }
ysr@1580 2820 // TRAP
ysr@1580 2821 assert(fl->tail()->next() == NULL, "List invariant.");
ysr@1580 2822 }
ysr@1580 2823 }
ysr@1580 2824 // Update birth stats for this block size.
ysr@1580 2825 size_t num = fl->count();
ysr@1580 2826 MutexLockerEx x(_indexedFreeListParLocks[word_sz],
ysr@1580 2827 Mutex::_no_safepoint_check_flag);
ysr@1580 2828 ssize_t births = _indexedFreeList[word_sz].splitBirths() + num;
ysr@1580 2829 _indexedFreeList[word_sz].set_splitBirths(births);
ysr@1580 2830 return;
duke@435 2831 }
duke@435 2832 }
duke@435 2833 }
duke@435 2834 // Otherwise, we'll split a block from the dictionary.
duke@435 2835 FreeChunk* fc = NULL;
duke@435 2836 FreeChunk* rem_fc = NULL;
duke@435 2837 size_t rem;
duke@435 2838 {
duke@435 2839 MutexLockerEx x(parDictionaryAllocLock(),
duke@435 2840 Mutex::_no_safepoint_check_flag);
duke@435 2841 while (n > 0) {
duke@435 2842 fc = dictionary()->getChunk(MAX2(n * word_sz,
duke@435 2843 _dictionary->minSize()),
duke@435 2844 FreeBlockDictionary::atLeast);
duke@435 2845 if (fc != NULL) {
ysr@2071 2846 _bt.allocated((HeapWord*)fc, fc->size(), true /* reducing */); // update _unallocated_blk
duke@435 2847 dictionary()->dictCensusUpdate(fc->size(),
duke@435 2848 true /*split*/,
duke@435 2849 false /*birth*/);
duke@435 2850 break;
duke@435 2851 } else {
duke@435 2852 n--;
duke@435 2853 }
duke@435 2854 }
duke@435 2855 if (fc == NULL) return;
ysr@2071 2856 // Otherwise, split up that block.
ysr@1580 2857 assert((ssize_t)n >= 1, "Control point invariant");
ysr@2071 2858 assert(fc->isFree(), "Error: should be a free block");
ysr@2071 2859 _bt.verify_single_block((HeapWord*)fc, fc->size());
ysr@1580 2860 const size_t nn = fc->size() / word_sz;
duke@435 2861 n = MIN2(nn, n);
ysr@1580 2862 assert((ssize_t)n >= 1, "Control point invariant");
duke@435 2863 rem = fc->size() - n * word_sz;
duke@435 2864 // If there is a remainder, and it's too small, allocate one fewer.
duke@435 2865 if (rem > 0 && rem < MinChunkSize) {
duke@435 2866 n--; rem += word_sz;
duke@435 2867 }
jmasa@1583 2868 // Note that at this point we may have n == 0.
jmasa@1583 2869 assert((ssize_t)n >= 0, "Control point invariant");
jmasa@1583 2870
jmasa@1583 2871 // If n is 0, the chunk fc that was found is not large
jmasa@1583 2872 // enough to leave a viable remainder. We are unable to
jmasa@1583 2873 // allocate even one block. Return fc to the
jmasa@1583 2874 // dictionary and return, leaving "fl" empty.
jmasa@1583 2875 if (n == 0) {
jmasa@1583 2876 returnChunkToDictionary(fc);
ysr@2071 2877 assert(fl->count() == 0, "We never allocated any blocks");
jmasa@1583 2878 return;
jmasa@1583 2879 }
jmasa@1583 2880
duke@435 2881 // First return the remainder, if any.
duke@435 2882 // Note that we hold the lock until we decide if we're going to give
ysr@1580 2883 // back the remainder to the dictionary, since a concurrent allocation
duke@435 2884 // may otherwise see the heap as empty. (We're willing to take that
duke@435 2885 // hit if the block is a small block.)
duke@435 2886 if (rem > 0) {
duke@435 2887 size_t prefix_size = n * word_sz;
duke@435 2888 rem_fc = (FreeChunk*)((HeapWord*)fc + prefix_size);
duke@435 2889 rem_fc->setSize(rem);
ysr@2071 2890 rem_fc->linkPrev(NULL); // Mark as a free block for other (parallel) GC threads.
duke@435 2891 rem_fc->linkNext(NULL);
duke@435 2892 // Above must occur before BOT is updated below.
ysr@1580 2893 assert((ssize_t)n > 0 && prefix_size > 0 && rem_fc > fc, "Error");
ysr@2071 2894 OrderAccess::storestore();
duke@435 2895 _bt.split_block((HeapWord*)fc, fc->size(), prefix_size);
ysr@2071 2896 assert(fc->isFree(), "Error");
ysr@2071 2897 fc->setSize(prefix_size);
duke@435 2898 if (rem >= IndexSetSize) {
duke@435 2899 returnChunkToDictionary(rem_fc);
ysr@1580 2900 dictionary()->dictCensusUpdate(rem, true /*split*/, true /*birth*/);
duke@435 2901 rem_fc = NULL;
duke@435 2902 }
duke@435 2903 // Otherwise, return it to the small list below.
duke@435 2904 }
duke@435 2905 }
duke@435 2906 if (rem_fc != NULL) {
duke@435 2907 MutexLockerEx x(_indexedFreeListParLocks[rem],
duke@435 2908 Mutex::_no_safepoint_check_flag);
duke@435 2909 _bt.verify_not_unallocated((HeapWord*)rem_fc, rem_fc->size());
duke@435 2910 _indexedFreeList[rem].returnChunkAtHead(rem_fc);
duke@435 2911 smallSplitBirth(rem);
duke@435 2912 }
ysr@1580 2913 assert((ssize_t)n > 0 && fc != NULL, "Consistency");
duke@435 2914 // Now do the splitting up.
duke@435 2915 // Must do this in reverse order, so that anybody attempting to
duke@435 2916 // access the main chunk sees it as a single free block until we
duke@435 2917 // change it.
duke@435 2918 size_t fc_size = n * word_sz;
duke@435 2919 // All but first chunk in this loop
duke@435 2920 for (ssize_t i = n-1; i > 0; i--) {
duke@435 2921 FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz);
duke@435 2922 ffc->setSize(word_sz);
ysr@2071 2923 ffc->linkPrev(NULL); // Mark as a free block for other (parallel) GC threads.
duke@435 2924 ffc->linkNext(NULL);
duke@435 2925 // Above must occur before BOT is updated below.
ysr@2071 2926 OrderAccess::storestore();
duke@435 2927       // splitting from the right, fc_size == i * word_sz
ysr@2071 2928 _bt.mark_block((HeapWord*)ffc, word_sz, true /* reducing */);
duke@435 2929 fc_size -= word_sz;
duke@435 2930 _bt.verify_not_unallocated((HeapWord*)ffc, ffc->size());
duke@435 2931 _bt.verify_single_block((HeapWord*)ffc, ffc->size());
duke@435 2932 _bt.verify_single_block((HeapWord*)fc, fc_size);
duke@435 2933 // Push this on "fl".
duke@435 2934 fl->returnChunkAtHead(ffc);
duke@435 2935 }
duke@435 2936 // First chunk
ysr@2071 2937 assert(fc->isFree() && fc->size() == n*word_sz, "Error: should still be a free block");
ysr@2071 2938 // The blocks above should show their new sizes before the first block below
duke@435 2939 fc->setSize(word_sz);
ysr@2071 2940 fc->linkPrev(NULL); // idempotent wrt free-ness, see assert above
duke@435 2941 fc->linkNext(NULL);
duke@435 2942 _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
duke@435 2943 _bt.verify_single_block((HeapWord*)fc, fc->size());
duke@435 2944 fl->returnChunkAtHead(fc);
duke@435 2945
ysr@1580 2946 assert((ssize_t)n > 0 && (ssize_t)n == fl->count(), "Incorrect number of blocks");
duke@435 2947 {
ysr@1580 2948 // Update the stats for this block size.
duke@435 2949 MutexLockerEx x(_indexedFreeListParLocks[word_sz],
duke@435 2950 Mutex::_no_safepoint_check_flag);
ysr@1580 2951 const ssize_t births = _indexedFreeList[word_sz].splitBirths() + n;
ysr@1580 2952 _indexedFreeList[word_sz].set_splitBirths(births);
ysr@1580 2953 // ssize_t new_surplus = _indexedFreeList[word_sz].surplus() + n;
ysr@1580 2954 // _indexedFreeList[word_sz].set_surplus(new_surplus);
duke@435 2955 }
duke@435 2956
duke@435 2957 // TRAP
duke@435 2958 assert(fl->tail()->next() == NULL, "List invariant.");
duke@435 2959 }
duke@435 2960
duke@435 2961 // Set up the space's par_seq_tasks structure for work claiming
duke@435 2962 // for parallel rescan. See CMSParRemarkTask where this is currently used.
duke@435 2963 // XXX Need to suitably abstract and generalize this and the next
duke@435 2964 // method into one.
duke@435 2965 void
duke@435 2966 CompactibleFreeListSpace::
duke@435 2967 initialize_sequential_subtasks_for_rescan(int n_threads) {
duke@435 2968 // The "size" of each task is fixed according to rescan_task_size.
duke@435 2969 assert(n_threads > 0, "Unexpected n_threads argument");
duke@435 2970 const size_t task_size = rescan_task_size();
duke@435 2971 size_t n_tasks = (used_region().word_size() + task_size - 1)/task_size;
ysr@775 2972 assert((n_tasks == 0) == used_region().is_empty(), "n_tasks incorrect");
ysr@775 2973 assert(n_tasks == 0 ||
ysr@775 2974 ((used_region().start() + (n_tasks - 1)*task_size < used_region().end()) &&
ysr@775 2975 (used_region().start() + n_tasks*task_size >= used_region().end())),
ysr@775 2976 "n_tasks calculation incorrect");
duke@435 2977 SequentialSubTasksDone* pst = conc_par_seq_tasks();
duke@435 2978 assert(!pst->valid(), "Clobbering existing data?");
jmasa@2188 2979 // Sets the condition for completion of the subtask (how many threads
jmasa@2188 2980 // need to finish in order to be done).
jmasa@2188 2981 pst->set_n_threads(n_threads);
duke@435 2982 pst->set_n_tasks((int)n_tasks);
duke@435 2983 }
duke@435 2984
duke@435 2985 // Set up the space's par_seq_tasks structure for work claiming
duke@435 2986 // for parallel concurrent marking. See CMSConcMarkTask where this is currently used.
duke@435 2987 void
duke@435 2988 CompactibleFreeListSpace::
duke@435 2989 initialize_sequential_subtasks_for_marking(int n_threads,
duke@435 2990 HeapWord* low) {
duke@435 2991 // The "size" of each task is fixed according to rescan_task_size.
duke@435 2992 assert(n_threads > 0, "Unexpected n_threads argument");
duke@435 2993 const size_t task_size = marking_task_size();
duke@435 2994 assert(task_size > CardTableModRefBS::card_size_in_words &&
duke@435 2995 (task_size % CardTableModRefBS::card_size_in_words == 0),
duke@435 2996 "Otherwise arithmetic below would be incorrect");
duke@435 2997 MemRegion span = _gen->reserved();
duke@435 2998 if (low != NULL) {
duke@435 2999 if (span.contains(low)) {
duke@435 3000 // Align low down to a card boundary so that
duke@435 3001 // we can use block_offset_careful() on span boundaries.
duke@435 3002 HeapWord* aligned_low = (HeapWord*)align_size_down((uintptr_t)low,
duke@435 3003 CardTableModRefBS::card_size);
duke@435 3004 // Clip span prefix at aligned_low
duke@435 3005 span = span.intersection(MemRegion(aligned_low, span.end()));
duke@435 3006 } else if (low > span.end()) {
duke@435 3007 span = MemRegion(low, low); // Null region
duke@435 3008 } // else use entire span
duke@435 3009 }
duke@435 3010 assert(span.is_empty() ||
duke@435 3011 ((uintptr_t)span.start() % CardTableModRefBS::card_size == 0),
duke@435 3012 "span should start at a card boundary");
duke@435 3013 size_t n_tasks = (span.word_size() + task_size - 1)/task_size;
duke@435 3014 assert((n_tasks == 0) == span.is_empty(), "Inconsistency");
duke@435 3015 assert(n_tasks == 0 ||
duke@435 3016 ((span.start() + (n_tasks - 1)*task_size < span.end()) &&
duke@435 3017 (span.start() + n_tasks*task_size >= span.end())),
ysr@775 3018 "n_tasks calculation incorrect");
duke@435 3019 SequentialSubTasksDone* pst = conc_par_seq_tasks();
duke@435 3020 assert(!pst->valid(), "Clobbering existing data?");
jmasa@2188 3021 // Sets the condition for completion of the subtask (how many threads
jmasa@2188 3022 // need to finish in order to be done).
jmasa@2188 3023 pst->set_n_threads(n_threads);
duke@435 3024 pst->set_n_tasks((int)n_tasks);
duke@435 3025 }
