src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp

author:    kvn
date:      Thu, 13 Mar 2014 14:57:01 -0700
changeset: 6513:bbfbe9b06038
parents:   6493:3205e78d8193, 6337:ab36007d6358
child:     6680:78bbf4d43a14

Merge

duke@435 1 /*
hseigel@4465 2 * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
duke@435 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
duke@435 4 *
duke@435 5 * This code is free software; you can redistribute it and/or modify it
duke@435 6 * under the terms of the GNU General Public License version 2 only, as
duke@435 7 * published by the Free Software Foundation.
duke@435 8 *
duke@435 9 * This code is distributed in the hope that it will be useful, but WITHOUT
duke@435 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
duke@435 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
duke@435 12 * version 2 for more details (a copy is included in the LICENSE file that
duke@435 13 * accompanied this code).
duke@435 14 *
duke@435 15 * You should have received a copy of the GNU General Public License version
duke@435 16 * 2 along with this work; if not, write to the Free Software Foundation,
duke@435 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
duke@435 18 *
trims@1907 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
trims@1907 20 * or visit www.oracle.com if you need additional information or have any
trims@1907 21 * questions.
duke@435 22 *
duke@435 23 */
duke@435 24
stefank@2314 25 #include "precompiled.hpp"
stefank@2314 26 #include "gc_implementation/concurrentMarkSweep/cmsLockVerifier.hpp"
stefank@2314 27 #include "gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp"
stefank@2314 28 #include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.inline.hpp"
stefank@2314 29 #include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.hpp"
stefank@2314 30 #include "gc_implementation/shared/liveRange.hpp"
stefank@2314 31 #include "gc_implementation/shared/spaceDecorator.hpp"
coleenp@4037 32 #include "gc_interface/collectedHeap.inline.hpp"
stefank@2314 33 #include "memory/allocation.inline.hpp"
stefank@2314 34 #include "memory/blockOffsetTable.inline.hpp"
stefank@2314 35 #include "memory/resourceArea.hpp"
stefank@2314 36 #include "memory/universe.inline.hpp"
stefank@2314 37 #include "oops/oop.inline.hpp"
stefank@2314 38 #include "runtime/globals.hpp"
stefank@2314 39 #include "runtime/handles.inline.hpp"
stefank@2314 40 #include "runtime/init.hpp"
stefank@2314 41 #include "runtime/java.hpp"
stefank@2314 42 #include "runtime/vmThread.hpp"
stefank@2314 43 #include "utilities/copy.hpp"
duke@435 44
duke@435 45 /////////////////////////////////////////////////////////////////////////
duke@435 46 //// CompactibleFreeListSpace
duke@435 47 /////////////////////////////////////////////////////////////////////////
duke@435 48
duke@435 49 // highest ranked free list lock rank
duke@435 50 int CompactibleFreeListSpace::_lockRank = Mutex::leaf + 3;
duke@435 51
kvn@1926 52 // Defaults are 0 so things will break badly if incorrectly initialized.
ysr@3264 53 size_t CompactibleFreeListSpace::IndexSetStart = 0;
ysr@3264 54 size_t CompactibleFreeListSpace::IndexSetStride = 0;
kvn@1926 55
kvn@1926 56 size_t MinChunkSize = 0;
kvn@1926 57
kvn@1926 58 void CompactibleFreeListSpace::set_cms_values() {
kvn@1926 59 // Set CMS global values
kvn@1926 60 assert(MinChunkSize == 0, "already set");
brutisso@3807 61
brutisso@3807 62 // MinChunkSize should be a multiple of MinObjAlignment and be large enough
brutisso@3807 63 // for chunks to contain a FreeChunk.
brutisso@3807 64 size_t min_chunk_size_in_bytes = align_size_up(sizeof(FreeChunk), MinObjAlignmentInBytes);
brutisso@3807 65 MinChunkSize = min_chunk_size_in_bytes / BytesPerWord;
kvn@1926 66
kvn@1926 67 assert(IndexSetStart == 0 && IndexSetStride == 0, "already set");
ysr@3264 68 IndexSetStart = MinChunkSize;
kvn@1926 69 IndexSetStride = MinObjAlignment;
kvn@1926 70 }
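// A worked illustration of the computation above (the concrete numbers are
// assumptions for exposition only): on a 64-bit VM with 8-byte words, if
// sizeof(FreeChunk) were 24 bytes and MinObjAlignmentInBytes were 8, then
//   align_size_up(24, 8) == (24 + 8 - 1) & ~(8 - 1) == 24 bytes,
// so MinChunkSize == 24 / BytesPerWord == 3 words, IndexSetStart == 3, and
// the indexed free lists advance in steps of MinObjAlignment words.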
kvn@1926 71
duke@435 72 // Constructor
duke@435 73 CompactibleFreeListSpace::CompactibleFreeListSpace(BlockOffsetSharedArray* bs,
duke@435 74 MemRegion mr, bool use_adaptive_freelists,
jmasa@3730 75 FreeBlockDictionary<FreeChunk>::DictionaryChoice dictionaryChoice) :
duke@435 76 _dictionaryChoice(dictionaryChoice),
duke@435 77 _adaptive_freelists(use_adaptive_freelists),
duke@435 78 _bt(bs, mr),
duke@435 79 // free list locks are in the range of values taken by _lockRank
duke@435 80 // This range currently is [_leaf+2, _leaf+3]
duke@435 81 // Note: this requires that CFLspace c'tors
duke@435 82 // are called serially in the order in which the locks
duke@435 83 // are acquired in the program text. This is true today.
duke@435 84 _freelistLock(_lockRank--, "CompactibleFreeListSpace._lock", true),
duke@435 85 _parDictionaryAllocLock(Mutex::leaf - 1, // == rank(ExpandHeap_lock) - 1
duke@435 86 "CompactibleFreeListSpace._dict_par_lock", true),
duke@435 87 _rescan_task_size(CardTableModRefBS::card_size_in_words * BitsPerWord *
duke@435 88 CMSRescanMultiple),
duke@435 89 _marking_task_size(CardTableModRefBS::card_size_in_words * BitsPerWord *
duke@435 90 CMSConcMarkMultiple),
duke@435 91 _collector(NULL)
duke@435 92 {
jmasa@3730 93 assert(sizeof(FreeChunk) / BytesPerWord <= MinChunkSize,
jmasa@4196 94 "FreeChunk is larger than expected");
duke@435 95 _bt.set_space(this);
jmasa@698 96 initialize(mr, SpaceDecorator::Clear, SpaceDecorator::Mangle);
duke@435 97 // We have all of "mr", all of which we place in the dictionary
duke@435 98 // as one big chunk. We'll need to decide here which of several
duke@435 99 // possible alternative dictionary implementations to use. For
duke@435 100 // now the choice is easy, since we have only one working
duke@435 101 // implementation, namely, the simple binary tree (splaying
duke@435 102 // temporarily disabled).
duke@435 103 switch (dictionaryChoice) {
jmasa@4196 104 case FreeBlockDictionary<FreeChunk>::dictionaryBinaryTree:
jmasa@4488 105 _dictionary = new AFLBinaryTreeDictionary(mr);
jmasa@4196 106 break;
jmasa@3730 107 case FreeBlockDictionary<FreeChunk>::dictionarySplayTree:
jmasa@3730 108 case FreeBlockDictionary<FreeChunk>::dictionarySkipList:
duke@435 109 default:
duke@435 110 warning("dictionaryChoice: selected option not understood; using"
duke@435 111 " default BinaryTreeDictionary implementation instead.");
duke@435 112 }
duke@435 113 assert(_dictionary != NULL, "CMS dictionary initialization");
duke@435 114 // The indexed free lists are initially all empty and are lazily
duke@435 115 // filled in on demand. Initialize the array elements to NULL.
duke@435 116 initializeIndexedFreeListArray();
duke@435 117
duke@435 118 // Not using adaptive free lists assumes that allocation is first
duke@435 119 // from the linAB's. Also a cms perm gen which can be compacted
duke@435 120 // has to have the klass's klassKlass allocated at a lower
duke@435 121 // address in the heap than the klass so that the klassKlass is
duke@435 122 // moved to its new location before the klass is moved.
duke@435 123 // Set the _refillSize for the linear allocation blocks
duke@435 124 if (!use_adaptive_freelists) {
jmasa@4488 125 FreeChunk* fc = _dictionary->get_chunk(mr.word_size(),
jmasa@4488 126 FreeBlockDictionary<FreeChunk>::atLeast);
duke@435 127 // The small linAB initially has all the space and will allocate
duke@435 128 // a chunk of any size.
duke@435 129 HeapWord* addr = (HeapWord*) fc;
duke@435 130 _smallLinearAllocBlock.set(addr, fc->size(),
duke@435 131 1024*SmallForLinearAlloc, fc->size());
duke@435 132 // Note that _unallocated_block is not updated here.
duke@435 133 // Allocations from the linear allocation block should
duke@435 134 // update it.
duke@435 135 } else {
duke@435 136 _smallLinearAllocBlock.set(0, 0, 1024*SmallForLinearAlloc,
duke@435 137 SmallForLinearAlloc);
duke@435 138 }
duke@435 139 // CMSIndexedFreeListReplenish should be at least 1
duke@435 140 CMSIndexedFreeListReplenish = MAX2((uintx)1, CMSIndexedFreeListReplenish);
duke@435 141 _promoInfo.setSpace(this);
duke@435 142 if (UseCMSBestFit) {
duke@435 143 _fitStrategy = FreeBlockBestFitFirst;
duke@435 144 } else {
duke@435 145 _fitStrategy = FreeBlockStrategyNone;
duke@435 146 }
ysr@3220 147 check_free_list_consistency();
duke@435 148
duke@435 149 // Initialize locks for parallel case.
jmasa@2188 150
jmasa@2188 151 if (CollectedHeap::use_parallel_gc_threads()) {
duke@435 152 for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
duke@435 153 _indexedFreeListParLocks[i] = new Mutex(Mutex::leaf - 1, // == ExpandHeap_lock - 1
duke@435 154 "a freelist par lock",
duke@435 155 true);
duke@435 156 DEBUG_ONLY(
duke@435 157 _indexedFreeList[i].set_protecting_lock(_indexedFreeListParLocks[i]);
duke@435 158 )
duke@435 159 }
duke@435 160 _dictionary->set_par_lock(&_parDictionaryAllocLock);
duke@435 161 }
duke@435 162 }
duke@435 163
duke@435 164 // Like CompactibleSpace forward() but always calls cross_threshold() to
duke@435 165 // update the block offset table. Removed initialize_threshold call because
duke@435 166 // CFLS does not use a block offset array for contiguous spaces.
duke@435 167 HeapWord* CompactibleFreeListSpace::forward(oop q, size_t size,
duke@435 168 CompactPoint* cp, HeapWord* compact_top) {
duke@435 169 // q is alive
duke@435 170 // First check if we should switch compaction space
duke@435 171 assert(this == cp->space, "'this' should be current compaction space.");
duke@435 172 size_t compaction_max_size = pointer_delta(end(), compact_top);
duke@435 173 assert(adjustObjectSize(size) == cp->space->adjust_object_size_v(size),
duke@435 174 "virtual adjustObjectSize_v() method is not correct");
duke@435 175 size_t adjusted_size = adjustObjectSize(size);
duke@435 176 assert(compaction_max_size >= MinChunkSize || compaction_max_size == 0,
duke@435 177 "no small fragments allowed");
duke@435 178 assert(minimum_free_block_size() == MinChunkSize,
duke@435 179 "for de-virtualized reference below");
duke@435 180 // Can't leave a nonzero size, residual fragment smaller than MinChunkSize
duke@435 181 if (adjusted_size + MinChunkSize > compaction_max_size &&
duke@435 182 adjusted_size != compaction_max_size) {
duke@435 183 do {
duke@435 184 // switch to next compaction space
duke@435 185 cp->space->set_compaction_top(compact_top);
duke@435 186 cp->space = cp->space->next_compaction_space();
duke@435 187 if (cp->space == NULL) {
duke@435 188 cp->gen = GenCollectedHeap::heap()->prev_gen(cp->gen);
duke@435 189 assert(cp->gen != NULL, "compaction must succeed");
duke@435 190 cp->space = cp->gen->first_compaction_space();
duke@435 191 assert(cp->space != NULL, "generation must have a first compaction space");
duke@435 192 }
duke@435 193 compact_top = cp->space->bottom();
duke@435 194 cp->space->set_compaction_top(compact_top);
duke@435 195 // The correct adjusted_size may not be the same as that for this method
duke@435 196 // (i.e., cp->space may no longer be "this"), so adjust the size again.
duke@435 197 // Use the virtual method here, which was deliberately avoided above to
duke@435 198 // save the virtual dispatch.
duke@435 199 adjusted_size = cp->space->adjust_object_size_v(size);
duke@435 200 compaction_max_size = pointer_delta(cp->space->end(), compact_top);
duke@435 201 assert(cp->space->minimum_free_block_size() == 0, "just checking");
duke@435 202 } while (adjusted_size > compaction_max_size);
duke@435 203 }
duke@435 204
duke@435 205 // store the forwarding pointer into the mark word
duke@435 206 if ((HeapWord*)q != compact_top) {
duke@435 207 q->forward_to(oop(compact_top));
duke@435 208 assert(q->is_gc_marked(), "encoding the pointer should preserve the mark");
duke@435 209 } else {
duke@435 210 // if the object isn't moving we can just set the mark to the default
duke@435 211 // mark and handle it specially later on.
duke@435 212 q->init_mark();
duke@435 213 assert(q->forwardee() == NULL, "should be forwarded to NULL");
duke@435 214 }
duke@435 215
duke@435 216 compact_top += adjusted_size;
duke@435 217
duke@435 218 // we need to update the offset table so that the beginnings of objects can be
duke@435 219 // found during scavenge. Note that we are updating the offset table based on
duke@435 220 // where the object will be once the compaction phase finishes.
duke@435 221
duke@435 222 // Always call cross_threshold(). A contiguous space calls it only when
duke@435 223 // the compaction_top exceeds the current threshold, but that shortcut does
duke@435 224 // not apply to a non-contiguous space.
duke@435 225 cp->threshold =
duke@435 226 cp->space->cross_threshold(compact_top - adjusted_size, compact_top);
duke@435 227 return compact_top;
duke@435 228 }
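// A quick numeric illustration of the residual-fragment rule in forward()
// above (the sizes are invented for exposition): with MinChunkSize == 3
// words, compaction_max_size == 10 words and adjusted_size == 9 words,
// forwarding the object here would strand a 1-word fragment too small to be
// described as a FreeChunk, so compaction switches to the next space;
// adjusted_size values of 7 (leaves a 3-word fragment) or exactly 10
// (leaves no fragment) are accepted in place.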
duke@435 229
duke@435 230 // A modified copy of OffsetTableContigSpace::cross_threshold() with _offsets -> _bt
duke@435 231 // and use of single_block instead of alloc_block. The name here is not really
duke@435 232 // appropriate - maybe a more general name could be invented for both the
duke@435 233 // contiguous and noncontiguous spaces.
duke@435 234
duke@435 235 HeapWord* CompactibleFreeListSpace::cross_threshold(HeapWord* start, HeapWord* the_end) {
duke@435 236 _bt.single_block(start, the_end);
duke@435 237 return end();
duke@435 238 }
duke@435 239
duke@435 240 // Initialize them to NULL.
duke@435 241 void CompactibleFreeListSpace::initializeIndexedFreeListArray() {
duke@435 242 for (size_t i = 0; i < IndexSetSize; i++) {
duke@435 243 // Note that on platforms where objects are double word aligned,
duke@435 244 // the odd array elements are not used. It is convenient, however,
duke@435 245 // to map directly from the object size to the array element.
duke@435 246 _indexedFreeList[i].reset(IndexSetSize);
duke@435 247 _indexedFreeList[i].set_size(i);
duke@435 248 assert(_indexedFreeList[i].count() == 0, "reset check failed");
duke@435 249 assert(_indexedFreeList[i].head() == NULL, "reset check failed");
duke@435 250 assert(_indexedFreeList[i].tail() == NULL, "reset check failed");
duke@435 251 assert(_indexedFreeList[i].hint() == IndexSetSize, "reset check failed");
duke@435 252 }
duke@435 253 }
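// For example (an assumed configuration, purely illustrative): if
// IndexSetStride were 2 because objects are double-word aligned, only the
// even-indexed lists ever receive chunks. _indexedFreeList[i] always holds
// free chunks of exactly i heap words, so a freed 8-word block is threaded
// onto _indexedFreeList[8] while _indexedFreeList[7] simply stays empty.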
duke@435 254
duke@435 255 void CompactibleFreeListSpace::resetIndexedFreeListArray() {
ysr@3264 256 for (size_t i = 1; i < IndexSetSize; i++) {
duke@435 257 assert(_indexedFreeList[i].size() == (size_t) i,
duke@435 258 "Indexed free list sizes are incorrect");
duke@435 259 _indexedFreeList[i].reset(IndexSetSize);
duke@435 260 assert(_indexedFreeList[i].count() == 0, "reset check failed");
duke@435 261 assert(_indexedFreeList[i].head() == NULL, "reset check failed");
duke@435 262 assert(_indexedFreeList[i].tail() == NULL, "reset check failed");
duke@435 263 assert(_indexedFreeList[i].hint() == IndexSetSize, "reset check failed");
duke@435 264 }
duke@435 265 }
duke@435 266
duke@435 267 void CompactibleFreeListSpace::reset(MemRegion mr) {
duke@435 268 resetIndexedFreeListArray();
duke@435 269 dictionary()->reset();
duke@435 270 if (BlockOffsetArrayUseUnallocatedBlock) {
duke@435 271 assert(end() == mr.end(), "We are compacting to the bottom of CMS gen");
duke@435 272 // Everything's allocated until proven otherwise.
duke@435 273 _bt.set_unallocated_block(end());
duke@435 274 }
duke@435 275 if (!mr.is_empty()) {
duke@435 276 assert(mr.word_size() >= MinChunkSize, "Chunk size is too small");
duke@435 277 _bt.single_block(mr.start(), mr.word_size());
duke@435 278 FreeChunk* fc = (FreeChunk*) mr.start();
jmasa@3732 279 fc->set_size(mr.word_size());
duke@435 280 if (mr.word_size() >= IndexSetSize) {
duke@435 281 returnChunkToDictionary(fc);
duke@435 282 } else {
duke@435 283 _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
jmasa@3732 284 _indexedFreeList[mr.word_size()].return_chunk_at_head(fc);
duke@435 285 }
brutisso@5163 286 coalBirth(mr.word_size());
duke@435 287 }
duke@435 288 _promoInfo.reset();
duke@435 289 _smallLinearAllocBlock._ptr = NULL;
duke@435 290 _smallLinearAllocBlock._word_size = 0;
duke@435 291 }
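// To make the dispatch in reset() concrete (the IndexSetSize value is
// assumed for illustration): if IndexSetSize were 257 words, a reset region
// of 300 words becomes a single dictionary chunk via
// returnChunkToDictionary(), whereas a 40-word region is threaded onto
// _indexedFreeList[40]; in either case the chunk's birth is recorded in the
// coalescing census via coalBirth().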
duke@435 292
duke@435 293 void CompactibleFreeListSpace::reset_after_compaction() {
duke@435 294 // Reset the space to the new reality - one free chunk.
duke@435 295 MemRegion mr(compaction_top(), end());
duke@435 296 reset(mr);
duke@435 297 // Now refill the linear allocation block(s) if possible.
duke@435 298 if (_adaptive_freelists) {
duke@435 299 refillLinearAllocBlocksIfNeeded();
duke@435 300 } else {
duke@435 301 // Place as much of mr in the linAB as we can get,
duke@435 302 // provided it was big enough to go into the dictionary.
jmasa@3732 303 FreeChunk* fc = dictionary()->find_largest_dict();
duke@435 304 if (fc != NULL) {
duke@435 305 assert(fc->size() == mr.word_size(),
duke@435 306 "Why was the chunk broken up?");
duke@435 307 removeChunkFromDictionary(fc);
duke@435 308 HeapWord* addr = (HeapWord*) fc;
duke@435 309 _smallLinearAllocBlock.set(addr, fc->size(),
duke@435 310 1024*SmallForLinearAlloc, fc->size());
duke@435 311 // Note that _unallocated_block is not updated here.
duke@435 312 }
duke@435 313 }
duke@435 314 }
duke@435 315
duke@435 316 // Walks the entire dictionary, returning a coterminal
duke@435 317 // chunk, if it exists. Use with caution since it involves
duke@435 318 // a potentially complete walk of a potentially large tree.
duke@435 319 FreeChunk* CompactibleFreeListSpace::find_chunk_at_end() {
duke@435 320
duke@435 321 assert_lock_strong(&_freelistLock);
duke@435 322
duke@435 323 return dictionary()->find_chunk_ends_at(end());
duke@435 324 }
duke@435 325
duke@435 326
duke@435 327 #ifndef PRODUCT
duke@435 328 void CompactibleFreeListSpace::initializeIndexedFreeListArrayReturnedBytes() {
duke@435 329 for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
jmasa@3732 330 _indexedFreeList[i].allocation_stats()->set_returned_bytes(0);
duke@435 331 }
duke@435 332 }
duke@435 333
duke@435 334 size_t CompactibleFreeListSpace::sumIndexedFreeListArrayReturnedBytes() {
duke@435 335 size_t sum = 0;
duke@435 336 for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
jmasa@3732 337 sum += _indexedFreeList[i].allocation_stats()->returned_bytes();
duke@435 338 }
duke@435 339 return sum;
duke@435 340 }
duke@435 341
duke@435 342 size_t CompactibleFreeListSpace::totalCountInIndexedFreeLists() const {
duke@435 343 size_t count = 0;
ysr@3264 344 for (size_t i = IndexSetStart; i < IndexSetSize; i++) {
duke@435 345 debug_only(
duke@435 346 ssize_t total_list_count = 0;
duke@435 347 for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
duke@435 348 fc = fc->next()) {
duke@435 349 total_list_count++;
duke@435 350 }
duke@435 351 assert(total_list_count == _indexedFreeList[i].count(),
duke@435 352 "Count in list is incorrect");
duke@435 353 )
duke@435 354 count += _indexedFreeList[i].count();
duke@435 355 }
duke@435 356 return count;
duke@435 357 }
duke@435 358
duke@435 359 size_t CompactibleFreeListSpace::totalCount() {
duke@435 360 size_t num = totalCountInIndexedFreeLists();
jmasa@3732 361 num += dictionary()->total_count();
duke@435 362 if (_smallLinearAllocBlock._word_size != 0) {
duke@435 363 num++;
duke@435 364 }
duke@435 365 return num;
duke@435 366 }
duke@435 367 #endif
duke@435 368
duke@435 369 bool CompactibleFreeListSpace::is_free_block(const HeapWord* p) const {
duke@435 370 FreeChunk* fc = (FreeChunk*) p;
jmasa@3732 371 return fc->is_free();
duke@435 372 }
duke@435 373
duke@435 374 size_t CompactibleFreeListSpace::used() const {
duke@435 375 return capacity() - free();
duke@435 376 }
duke@435 377
duke@435 378 size_t CompactibleFreeListSpace::free() const {
duke@435 379 // "MT-safe, but not MT-precise"(TM), if you will: i.e.
duke@435 380 // if you do this while the structures are in flux you
duke@435 381 // may get an approximate answer only; for instance
duke@435 382 // because there is concurrent allocation either
duke@435 383 // directly by mutators or for promotion during a GC.
duke@435 384 // It's "MT-safe", however, in the sense that you are guaranteed
duke@435 385 // not to crash and burn, for instance, because of walking
duke@435 386 // pointers that could disappear as you were walking them.
duke@435 387 // The approximation is because the various components
duke@435 388 // that are read below are not read atomically (and
duke@435 389 // further the computation of totalSizeInIndexedFreeLists()
duke@435 390 // is itself a non-atomic computation). The normal use of
duke@435 391 // this is during a resize operation at the end of GC
duke@435 392 // and at that time you are guaranteed to get the
duke@435 393 // correct actual value. However, for instance, this is
duke@435 394 // also read completely asynchronously by the "perf-sampler"
duke@435 395 // that supports jvmstat, and you are apt to see the values
duke@435 396 // flicker in such cases.
duke@435 397 assert(_dictionary != NULL, "No _dictionary?");
jmasa@3732 398 return (_dictionary->total_chunk_size(DEBUG_ONLY(freelistLock())) +
duke@435 399 totalSizeInIndexedFreeLists() +
duke@435 400 _smallLinearAllocBlock._word_size) * HeapWordSize;
duke@435 401 }
duke@435 402
duke@435 403 size_t CompactibleFreeListSpace::max_alloc_in_words() const {
duke@435 404 assert(_dictionary != NULL, "No _dictionary?");
duke@435 405 assert_locked();
jmasa@3732 406 size_t res = _dictionary->max_chunk_size();
duke@435 407 res = MAX2(res, MIN2(_smallLinearAllocBlock._word_size,
duke@435 408 (size_t) SmallForLinearAlloc - 1));
duke@435 409 // XXX the following could potentially be pretty slow;
duke@435 410 // should one, pessimally for the rare cases when res
duke@435 411 // calculated above is less than IndexSetSize,
duke@435 412 // just return res calculated above? My reasoning was that
duke@435 413 // those cases will be so rare that the extra time spent doesn't
duke@435 414 // really matter....
duke@435 415 // Note: do not change the loop test i >= res + IndexSetStride
duke@435 416 // to i > res below, because i is unsigned and res may be zero.
duke@435 417 for (size_t i = IndexSetSize - 1; i >= res + IndexSetStride;
duke@435 418 i -= IndexSetStride) {
duke@435 419 if (_indexedFreeList[i].head() != NULL) {
duke@435 420 assert(_indexedFreeList[i].count() != 0, "Inconsistent FreeList");
duke@435 421 return i;
duke@435 422 }
duke@435 423 }
duke@435 424 return res;
duke@435 425 }
duke@435 426
ysr@2071 427 void LinearAllocBlock::print_on(outputStream* st) const {
ysr@2071 428 st->print_cr(" LinearAllocBlock: ptr = " PTR_FORMAT ", word_size = " SIZE_FORMAT
ysr@2071 429 ", refillsize = " SIZE_FORMAT ", allocation_size_limit = " SIZE_FORMAT,
ysr@2071 430 _ptr, _word_size, _refillSize, _allocation_size_limit);
ysr@2071 431 }
ysr@2071 432
ysr@2071 433 void CompactibleFreeListSpace::print_on(outputStream* st) const {
ysr@2071 434 st->print_cr("COMPACTIBLE FREELIST SPACE");
ysr@2071 435 st->print_cr(" Space:");
ysr@2071 436 Space::print_on(st);
ysr@2071 437
ysr@2071 438 st->print_cr("promoInfo:");
ysr@2071 439 _promoInfo.print_on(st);
ysr@2071 440
ysr@2071 441 st->print_cr("_smallLinearAllocBlock");
ysr@2071 442 _smallLinearAllocBlock.print_on(st);
ysr@2071 443
ysr@2071 444 // dump_memory_block(_smallLinearAllocBlock->_ptr, 128);
ysr@2071 445
ysr@2071 446 st->print_cr(" _fitStrategy = %s, _adaptive_freelists = %s",
ysr@2071 447 _fitStrategy?"true":"false", _adaptive_freelists?"true":"false");
ysr@2071 448 }
ysr@2071 449
ysr@1580 450 void CompactibleFreeListSpace::print_indexed_free_lists(outputStream* st)
ysr@1580 451 const {
ysr@1580 452 reportIndexedFreeListStatistics();
ysr@1580 453 gclog_or_tty->print_cr("Layout of Indexed Freelists");
ysr@1580 454 gclog_or_tty->print_cr("---------------------------");
jmasa@4196 455 AdaptiveFreeList<FreeChunk>::print_labels_on(st, "size");
ysr@1580 456 for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
ysr@1580 457 _indexedFreeList[i].print_on(gclog_or_tty);
ysr@1580 458 for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
ysr@1580 459 fc = fc->next()) {
ysr@1580 460 gclog_or_tty->print_cr("\t[" PTR_FORMAT "," PTR_FORMAT ") %s",
ysr@1580 461 fc, (HeapWord*)fc + i,
ysr@1580 462 fc->cantCoalesce() ? "\t CC" : "");
ysr@1580 463 }
ysr@1580 464 }
ysr@1580 465 }
ysr@1580 466
ysr@1580 467 void CompactibleFreeListSpace::print_promo_info_blocks(outputStream* st)
ysr@1580 468 const {
ysr@1580 469 _promoInfo.print_on(st);
ysr@1580 470 }
ysr@1580 471
ysr@1580 472 void CompactibleFreeListSpace::print_dictionary_free_lists(outputStream* st)
ysr@1580 473 const {
jmasa@3732 474 _dictionary->report_statistics();
ysr@1580 475 st->print_cr("Layout of Freelists in Tree");
ysr@1580 476 st->print_cr("---------------------------");
ysr@1580 477 _dictionary->print_free_lists(st);
ysr@1580 478 }
ysr@1580 479
ysr@1580 480 class BlkPrintingClosure: public BlkClosure {
ysr@1580 481 const CMSCollector* _collector;
ysr@1580 482 const CompactibleFreeListSpace* _sp;
ysr@1580 483 const CMSBitMap* _live_bit_map;
ysr@1580 484 const bool _post_remark;
ysr@1580 485 outputStream* _st;
ysr@1580 486 public:
ysr@1580 487 BlkPrintingClosure(const CMSCollector* collector,
ysr@1580 488 const CompactibleFreeListSpace* sp,
ysr@1580 489 const CMSBitMap* live_bit_map,
ysr@1580 490 outputStream* st):
ysr@1580 491 _collector(collector),
ysr@1580 492 _sp(sp),
ysr@1580 493 _live_bit_map(live_bit_map),
ysr@1580 494 _post_remark(collector->abstract_state() > CMSCollector::FinalMarking),
ysr@1580 495 _st(st) { }
ysr@1580 496 size_t do_blk(HeapWord* addr);
ysr@1580 497 };
ysr@1580 498
ysr@1580 499 size_t BlkPrintingClosure::do_blk(HeapWord* addr) {
ysr@1580 500 size_t sz = _sp->block_size_no_stall(addr, _collector);
ysr@1580 501 assert(sz != 0, "Should always be able to compute a size");
ysr@1580 502 if (_sp->block_is_obj(addr)) {
ysr@1580 503 const bool dead = _post_remark && !_live_bit_map->isMarked(addr);
ysr@1580 504 _st->print_cr(PTR_FORMAT ": %s object of size " SIZE_FORMAT "%s",
ysr@1580 505 addr,
ysr@1580 506 dead ? "dead" : "live",
ysr@1580 507 sz,
ysr@1580 508 (!dead && CMSPrintObjectsInDump) ? ":" : ".");
ysr@1580 509 if (CMSPrintObjectsInDump && !dead) {
ysr@1580 510 oop(addr)->print_on(_st);
ysr@1580 511 _st->print_cr("--------------------------------------");
ysr@1580 512 }
ysr@1580 513 } else { // free block
ysr@1580 514 _st->print_cr(PTR_FORMAT ": free block of size " SIZE_FORMAT "%s",
ysr@1580 515 addr, sz, CMSPrintChunksInDump ? ":" : ".");
ysr@1580 516 if (CMSPrintChunksInDump) {
ysr@1580 517 ((FreeChunk*)addr)->print_on(_st);
ysr@1580 518 _st->print_cr("--------------------------------------");
ysr@1580 519 }
ysr@1580 520 }
ysr@1580 521 return sz;
ysr@1580 522 }
ysr@1580 523
ysr@1580 524 void CompactibleFreeListSpace::dump_at_safepoint_with_locks(CMSCollector* c,
ysr@1580 525 outputStream* st) {
ysr@1580 526 st->print_cr("\n=========================");
ysr@1580 527 st->print_cr("Block layout in CMS Heap:");
ysr@1580 528 st->print_cr("=========================");
ysr@1580 529 BlkPrintingClosure bpcl(c, this, c->markBitMap(), st);
ysr@1580 530 blk_iterate(&bpcl);
ysr@1580 531
ysr@1580 532 st->print_cr("\n=======================================");
ysr@1580 533 st->print_cr("Order & Layout of Promotion Info Blocks");
ysr@1580 534 st->print_cr("=======================================");
ysr@1580 535 print_promo_info_blocks(st);
ysr@1580 536
ysr@1580 537 st->print_cr("\n===========================");
ysr@1580 538 st->print_cr("Order of Indexed Free Lists");
ysr@1580 539 st->print_cr("===========================");
ysr@1580 540 print_indexed_free_lists(st);
ysr@1580 541
ysr@1580 542 st->print_cr("\n=================================");
ysr@1580 543 st->print_cr("Order of Free Lists in Dictionary");
ysr@1580 544 st->print_cr("=================================");
ysr@1580 545 print_dictionary_free_lists(st);
ysr@1580 546 }
ysr@1580 547
ysr@1580 548
duke@435 549 void CompactibleFreeListSpace::reportFreeListStatistics() const {
duke@435 550 assert_lock_strong(&_freelistLock);
duke@435 551 assert(PrintFLSStatistics != 0, "Reporting error");
jmasa@3732 552 _dictionary->report_statistics();
duke@435 553 if (PrintFLSStatistics > 1) {
duke@435 554 reportIndexedFreeListStatistics();
jmasa@3732 555 size_t total_size = totalSizeInIndexedFreeLists() +
jmasa@3732 556 _dictionary->total_chunk_size(DEBUG_ONLY(freelistLock()));
hseigel@4465 557 gclog_or_tty->print(" free=" SIZE_FORMAT " frag=%1.4f\n", total_size, flsFrag());
duke@435 558 }
duke@435 559 }
duke@435 560
duke@435 561 void CompactibleFreeListSpace::reportIndexedFreeListStatistics() const {
duke@435 562 assert_lock_strong(&_freelistLock);
duke@435 563 gclog_or_tty->print("Statistics for IndexedFreeLists:\n"
duke@435 564 "--------------------------------\n");
jmasa@3732 565 size_t total_size = totalSizeInIndexedFreeLists();
jmasa@3732 566 size_t free_blocks = numFreeBlocksInIndexedFreeLists();
jmasa@3732 567 gclog_or_tty->print("Total Free Space: " SIZE_FORMAT "\n", total_size);
duke@435 568 gclog_or_tty->print("Max Chunk Size: " SIZE_FORMAT "\n", maxChunkSizeInIndexedFreeLists());
jmasa@3732 569 gclog_or_tty->print("Number of Blocks: " SIZE_FORMAT "\n", free_blocks);
jmasa@3732 570 if (free_blocks != 0) {
jmasa@3732 571 gclog_or_tty->print("Av. Block Size: " SIZE_FORMAT "\n", total_size/free_blocks);
duke@435 572 }
duke@435 573 }
duke@435 574
duke@435 575 size_t CompactibleFreeListSpace::numFreeBlocksInIndexedFreeLists() const {
duke@435 576 size_t res = 0;
duke@435 577 for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
duke@435 578 debug_only(
duke@435 579 ssize_t recount = 0;
duke@435 580 for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
duke@435 581 fc = fc->next()) {
duke@435 582 recount += 1;
duke@435 583 }
duke@435 584 assert(recount == _indexedFreeList[i].count(),
duke@435 585 "Incorrect count in list");
duke@435 586 )
duke@435 587 res += _indexedFreeList[i].count();
duke@435 588 }
duke@435 589 return res;
duke@435 590 }
duke@435 591
duke@435 592 size_t CompactibleFreeListSpace::maxChunkSizeInIndexedFreeLists() const {
duke@435 593 for (size_t i = IndexSetSize - 1; i != 0; i -= IndexSetStride) {
duke@435 594 if (_indexedFreeList[i].head() != NULL) {
duke@435 595 assert(_indexedFreeList[i].count() != 0, "Inconsistent FreeList");
duke@435 596 return (size_t)i;
duke@435 597 }
duke@435 598 }
duke@435 599 return 0;
duke@435 600 }
duke@435 601
duke@435 602 void CompactibleFreeListSpace::set_end(HeapWord* value) {
duke@435 603 HeapWord* prevEnd = end();
duke@435 604 assert(prevEnd != value, "unnecessary set_end call");
ysr@2071 605 assert(prevEnd == NULL || !BlockOffsetArrayUseUnallocatedBlock || value >= unallocated_block(),
ysr@2071 606 "New end is below unallocated block");
duke@435 607 _end = value;
duke@435 608 if (prevEnd != NULL) {
duke@435 609 // Resize the underlying block offset table.
duke@435 610 _bt.resize(pointer_delta(value, bottom()));
ysr@1580 611 if (value <= prevEnd) {
ysr@2071 612 assert(!BlockOffsetArrayUseUnallocatedBlock || value >= unallocated_block(),
ysr@2071 613 "New end is below unallocated block");
ysr@1580 614 } else {
ysr@1580 615 // Now, take this new chunk and add it to the free blocks.
ysr@1580 616 // Note that the BOT has not yet been updated for this block.
ysr@1580 617 size_t newFcSize = pointer_delta(value, prevEnd);
ysr@1580 618 // XXX This is REALLY UGLY and should be fixed up. XXX
ysr@1580 619 if (!_adaptive_freelists && _smallLinearAllocBlock._ptr == NULL) {
ysr@1580 620 // Mark the boundary of the new block in BOT
ysr@1580 621 _bt.mark_block(prevEnd, value);
ysr@1580 622 // put it all in the linAB
ysr@1580 623 if (ParallelGCThreads == 0) {
ysr@1580 624 _smallLinearAllocBlock._ptr = prevEnd;
ysr@1580 625 _smallLinearAllocBlock._word_size = newFcSize;
ysr@1580 626 repairLinearAllocBlock(&_smallLinearAllocBlock);
ysr@1580 627 } else { // ParallelGCThreads > 0
ysr@1580 628 MutexLockerEx x(parDictionaryAllocLock(),
ysr@1580 629 Mutex::_no_safepoint_check_flag);
ysr@1580 630 _smallLinearAllocBlock._ptr = prevEnd;
ysr@1580 631 _smallLinearAllocBlock._word_size = newFcSize;
ysr@1580 632 repairLinearAllocBlock(&_smallLinearAllocBlock);
ysr@1580 633 }
ysr@1580 634 // Births of chunks put into a LinAB are not recorded. Births
ysr@1580 635 // of chunks as they are allocated out of a LinAB are.
ysr@1580 636 } else {
ysr@1580 637 // Add the block to the free lists, if possible coalescing it
ysr@1580 638 // with the last free block, and update the BOT and census data.
ysr@1580 639 addChunkToFreeListsAtEndRecordingStats(prevEnd, newFcSize);
duke@435 640 }
duke@435 641 }
duke@435 642 }
duke@435 643 }
duke@435 644
duke@435 645 class FreeListSpace_DCTOC : public Filtering_DCTOC {
duke@435 646 CompactibleFreeListSpace* _cfls;
duke@435 647 CMSCollector* _collector;
duke@435 648 protected:
duke@435 649 // Override.
duke@435 650 #define walk_mem_region_with_cl_DECL(ClosureType) \
duke@435 651 virtual void walk_mem_region_with_cl(MemRegion mr, \
duke@435 652 HeapWord* bottom, HeapWord* top, \
duke@435 653 ClosureType* cl); \
duke@435 654 void walk_mem_region_with_cl_par(MemRegion mr, \
duke@435 655 HeapWord* bottom, HeapWord* top, \
duke@435 656 ClosureType* cl); \
duke@435 657 void walk_mem_region_with_cl_nopar(MemRegion mr, \
duke@435 658 HeapWord* bottom, HeapWord* top, \
duke@435 659 ClosureType* cl)
coleenp@4037 660 walk_mem_region_with_cl_DECL(ExtendedOopClosure);
duke@435 661 walk_mem_region_with_cl_DECL(FilteringClosure);
duke@435 662
duke@435 663 public:
duke@435 664 FreeListSpace_DCTOC(CompactibleFreeListSpace* sp,
duke@435 665 CMSCollector* collector,
coleenp@4037 666 ExtendedOopClosure* cl,
duke@435 667 CardTableModRefBS::PrecisionStyle precision,
duke@435 668 HeapWord* boundary) :
duke@435 669 Filtering_DCTOC(sp, cl, precision, boundary),
duke@435 670 _cfls(sp), _collector(collector) {}
duke@435 671 };
duke@435 672
duke@435 673 // We de-virtualize the block-related calls below, since we know that our
duke@435 674 // space is a CompactibleFreeListSpace.
jmasa@3294 675
duke@435 676 #define FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(ClosureType) \
duke@435 677 void FreeListSpace_DCTOC::walk_mem_region_with_cl(MemRegion mr, \
duke@435 678 HeapWord* bottom, \
duke@435 679 HeapWord* top, \
duke@435 680 ClosureType* cl) { \
jmasa@3294 681 bool is_par = SharedHeap::heap()->n_par_threads() > 0; \
jmasa@3294 682 if (is_par) { \
jmasa@3294 683 assert(SharedHeap::heap()->n_par_threads() == \
jmasa@3294 684 SharedHeap::heap()->workers()->active_workers(), "Mismatch"); \
duke@435 685 walk_mem_region_with_cl_par(mr, bottom, top, cl); \
duke@435 686 } else { \
duke@435 687 walk_mem_region_with_cl_nopar(mr, bottom, top, cl); \
duke@435 688 } \
duke@435 689 } \
duke@435 690 void FreeListSpace_DCTOC::walk_mem_region_with_cl_par(MemRegion mr, \
duke@435 691 HeapWord* bottom, \
duke@435 692 HeapWord* top, \
duke@435 693 ClosureType* cl) { \
duke@435 694 /* Skip parts that are before "mr", in case "block_start" sent us \
duke@435 695 back too far. */ \
duke@435 696 HeapWord* mr_start = mr.start(); \
duke@435 697 size_t bot_size = _cfls->CompactibleFreeListSpace::block_size(bottom); \
duke@435 698 HeapWord* next = bottom + bot_size; \
duke@435 699 while (next < mr_start) { \
duke@435 700 bottom = next; \
duke@435 701 bot_size = _cfls->CompactibleFreeListSpace::block_size(bottom); \
duke@435 702 next = bottom + bot_size; \
duke@435 703 } \
duke@435 704 \
duke@435 705 while (bottom < top) { \
duke@435 706 if (_cfls->CompactibleFreeListSpace::block_is_obj(bottom) && \
duke@435 707 !_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks( \
duke@435 708 oop(bottom)) && \
duke@435 709 !_collector->CMSCollector::is_dead_obj(oop(bottom))) { \
duke@435 710 size_t word_sz = oop(bottom)->oop_iterate(cl, mr); \
duke@435 711 bottom += _cfls->adjustObjectSize(word_sz); \
duke@435 712 } else { \
duke@435 713 bottom += _cfls->CompactibleFreeListSpace::block_size(bottom); \
duke@435 714 } \
duke@435 715 } \
duke@435 716 } \
duke@435 717 void FreeListSpace_DCTOC::walk_mem_region_with_cl_nopar(MemRegion mr, \
duke@435 718 HeapWord* bottom, \
duke@435 719 HeapWord* top, \
duke@435 720 ClosureType* cl) { \
duke@435 721 /* Skip parts that are before "mr", in case "block_start" sent us \
duke@435 722 back too far. */ \
duke@435 723 HeapWord* mr_start = mr.start(); \
duke@435 724 size_t bot_size = _cfls->CompactibleFreeListSpace::block_size_nopar(bottom); \
duke@435 725 HeapWord* next = bottom + bot_size; \
duke@435 726 while (next < mr_start) { \
duke@435 727 bottom = next; \
duke@435 728 bot_size = _cfls->CompactibleFreeListSpace::block_size_nopar(bottom); \
duke@435 729 next = bottom + bot_size; \
duke@435 730 } \
duke@435 731 \
duke@435 732 while (bottom < top) { \
duke@435 733 if (_cfls->CompactibleFreeListSpace::block_is_obj_nopar(bottom) && \
duke@435 734 !_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks( \
duke@435 735 oop(bottom)) && \
duke@435 736 !_collector->CMSCollector::is_dead_obj(oop(bottom))) { \
duke@435 737 size_t word_sz = oop(bottom)->oop_iterate(cl, mr); \
duke@435 738 bottom += _cfls->adjustObjectSize(word_sz); \
duke@435 739 } else { \
duke@435 740 bottom += _cfls->CompactibleFreeListSpace::block_size_nopar(bottom); \
duke@435 741 } \
duke@435 742 } \
duke@435 743 }
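// (A note on the de-virtualization idiom used in the macro above: an
// explicitly qualified call such as
//   _cfls->CompactibleFreeListSpace::block_size(bottom)
// is bound statically by the C++ compiler, so no vtable lookup is performed
// even though block_size() is declared virtual, whereas the plain call
//   _cfls->block_size(bottom)
// would dispatch virtually.)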
duke@435 744
duke@435 745 // (There are only two of these, rather than N, because the split is due
duke@435 746 // only to the introduction of the FilteringClosure, a local part of the
duke@435 747 // impl of this abstraction.)
coleenp@4037 748 FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(ExtendedOopClosure)
duke@435 749 FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(FilteringClosure)
duke@435 750
duke@435 751 DirtyCardToOopClosure*
coleenp@4037 752 CompactibleFreeListSpace::new_dcto_cl(ExtendedOopClosure* cl,
duke@435 753 CardTableModRefBS::PrecisionStyle precision,
duke@435 754 HeapWord* boundary) {
duke@435 755 return new FreeListSpace_DCTOC(this, _collector, cl, precision, boundary);
duke@435 756 }
duke@435 757
duke@435 758
duke@435 759 // Note on locking for the space iteration functions:
duke@435 760 // since the collector's iteration activities are concurrent with
duke@435 761 // allocation activities by mutators, absent a suitable mutual exclusion
duke@435 762 // mechanism the iterators may go awry. For instance a block being iterated
duke@435 763 // may suddenly be allocated or divided up and part of it allocated and
duke@435 764 // so on.
duke@435 765
duke@435 766 // Apply the given closure to each block in the space.
duke@435 767 void CompactibleFreeListSpace::blk_iterate_careful(BlkClosureCareful* cl) {
duke@435 768 assert_lock_strong(freelistLock());
duke@435 769 HeapWord *cur, *limit;
duke@435 770 for (cur = bottom(), limit = end(); cur < limit;
duke@435 771 cur += cl->do_blk_careful(cur));
duke@435 772 }
duke@435 773
duke@435 774 // Apply the given closure to each block in the space.
duke@435 775 void CompactibleFreeListSpace::blk_iterate(BlkClosure* cl) {
duke@435 776 assert_lock_strong(freelistLock());
duke@435 777 HeapWord *cur, *limit;
duke@435 778 for (cur = bottom(), limit = end(); cur < limit;
duke@435 779 cur += cl->do_blk(cur));
duke@435 780 }
duke@435 781
duke@435 782 // Apply the given closure to each oop in the space.
coleenp@4037 783 void CompactibleFreeListSpace::oop_iterate(ExtendedOopClosure* cl) {
duke@435 784 assert_lock_strong(freelistLock());
duke@435 785 HeapWord *cur, *limit;
duke@435 786 size_t curSize;
duke@435 787 for (cur = bottom(), limit = end(); cur < limit;
duke@435 788 cur += curSize) {
duke@435 789 curSize = block_size(cur);
duke@435 790 if (block_is_obj(cur)) {
duke@435 791 oop(cur)->oop_iterate(cl);
duke@435 792 }
duke@435 793 }
duke@435 794 }
duke@435 795
duke@435 796 // Apply the given closure to each oop in the space \intersect memory region.
coleenp@4037 797 void CompactibleFreeListSpace::oop_iterate(MemRegion mr, ExtendedOopClosure* cl) {
duke@435 798 assert_lock_strong(freelistLock());
duke@435 799 if (is_empty()) {
duke@435 800 return;
duke@435 801 }
duke@435 802 MemRegion cur = MemRegion(bottom(), end());
duke@435 803 mr = mr.intersection(cur);
duke@435 804 if (mr.is_empty()) {
duke@435 805 return;
duke@435 806 }
duke@435 807 if (mr.equals(cur)) {
duke@435 808 oop_iterate(cl);
duke@435 809 return;
duke@435 810 }
duke@435 811 assert(mr.end() <= end(), "just took an intersection above");
duke@435 812 HeapWord* obj_addr = block_start(mr.start());
duke@435 813 HeapWord* t = mr.end();
duke@435 814
duke@435 815 SpaceMemRegionOopsIterClosure smr_blk(cl, mr);
duke@435 816 if (block_is_obj(obj_addr)) {
duke@435 817 // Handle first object specially.
duke@435 818 oop obj = oop(obj_addr);
duke@435 819 obj_addr += adjustObjectSize(obj->oop_iterate(&smr_blk));
duke@435 820 } else {
duke@435 821 FreeChunk* fc = (FreeChunk*)obj_addr;
duke@435 822 obj_addr += fc->size();
duke@435 823 }
duke@435 824 while (obj_addr < t) {
duke@435 825 HeapWord* obj = obj_addr;
duke@435 826 obj_addr += block_size(obj_addr);
duke@435 827 // If "obj_addr" is not greater than top, then the
duke@435 828 // entire object "obj" is within the region.
duke@435 829 if (obj_addr <= t) {
duke@435 830 if (block_is_obj(obj)) {
duke@435 831 oop(obj)->oop_iterate(cl);
duke@435 832 }
duke@435 833 } else {
duke@435 834 // "obj" extends beyond end of region
duke@435 835 if (block_is_obj(obj)) {
duke@435 836 oop(obj)->oop_iterate(&smr_blk);
duke@435 837 }
duke@435 838 break;
duke@435 839 }
duke@435 840 }
duke@435 841 }
duke@435 842
duke@435 843 // NOTE: In the following methods, in order to safely be able to
duke@435 844 // apply the closure to an object, we need to be sure that the
duke@435 845 // object has been initialized. We are guaranteed that an object
duke@435 846 // is initialized if we are holding the Heap_lock with the
duke@435 847 // world stopped.
duke@435 848 void CompactibleFreeListSpace::verify_objects_initialized() const {
duke@435 849 if (is_init_completed()) {
duke@435 850 assert_locked_or_safepoint(Heap_lock);
duke@435 851 if (Universe::is_fully_initialized()) {
duke@435 852 guarantee(SafepointSynchronize::is_at_safepoint(),
duke@435 853 "Required for objects to be initialized");
duke@435 854 }
duke@435 855 } // else make a concession at vm start-up
duke@435 856 }
duke@435 857
duke@435 858 // Apply the given closure to each object in the space
duke@435 859 void CompactibleFreeListSpace::object_iterate(ObjectClosure* blk) {
duke@435 860 assert_lock_strong(freelistLock());
duke@435 861 NOT_PRODUCT(verify_objects_initialized());
duke@435 862 HeapWord *cur, *limit;
duke@435 863 size_t curSize;
duke@435 864 for (cur = bottom(), limit = end(); cur < limit;
duke@435 865 cur += curSize) {
duke@435 866 curSize = block_size(cur);
duke@435 867 if (block_is_obj(cur)) {
duke@435 868 blk->do_object(oop(cur));
duke@435 869 }
duke@435 870 }
duke@435 871 }
duke@435 872
jmasa@952 873 // Apply the given closure to each live object in the space.
jmasa@952 874 // The usage of CompactibleFreeListSpace
jmasa@952 875 // by the ConcurrentMarkSweepGeneration for concurrent GC's allows
jmasa@952 876 // objects in the space to hold references to objects that are no longer
jmasa@952 877 // valid. For example, an object may reference another object
jmasa@952 878 // that has already been swept up (collected). This method uses
jmasa@952 879 // obj_is_alive() to determine whether it is safe to apply the closure to
jmasa@952 880 // an object. See obj_is_alive() for details on how liveness of an
jmasa@952 881 // object is decided.
jmasa@952 882
jmasa@952 883 void CompactibleFreeListSpace::safe_object_iterate(ObjectClosure* blk) {
jmasa@952 884 assert_lock_strong(freelistLock());
jmasa@952 885 NOT_PRODUCT(verify_objects_initialized());
jmasa@952 886 HeapWord *cur, *limit;
jmasa@952 887 size_t curSize;
jmasa@952 888 for (cur = bottom(), limit = end(); cur < limit;
jmasa@952 889 cur += curSize) {
jmasa@952 890 curSize = block_size(cur);
jmasa@952 891 if (block_is_obj(cur) && obj_is_alive(cur)) {
jmasa@952 892 blk->do_object(oop(cur));
jmasa@952 893 }
jmasa@952 894 }
jmasa@952 895 }
jmasa@952 896
duke@435 897 void CompactibleFreeListSpace::object_iterate_mem(MemRegion mr,
duke@435 898 UpwardsObjectClosure* cl) {
ysr@1580 899 assert_locked(freelistLock());
duke@435 900 NOT_PRODUCT(verify_objects_initialized());
duke@435 901 Space::object_iterate_mem(mr, cl);
duke@435 902 }
duke@435 903
duke@435 904 // Callers of this iterator beware: The closure application should
duke@435 905 // be robust in the face of uninitialized objects and should (always)
duke@435 906 // return a correct size so that the next addr + size below gives us a
duke@435 907 // valid block boundary. [See for instance,
duke@435 908 // ScanMarkedObjectsAgainCarefullyClosure::do_object_careful()
duke@435 909 // in ConcurrentMarkSweepGeneration.cpp.]
duke@435 910 HeapWord*
duke@435 911 CompactibleFreeListSpace::object_iterate_careful(ObjectClosureCareful* cl) {
duke@435 912 assert_lock_strong(freelistLock());
duke@435 913 HeapWord *addr, *last;
duke@435 914 size_t size;
duke@435 915 for (addr = bottom(), last = end();
duke@435 916 addr < last; addr += size) {
duke@435 917 FreeChunk* fc = (FreeChunk*)addr;
jmasa@3732 918 if (fc->is_free()) {
duke@435 919 // Since we hold the free list lock, which protects direct
duke@435 920 // allocation in this generation by mutators, a free object
duke@435 921 // will remain free throughout this iteration code.
duke@435 922 size = fc->size();
duke@435 923 } else {
duke@435 924 // Note that the object need not necessarily be initialized,
duke@435 925 // because (for instance) the free list lock does NOT protect
duke@435 926 // object initialization. The closure application below must
duke@435 927 // therefore be correct in the face of uninitialized objects.
duke@435 928 size = cl->do_object_careful(oop(addr));
duke@435 929 if (size == 0) {
duke@435 930 // An unparsable object found. Signal early termination.
duke@435 931 return addr;
duke@435 932 }
duke@435 933 }
duke@435 934 }
duke@435 935 return NULL;
duke@435 936 }
duke@435 937
duke@435 938 // Callers of this iterator beware: The closure application should
duke@435 939 // be robust in the face of uninitialized objects and should (always)
duke@435 940 // return a correct size so that the next addr + size below gives us a
duke@435 941 // valid block boundary. [See for instance,
duke@435 942 // ScanMarkedObjectsAgainCarefullyClosure::do_object_careful()
duke@435 943 // in ConcurrentMarkSweepGeneration.cpp.]
duke@435 944 HeapWord*
duke@435 945 CompactibleFreeListSpace::object_iterate_careful_m(MemRegion mr,
duke@435 946 ObjectClosureCareful* cl) {
duke@435 947 assert_lock_strong(freelistLock());
duke@435 948 // Can't use used_region() below because it may not necessarily
duke@435 949 // be the same as [bottom(),end()); although we could
duke@435 950 // use [used_region().start(),round_to(used_region().end(),CardSize)),
duke@435 951 // that appears too cumbersome, so we just do the simpler check
duke@435 952 // in the assertion below.
duke@435 953 assert(!mr.is_empty() && MemRegion(bottom(),end()).contains(mr),
duke@435 954 "mr should be non-empty and within used space");
duke@435 955 HeapWord *addr, *end;
duke@435 956 size_t size;
duke@435 957 for (addr = block_start_careful(mr.start()), end = mr.end();
duke@435 958 addr < end; addr += size) {
duke@435 959 FreeChunk* fc = (FreeChunk*)addr;
jmasa@3732 960 if (fc->is_free()) {
duke@435 961 // Since we hold the free list lock, which protects direct
duke@435 962 // allocation in this generation by mutators, a free object
duke@435 963 // will remain free throughout this iteration code.
duke@435 964 size = fc->size();
duke@435 965 } else {
duke@435 966 // Note that the object need not necessarily be initialized,
duke@435 967 // because (for instance) the free list lock does NOT protect
duke@435 968 // object initialization. The closure application below must
duke@435 969 // therefore be correct in the face of uninitialized objects.
duke@435 970 size = cl->do_object_careful_m(oop(addr), mr);
duke@435 971 if (size == 0) {
duke@435 972 // An unparsable object found. Signal early termination.
duke@435 973 return addr;
duke@435 974 }
duke@435 975 }
duke@435 976 }
duke@435 977 return NULL;
duke@435 978 }
duke@435 979
duke@435 980
ysr@777 981 HeapWord* CompactibleFreeListSpace::block_start_const(const void* p) const {
duke@435 982 NOT_PRODUCT(verify_objects_initialized());
duke@435 983 return _bt.block_start(p);
duke@435 984 }
duke@435 985
duke@435 986 HeapWord* CompactibleFreeListSpace::block_start_careful(const void* p) const {
duke@435 987 return _bt.block_start_careful(p);
duke@435 988 }
duke@435 989
duke@435 990 size_t CompactibleFreeListSpace::block_size(const HeapWord* p) const {
duke@435 991 NOT_PRODUCT(verify_objects_initialized());
duke@435 992 // This must be volatile, or else there is a danger that the compiler
duke@435 993 // will compile the code below into a sometimes-infinite loop, by keeping
duke@435 994 // the value read the first time in a register.
duke@435 995 while (true) {
duke@435 996 // We must do this until we get a consistent view of the object.
coleenp@622 997 if (FreeChunk::indicatesFreeChunk(p)) {
coleenp@622 998 volatile FreeChunk* fc = (volatile FreeChunk*)p;
coleenp@622 999 size_t res = fc->size();
goetz@6493 1000
goetz@6493 1001 // Bugfix for systems with weak memory model (PPC64/IA64). The
goetz@6493 1002 // block's free bit was set and we have read the size of the
goetz@6493 1003 // block. Acquire and check the free bit again. If the block is
goetz@6493 1004 // still free, the read size is correct.
goetz@6493 1005 OrderAccess::acquire();
goetz@6493 1006
coleenp@622 1007 // If the object is still a free chunk, return the size, else it
coleenp@622 1008 // has been allocated so try again.
coleenp@622 1009 if (FreeChunk::indicatesFreeChunk(p)) {
duke@435 1010 assert(res != 0, "Block size should not be 0");
duke@435 1011 return res;
duke@435 1012 }
coleenp@622 1013 } else {
coleenp@622 1014 // must read from what 'p' points to in each loop.
coleenp@4037 1015 Klass* k = ((volatile oopDesc*)p)->klass_or_null();
coleenp@622 1016 if (k != NULL) {
coleenp@4037 1017 assert(k->is_klass(), "Should really be klass oop.");
coleenp@622 1018 oop o = (oop)p;
coleenp@622 1019 assert(o->is_oop(true /* ignore mark word */), "Should be an oop.");
goetz@6493 1020
goetz@6493 1021 // Bugfix for systems with weak memory model (PPC64/IA64).
goetz@6493 1022 // The object o may be an array. Acquire to make sure that the array
goetz@6493 1023 // size (third word) is consistent.
goetz@6493 1024 OrderAccess::acquire();
goetz@6493 1025
coleenp@4037 1026 size_t res = o->size_given_klass(k);
coleenp@622 1027 res = adjustObjectSize(res);
coleenp@622 1028 assert(res != 0, "Block size should not be 0");
coleenp@622 1029 return res;
coleenp@622 1030 }
duke@435 1031 }
duke@435 1032 }
duke@435 1033 }
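// The lock-free protocol above, reduced to a sketch (the helper names below
// are placeholders for exposition, not real HotSpot APIs):
//
//   for (;;) {
//     if (looks_like_free_chunk(p)) {     // racy first read of the free bit
//       size_t sz = read_chunk_size(p);   // read the size field
//       OrderAccess::acquire();           // order the re-check after the read
//       if (looks_like_free_chunk(p)) {   // still free => sz was coherent
//         return sz;
//       }
//     } else if (read_klass(p) != NULL) { // header published => real object
//       return adjusted_object_size(p);
//     }
//     // otherwise the block is in flux (being allocated/initialized); retry
//   }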
duke@435 1034
coleenp@4037 1035 // TODO: Now that is_parsable is gone, we should combine these two functions.
duke@435 1036 // A variant of the above that uses the Printezis bits for
duke@435 1037 // unparsable but allocated objects. This avoids any possible
duke@435 1038 // stalls waiting for mutators to initialize objects, and is
duke@435 1039 // thus potentially faster than the variant above. However,
duke@435 1040 // this variant may return a zero size for a block that is
duke@435 1041 // under mutation and for which a consistent size cannot be
duke@435 1042 // inferred without stalling; see CMSCollector::block_size_if_printezis_bits().
duke@435 1043 size_t CompactibleFreeListSpace::block_size_no_stall(HeapWord* p,
duke@435 1044 const CMSCollector* c)
duke@435 1045 const {
duke@435 1046 assert(MemRegion(bottom(), end()).contains(p), "p not in space");
duke@435 1047 // This must be volatile, or else there is a danger that the compiler
duke@435 1048 // will compile the code below into a sometimes-infinite loop, by keeping
duke@435 1049 // the value read the first time in a register.
duke@435 1050 DEBUG_ONLY(uint loops = 0;)
duke@435 1051 while (true) {
duke@435 1052 // We must do this until we get a consistent view of the object.
coleenp@622 1053 if (FreeChunk::indicatesFreeChunk(p)) {
coleenp@622 1054 volatile FreeChunk* fc = (volatile FreeChunk*)p;
coleenp@622 1055 size_t res = fc->size();
goetz@6493 1056
goetz@6493 1057 // Bugfix for systems with weak memory model (PPC64/IA64). The
goetz@6493 1058 // free bit of the block was set and we have read the size of
goetz@6493 1059 // the block. Acquire and check the free bit again. If the
goetz@6493 1060 // block is still free, the read size is correct.
goetz@6493 1061 OrderAccess::acquire();
goetz@6493 1062
coleenp@622 1063 if (FreeChunk::indicatesFreeChunk(p)) {
duke@435 1064 assert(res != 0, "Block size should not be 0");
duke@435 1065 assert(loops == 0, "Should be 0");
duke@435 1066 return res;
duke@435 1067 }
duke@435 1068 } else {
coleenp@622 1069 // must read from what 'p' points to in each loop.
coleenp@4037 1070 Klass* k = ((volatile oopDesc*)p)->klass_or_null();
ysr@2533 1071 // We trust the size of any object that has a non-NULL
ysr@2533 1072 // klass and (for those in the perm gen) is parsable
ysr@2533 1073 // -- irrespective of its conc_safe-ty.
coleenp@4037 1074 if (k != NULL) {
coleenp@4037 1075 assert(k->is_klass(), "Should really be klass oop.");
coleenp@622 1076 oop o = (oop)p;
coleenp@622 1077 assert(o->is_oop(), "Should be an oop");
goetz@6493 1078
goetz@6493 1079 // Bugfix for systems with weak memory model (PPC64/IA64).
goetz@6493 1080 // The object o may be an array. Acquire to make sure that the array
goetz@6493 1081 // size (third word) is consistent.
goetz@6493 1082 OrderAccess::acquire();
goetz@6493 1083
coleenp@4037 1084 size_t res = o->size_given_klass(k);
coleenp@622 1085 res = adjustObjectSize(res);
coleenp@622 1086 assert(res != 0, "Block size should not be 0");
coleenp@622 1087 return res;
coleenp@622 1088 } else {
ysr@2533 1089 // May return 0 if P-bits not present.
coleenp@622 1090 return c->block_size_if_printezis_bits(p);
coleenp@622 1091 }
duke@435 1092 }
duke@435 1093 assert(loops == 0, "Can loop at most once");
duke@435 1094 DEBUG_ONLY(loops++;)
duke@435 1095 }
duke@435 1096 }
duke@435 1097
duke@435 1098 size_t CompactibleFreeListSpace::block_size_nopar(const HeapWord* p) const {
duke@435 1099 NOT_PRODUCT(verify_objects_initialized());
duke@435 1100 assert(MemRegion(bottom(), end()).contains(p), "p not in space");
duke@435 1101 FreeChunk* fc = (FreeChunk*)p;
jmasa@3732 1102 if (fc->is_free()) {
duke@435 1103 return fc->size();
duke@435 1104 } else {
duke@435 1105 // Ignore mark word because this may be a recently promoted
duke@435 1106 // object whose mark word is used to chain together grey
duke@435 1107 // objects (the last one would have a null value).
duke@435 1108 assert(oop(p)->is_oop(true), "Should be an oop");
duke@435 1109 return adjustObjectSize(oop(p)->size());
duke@435 1110 }
duke@435 1111 }
duke@435 1112
duke@435 1113 // This implementation assumes that the property of "being an object" is
duke@435 1114 // stable. But being a free chunk may not be (because of parallel
duke@435 1115 // promotion.)
duke@435 1116 bool CompactibleFreeListSpace::block_is_obj(const HeapWord* p) const {
duke@435 1117 FreeChunk* fc = (FreeChunk*)p;
duke@435 1118 assert(is_in_reserved(p), "Should be in space");
duke@435 1119 // When doing a mark-sweep-compact of the CMS generation, this
duke@435 1120 // assertion may fail because prepare_for_compaction() uses
duke@435 1121 // space that is garbage to maintain information on ranges of
duke@435 1122 // live objects so that these live ranges can be moved as a whole.
duke@435 1123 // Comment out this assertion until that problem can be solved
duke@435 1124 // (i.e., that the block start calculation may look at objects
duke@435 1125 // at addresses below "p" in finding the object that contains "p",
duke@435 1126 // and those objects (if garbage) may have been modified to hold
duke@435 1127 // live range information).
jmasa@2188 1128 // assert(CollectedHeap::use_parallel_gc_threads() || _bt.block_start(p) == p,
jmasa@2188 1129 // "Should be a block boundary");
coleenp@622 1130 if (FreeChunk::indicatesFreeChunk(p)) return false;
coleenp@4037 1131 Klass* k = oop(p)->klass_or_null();
duke@435 1132 if (k != NULL) {
duke@435 1133 // Ignore mark word because it may have been used to
duke@435 1134 // chain together promoted objects (the last one
duke@435 1135 // would have a null value).
duke@435 1136 assert(oop(p)->is_oop(true), "Should be an oop");
duke@435 1137 return true;
duke@435 1138 } else {
duke@435 1139 return false; // Was not an object at the start of collection.
duke@435 1140 }
duke@435 1141 }
duke@435 1142
duke@435 1143 // Check if the object is alive. This fact is checked either by consulting
duke@435 1144 // the main marking bitmap in the sweeping phase or, if it's a permanent
duke@435 1145 // generation and we're not in the sweeping phase, by checking the
duke@435 1146 // perm_gen_verify_bit_map where we store the "deadness" information if
duke@435 1147 // we did not sweep the perm gen in the most recent previous GC cycle.
duke@435 1148 bool CompactibleFreeListSpace::obj_is_alive(const HeapWord* p) const {
ysr@2301 1149 assert(SafepointSynchronize::is_at_safepoint() || !is_init_completed(),
ysr@2301 1150 "Else races are possible");
ysr@2293 1151 assert(block_is_obj(p), "The address should point to an object");
duke@435 1152
duke@435 1153 // If we're sweeping, we use object liveness information from the main bit map
duke@435 1154 // for both perm gen and old gen.
duke@435 1155 // We don't need to lock the bitmap (live_map or dead_map below), because
duke@435 1156 // EITHER we are in the middle of the sweeping phase, and the
duke@435 1157 // main marking bit map (live_map below) is locked,
duke@435 1158 // OR we're in other phases and perm_gen_verify_bit_map (dead_map below)
duke@435 1159 // is stable, because it's mutated only in the sweeping phase.
ysr@2293 1160 // NOTE: This method is also used by jmap where, if class unloading is
ysr@2293 1161   // off, this method can return "false" for legitimate perm objects,
ysr@2293 1162 // when we are not in the midst of a sweeping phase, which can result
ysr@2293 1163 // in jmap not reporting certain perm gen objects. This will be moot
ysr@2293 1164 // if/when the perm gen goes away in the future.
duke@435 1165 if (_collector->abstract_state() == CMSCollector::Sweeping) {
duke@435 1166 CMSBitMap* live_map = _collector->markBitMap();
ysr@2293 1167 return live_map->par_isMarked((HeapWord*) p);
duke@435 1168 }
duke@435 1169 return true;
duke@435 1170 }
duke@435 1171
duke@435 1172 bool CompactibleFreeListSpace::block_is_obj_nopar(const HeapWord* p) const {
duke@435 1173 FreeChunk* fc = (FreeChunk*)p;
duke@435 1174 assert(is_in_reserved(p), "Should be in space");
duke@435 1175 assert(_bt.block_start(p) == p, "Should be a block boundary");
jmasa@3732 1176 if (!fc->is_free()) {
duke@435 1177 // Ignore mark word because it may have been used to
duke@435 1178 // chain together promoted objects (the last one
duke@435 1179 // would have a null value).
duke@435 1180 assert(oop(p)->is_oop(true), "Should be an oop");
duke@435 1181 return true;
duke@435 1182 }
duke@435 1183 return false;
duke@435 1184 }
duke@435 1185
duke@435 1186 // "MT-safe but not guaranteed MT-precise" (TM); you may get an
duke@435 1187 // approximate answer if you don't hold the freelistLock when you call this.
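// (Informally: the value returned is in heap words and equals the sum over
// list sizes i of i * _indexedFreeList[i].count(); without the lock those
// counts may change while being read, hence the approximation.)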
duke@435 1188 size_t CompactibleFreeListSpace::totalSizeInIndexedFreeLists() const {
duke@435 1189 size_t size = 0;
duke@435 1190 for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
duke@435 1191 debug_only(
duke@435 1192 // We may be calling here without the lock in which case we
duke@435 1193 // won't do this modest sanity check.
duke@435 1194 if (freelistLock()->owned_by_self()) {
duke@435 1195 size_t total_list_size = 0;
duke@435 1196 for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
duke@435 1197 fc = fc->next()) {
duke@435 1198 total_list_size += i;
duke@435 1199 }
duke@435 1200 assert(total_list_size == i * _indexedFreeList[i].count(),
duke@435 1201 "Count in list is incorrect");
duke@435 1202 }
duke@435 1203 )
duke@435 1204 size += i * _indexedFreeList[i].count();
duke@435 1205 }
duke@435 1206 return size;
duke@435 1207 }
duke@435 1208
duke@435 1209 HeapWord* CompactibleFreeListSpace::par_allocate(size_t size) {
duke@435 1210 MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
duke@435 1211 return allocate(size);
duke@435 1212 }
duke@435 1213
duke@435 1214 HeapWord*
duke@435 1215 CompactibleFreeListSpace::getChunkFromSmallLinearAllocBlockRemainder(size_t size) {
duke@435 1216 return getChunkFromLinearAllocBlockRemainder(&_smallLinearAllocBlock, size);
duke@435 1217 }
duke@435 1218
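// Typical caller sketch (illustrative only; 'word_size' is a placeholder, not
// a name used in this file). The free list lock must be held and the request
// must already be word-adjusted:
//
//   MutexLockerEx ml(freelistLock(), Mutex::_no_safepoint_check_flag);
//   HeapWord* mem = allocate(adjustObjectSize(word_size));  // may return NULL
//
// par_allocate() above takes the same lock itself before delegating here.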
duke@435 1219 HeapWord* CompactibleFreeListSpace::allocate(size_t size) {
duke@435 1220 assert_lock_strong(freelistLock());
duke@435 1221 HeapWord* res = NULL;
duke@435 1222 assert(size == adjustObjectSize(size),
duke@435 1223 "use adjustObjectSize() before calling into allocate()");
duke@435 1224
duke@435 1225 if (_adaptive_freelists) {
duke@435 1226 res = allocate_adaptive_freelists(size);
duke@435 1227 } else { // non-adaptive free lists
duke@435 1228 res = allocate_non_adaptive_freelists(size);
duke@435 1229 }
duke@435 1230
duke@435 1231 if (res != NULL) {
duke@435 1232 // check that res does lie in this space!
duke@435 1233 assert(is_in_reserved(res), "Not in this space!");
duke@435 1234 assert(is_aligned((void*)res), "alignment check");
duke@435 1235
duke@435 1236 FreeChunk* fc = (FreeChunk*)res;
duke@435 1237 fc->markNotFree();
jmasa@3732 1238 assert(!fc->is_free(), "shouldn't be marked free");
coleenp@622 1239 assert(oop(fc)->klass_or_null() == NULL, "should look uninitialized");
duke@435 1240 // Verify that the block offset table shows this to
duke@435 1241 // be a single block, but not one which is unallocated.
duke@435 1242 _bt.verify_single_block(res, size);
duke@435 1243 _bt.verify_not_unallocated(res, size);
duke@435 1244 // mangle a just allocated object with a distinct pattern.
duke@435 1245 debug_only(fc->mangleAllocated(size));
duke@435 1246 }
duke@435 1247
duke@435 1248 return res;
duke@435 1249 }
duke@435 1250
duke@435 1251 HeapWord* CompactibleFreeListSpace::allocate_non_adaptive_freelists(size_t size) {
duke@435 1252 HeapWord* res = NULL;
duke@435 1253 // try and use linear allocation for smaller blocks
duke@435 1254 if (size < _smallLinearAllocBlock._allocation_size_limit) {
duke@435 1255 // if successful, the following also adjusts block offset table
duke@435 1256 res = getChunkFromSmallLinearAllocBlock(size);
duke@435 1257 }
duke@435 1258 // Else triage to indexed lists for smaller sizes
duke@435 1259 if (res == NULL) {
duke@435 1260 if (size < SmallForDictionary) {
duke@435 1261 res = (HeapWord*) getChunkFromIndexedFreeList(size);
duke@435 1262 } else {
duke@435 1263 // else get it from the big dictionary; if even this doesn't
duke@435 1264 // work we are out of luck.
duke@435 1265 res = (HeapWord*)getChunkFromDictionaryExact(size);
duke@435 1266 }
duke@435 1267 }
duke@435 1268
duke@435 1269 return res;
duke@435 1270 }
duke@435 1271
duke@435 1272 HeapWord* CompactibleFreeListSpace::allocate_adaptive_freelists(size_t size) {
duke@435 1273 assert_lock_strong(freelistLock());
duke@435 1274 HeapWord* res = NULL;
duke@435 1275 assert(size == adjustObjectSize(size),
duke@435 1276 "use adjustObjectSize() before calling into allocate()");
duke@435 1277
duke@435 1278 // Strategy
duke@435 1279 // if small
duke@435 1280 // exact size from small object indexed list if small
duke@435 1281 // small or large linear allocation block (linAB) as appropriate
duke@435 1282 // take from lists of greater sized chunks
duke@435 1283 // else
duke@435 1284 // dictionary
duke@435 1285 // small or large linear allocation block if it has the space
duke@435 1286 // Try allocating exact size from indexTable first
duke@435 1287 if (size < IndexSetSize) {
duke@435 1288 res = (HeapWord*) getChunkFromIndexedFreeList(size);
duke@435 1289     if (res != NULL) {
duke@435 1290 assert(res != (HeapWord*)_indexedFreeList[size].head(),
duke@435 1291 "Not removed from free list");
duke@435 1292 // no block offset table adjustment is necessary on blocks in
duke@435 1293 // the indexed lists.
duke@435 1294
duke@435 1295 // Try allocating from the small LinAB
duke@435 1296 } else if (size < _smallLinearAllocBlock._allocation_size_limit &&
duke@435 1297 (res = getChunkFromSmallLinearAllocBlock(size)) != NULL) {
duke@435 1298 // if successful, the above also adjusts block offset table
duke@435 1299 // Note that this call will refill the LinAB to
duke@435 1300       // satisfy the request.  This is different from
duke@435 1301 // evm.
duke@435 1302 // Don't record chunk off a LinAB? smallSplitBirth(size);
duke@435 1303 } else {
duke@435 1304 // Raid the exact free lists larger than size, even if they are not
duke@435 1305 // overpopulated.
duke@435 1306 res = (HeapWord*) getChunkFromGreater(size);
duke@435 1307 }
duke@435 1308 } else {
duke@435 1309 // Big objects get allocated directly from the dictionary.
duke@435 1310 res = (HeapWord*) getChunkFromDictionaryExact(size);
duke@435 1311 if (res == NULL) {
duke@435 1312 // Try hard not to fail since an allocation failure will likely
duke@435 1313 // trigger a synchronous GC. Try to get the space from the
duke@435 1314 // allocation blocks.
duke@435 1315 res = getChunkFromSmallLinearAllocBlockRemainder(size);
duke@435 1316 }
duke@435 1317 }
duke@435 1318
duke@435 1319 return res;
duke@435 1320 }
duke@435 1321
duke@435 1322 // A worst-case estimate of the space required (in HeapWords) to expand the heap
duke@435 1323 // when promoting obj.
duke@435 1324 size_t CompactibleFreeListSpace::expansionSpaceRequired(size_t obj_size) const {
duke@435 1325 // Depending on the object size, expansion may require refilling either a
duke@435 1326 // bigLAB or a smallLAB plus refilling a PromotionInfo object. MinChunkSize
duke@435 1327 // is added because the dictionary may over-allocate to avoid fragmentation.
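  // Illustrative example (assumed values, not defaults): for a non-adaptive
  // space with _smallLinearAllocBlock._refillSize == 256 and
  // _promoInfo.refillSize() == 128, promoting an 8-word object requires
  // MAX2(8, 256) + 128 + 2 * MinChunkSize heap words of expansion room.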
duke@435 1328 size_t space = obj_size;
duke@435 1329 if (!_adaptive_freelists) {
duke@435 1330 space = MAX2(space, _smallLinearAllocBlock._refillSize);
duke@435 1331 }
duke@435 1332 space += _promoInfo.refillSize() + 2 * MinChunkSize;
duke@435 1333 return space;
duke@435 1334 }
duke@435 1335
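// Note on the search in getChunkFromGreater() below: the scan starts at
// numWords + MinChunkSize so that any chunk found can be split and still
// leave a remainder of at least MinChunkSize, matching the assertions in
// splitChunkAndReturnRemainder().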
duke@435 1336 FreeChunk* CompactibleFreeListSpace::getChunkFromGreater(size_t numWords) {
duke@435 1337 FreeChunk* ret;
duke@435 1338
duke@435 1339 assert(numWords >= MinChunkSize, "Size is less than minimum");
duke@435 1340 assert(linearAllocationWouldFail() || bestFitFirst(),
duke@435 1341 "Should not be here");
duke@435 1342
duke@435 1343 size_t i;
duke@435 1344 size_t currSize = numWords + MinChunkSize;
duke@435 1345 assert(currSize % MinObjAlignment == 0, "currSize should be aligned");
duke@435 1346 for (i = currSize; i < IndexSetSize; i += IndexSetStride) {
jmasa@4196 1347 AdaptiveFreeList<FreeChunk>* fl = &_indexedFreeList[i];
duke@435 1348 if (fl->head()) {
duke@435 1349 ret = getFromListGreater(fl, numWords);
jmasa@3732 1350 assert(ret == NULL || ret->is_free(), "Should be returning a free chunk");
duke@435 1351 return ret;
duke@435 1352 }
duke@435 1353 }
duke@435 1354
duke@435 1355 currSize = MAX2((size_t)SmallForDictionary,
duke@435 1356 (size_t)(numWords + MinChunkSize));
duke@435 1357
duke@435 1358 /* Try to get a chunk that satisfies request, while avoiding
duke@435 1359 fragmentation that can't be handled. */
duke@435 1360 {
jmasa@3732 1361 ret = dictionary()->get_chunk(currSize);
duke@435 1362 if (ret != NULL) {
duke@435 1363 assert(ret->size() - numWords >= MinChunkSize,
duke@435 1364 "Chunk is too small");
duke@435 1365 _bt.allocated((HeapWord*)ret, ret->size());
duke@435 1366 /* Carve returned chunk. */
duke@435 1367 (void) splitChunkAndReturnRemainder(ret, numWords);
duke@435 1368 /* Label this as no longer a free chunk. */
jmasa@3732 1369 assert(ret->is_free(), "This chunk should be free");
jmasa@3732 1370 ret->link_prev(NULL);
duke@435 1371 }
jmasa@3732 1372 assert(ret == NULL || ret->is_free(), "Should be returning a free chunk");
duke@435 1373 return ret;
duke@435 1374 }
duke@435 1375 ShouldNotReachHere();
duke@435 1376 }
duke@435 1377
ysr@3220 1378 bool CompactibleFreeListSpace::verifyChunkInIndexedFreeLists(FreeChunk* fc) const {
duke@435 1379 assert(fc->size() < IndexSetSize, "Size of chunk is too large");
jmasa@3732 1380 return _indexedFreeList[fc->size()].verify_chunk_in_free_list(fc);
duke@435 1381 }
duke@435 1382
ysr@3220 1383 bool CompactibleFreeListSpace::verify_chunk_is_linear_alloc_block(FreeChunk* fc) const {
ysr@3220 1384 assert((_smallLinearAllocBlock._ptr != (HeapWord*)fc) ||
ysr@3220 1385 (_smallLinearAllocBlock._word_size == fc->size()),
ysr@3220 1386 "Linear allocation block shows incorrect size");
ysr@3220 1387 return ((_smallLinearAllocBlock._ptr == (HeapWord*)fc) &&
ysr@3220 1388 (_smallLinearAllocBlock._word_size == fc->size()));
ysr@3220 1389 }
ysr@3220 1390
ysr@3220 1391 // Check if the purported free chunk is present either as a linear
ysr@3220 1392 // allocation block, the size-indexed table of (smaller) free blocks,
ysr@3220 1393 // or the larger free blocks kept in the binary tree dictionary.
jmasa@3732 1394 bool CompactibleFreeListSpace::verify_chunk_in_free_list(FreeChunk* fc) const {
ysr@3220 1395 if (verify_chunk_is_linear_alloc_block(fc)) {
ysr@3220 1396 return true;
ysr@3220 1397 } else if (fc->size() < IndexSetSize) {
ysr@3220 1398 return verifyChunkInIndexedFreeLists(fc);
ysr@3220 1399 } else {
jmasa@3732 1400 return dictionary()->verify_chunk_in_free_list(fc);
duke@435 1401 }
duke@435 1402 }
duke@435 1403
duke@435 1404 #ifndef PRODUCT
duke@435 1405 void CompactibleFreeListSpace::assert_locked() const {
duke@435 1406 CMSLockVerifier::assert_locked(freelistLock(), parDictionaryAllocLock());
duke@435 1407 }
ysr@1580 1408
ysr@1580 1409 void CompactibleFreeListSpace::assert_locked(const Mutex* lock) const {
ysr@1580 1410 CMSLockVerifier::assert_locked(lock);
ysr@1580 1411 }
duke@435 1412 #endif
duke@435 1413
duke@435 1414 FreeChunk* CompactibleFreeListSpace::allocateScratch(size_t size) {
duke@435 1415 // In the parallel case, the main thread holds the free list lock
duke@435 1416   // on behalf of the parallel threads.
duke@435 1417 FreeChunk* fc;
duke@435 1418 {
duke@435 1419 // If GC is parallel, this might be called by several threads.
duke@435 1420 // This should be rare enough that the locking overhead won't affect
duke@435 1421 // the sequential code.
duke@435 1422 MutexLockerEx x(parDictionaryAllocLock(),
duke@435 1423 Mutex::_no_safepoint_check_flag);
duke@435 1424 fc = getChunkFromDictionary(size);
duke@435 1425 }
duke@435 1426 if (fc != NULL) {
duke@435 1427 fc->dontCoalesce();
jmasa@3732 1428 assert(fc->is_free(), "Should be free, but not coalescable");
duke@435 1429 // Verify that the block offset table shows this to
duke@435 1430 // be a single block, but not one which is unallocated.
duke@435 1431 _bt.verify_single_block((HeapWord*)fc, fc->size());
duke@435 1432 _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
duke@435 1433 }
duke@435 1434 return fc;
duke@435 1435 }
duke@435 1436
coleenp@548 1437 oop CompactibleFreeListSpace::promote(oop obj, size_t obj_size) {
duke@435 1438 assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
duke@435 1439 assert_locked();
duke@435 1440
duke@435 1441 // if we are tracking promotions, then first ensure space for
duke@435 1442 // promotion (including spooling space for saving header if necessary).
duke@435 1443 // then allocate and copy, then track promoted info if needed.
duke@435 1444 // When tracking (see PromotionInfo::track()), the mark word may
duke@435 1445 // be displaced and in this case restoration of the mark word
duke@435 1446 // occurs in the (oop_since_save_marks_)iterate phase.
duke@435 1447 if (_promoInfo.tracking() && !_promoInfo.ensure_spooling_space()) {
duke@435 1448 return NULL;
duke@435 1449 }
duke@435 1450 // Call the allocate(size_t, bool) form directly to avoid the
duke@435 1451 // additional call through the allocate(size_t) form. Having
duke@435 1452   // the compiler inline the call is problematic because allocate(size_t)
duke@435 1453 // is a virtual method.
duke@435 1454 HeapWord* res = allocate(adjustObjectSize(obj_size));
duke@435 1455 if (res != NULL) {
duke@435 1456 Copy::aligned_disjoint_words((HeapWord*)obj, res, obj_size);
duke@435 1457 // if we should be tracking promotions, do so.
duke@435 1458 if (_promoInfo.tracking()) {
duke@435 1459 _promoInfo.track((PromotedObject*)res);
duke@435 1460 }
duke@435 1461 }
duke@435 1462 return oop(res);
duke@435 1463 }
duke@435 1464
duke@435 1465 HeapWord*
duke@435 1466 CompactibleFreeListSpace::getChunkFromSmallLinearAllocBlock(size_t size) {
duke@435 1467 assert_locked();
duke@435 1468 assert(size >= MinChunkSize, "minimum chunk size");
duke@435 1469 assert(size < _smallLinearAllocBlock._allocation_size_limit,
duke@435 1470 "maximum from smallLinearAllocBlock");
duke@435 1471 return getChunkFromLinearAllocBlock(&_smallLinearAllocBlock, size);
duke@435 1472 }
duke@435 1473
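// Linear allocation block (linAB) refill path, roughly: blk->_ptr and
// blk->_word_size describe the unconsumed tail of the block. When a request
// cannot be carved from that tail while leaving at least MinChunkSize, the
// leftover words are returned to the free lists and the block is refilled
// from a chunk of blk->_refillSize words, as done below.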
duke@435 1474 HeapWord*
duke@435 1475 CompactibleFreeListSpace::getChunkFromLinearAllocBlock(LinearAllocBlock *blk,
duke@435 1476 size_t size) {
duke@435 1477 assert_locked();
duke@435 1478 assert(size >= MinChunkSize, "too small");
duke@435 1479 HeapWord* res = NULL;
duke@435 1480   // Try to do linear allocation from blk, making sure that it is not empty.
duke@435 1481 if (blk->_word_size == 0) {
duke@435 1482 // We have probably been unable to fill this either in the prologue or
duke@435 1483 // when it was exhausted at the last linear allocation. Bail out until
duke@435 1484 // next time.
duke@435 1485 assert(blk->_ptr == NULL, "consistency check");
duke@435 1486 return NULL;
duke@435 1487 }
duke@435 1488 assert(blk->_word_size != 0 && blk->_ptr != NULL, "consistency check");
duke@435 1489 res = getChunkFromLinearAllocBlockRemainder(blk, size);
duke@435 1490 if (res != NULL) return res;
duke@435 1491
duke@435 1492 // about to exhaust this linear allocation block
duke@435 1493 if (blk->_word_size == size) { // exactly satisfied
duke@435 1494 res = blk->_ptr;
duke@435 1495 _bt.allocated(res, blk->_word_size);
duke@435 1496 } else if (size + MinChunkSize <= blk->_refillSize) {
ysr@1580 1497 size_t sz = blk->_word_size;
duke@435 1498 // Update _unallocated_block if the size is such that chunk would be
duke@435 1499 // returned to the indexed free list. All other chunks in the indexed
duke@435 1500 // free lists are allocated from the dictionary so that _unallocated_block
duke@435 1501 // has already been adjusted for them. Do it here so that the cost
duke@435 1502     // is paid for all chunks added back to the indexed free lists.
ysr@1580 1503 if (sz < SmallForDictionary) {
ysr@1580 1504 _bt.allocated(blk->_ptr, sz);
duke@435 1505 }
duke@435 1506 // Return the chunk that isn't big enough, and then refill below.
ysr@1580 1507 addChunkToFreeLists(blk->_ptr, sz);
jmasa@3732 1508 split_birth(sz);
duke@435 1509 // Don't keep statistics on adding back chunk from a LinAB.
duke@435 1510 } else {
duke@435 1511 // A refilled block would not satisfy the request.
duke@435 1512 return NULL;
duke@435 1513 }
duke@435 1514
duke@435 1515 blk->_ptr = NULL; blk->_word_size = 0;
duke@435 1516 refillLinearAllocBlock(blk);
duke@435 1517 assert(blk->_ptr == NULL || blk->_word_size >= size + MinChunkSize,
duke@435 1518 "block was replenished");
duke@435 1519 if (res != NULL) {
jmasa@3732 1520 split_birth(size);
duke@435 1521 repairLinearAllocBlock(blk);
duke@435 1522 } else if (blk->_ptr != NULL) {
duke@435 1523 res = blk->_ptr;
duke@435 1524 size_t blk_size = blk->_word_size;
duke@435 1525 blk->_word_size -= size;
duke@435 1526 blk->_ptr += size;
jmasa@3732 1527 split_birth(size);
duke@435 1528 repairLinearAllocBlock(blk);
duke@435 1529 // Update BOT last so that other (parallel) GC threads see a consistent
duke@435 1530 // view of the BOT and free blocks.
duke@435 1531 // Above must occur before BOT is updated below.
ysr@2071 1532 OrderAccess::storestore();
duke@435 1533 _bt.split_block(res, blk_size, size); // adjust block offset table
duke@435 1534 }
duke@435 1535 return res;
duke@435 1536 }
duke@435 1537
duke@435 1538 HeapWord* CompactibleFreeListSpace::getChunkFromLinearAllocBlockRemainder(
duke@435 1539 LinearAllocBlock* blk,
duke@435 1540 size_t size) {
duke@435 1541 assert_locked();
duke@435 1542 assert(size >= MinChunkSize, "too small");
duke@435 1543
duke@435 1544 HeapWord* res = NULL;
duke@435 1545 // This is the common case. Keep it simple.
duke@435 1546 if (blk->_word_size >= size + MinChunkSize) {
duke@435 1547 assert(blk->_ptr != NULL, "consistency check");
duke@435 1548 res = blk->_ptr;
duke@435 1549 // Note that the BOT is up-to-date for the linAB before allocation. It
duke@435 1550 // indicates the start of the linAB. The split_block() updates the
duke@435 1551 // BOT for the linAB after the allocation (indicates the start of the
duke@435 1552 // next chunk to be allocated).
duke@435 1553 size_t blk_size = blk->_word_size;
duke@435 1554 blk->_word_size -= size;
duke@435 1555 blk->_ptr += size;
jmasa@3732 1556 split_birth(size);
duke@435 1557 repairLinearAllocBlock(blk);
duke@435 1558 // Update BOT last so that other (parallel) GC threads see a consistent
duke@435 1559 // view of the BOT and free blocks.
duke@435 1560 // Above must occur before BOT is updated below.
ysr@2071 1561 OrderAccess::storestore();
duke@435 1562 _bt.split_block(res, blk_size, size); // adjust block offset table
duke@435 1563 _bt.allocated(res, size);
duke@435 1564 }
duke@435 1565 return res;
duke@435 1566 }
duke@435 1567
duke@435 1568 FreeChunk*
duke@435 1569 CompactibleFreeListSpace::getChunkFromIndexedFreeList(size_t size) {
duke@435 1570 assert_locked();
duke@435 1571 assert(size < SmallForDictionary, "just checking");
duke@435 1572 FreeChunk* res;
jmasa@3732 1573 res = _indexedFreeList[size].get_chunk_at_head();
duke@435 1574 if (res == NULL) {
duke@435 1575 res = getChunkFromIndexedFreeListHelper(size);
duke@435 1576 }
duke@435 1577 _bt.verify_not_unallocated((HeapWord*) res, size);
ysr@1580 1578 assert(res == NULL || res->size() == size, "Incorrect block size");
duke@435 1579 return res;
duke@435 1580 }
duke@435 1581
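// Replenishing helper. Illustrative example (assumes CMSIndexedFreeListReplenish
// is 4): a request for an 8-word chunk whose list is empty may obtain a 32-word
// chunk, carve it into four 8-word blocks, put the first three on the 8-word
// indexed list and hand the last one back to the caller.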
duke@435 1582 FreeChunk*
ysr@1580 1583 CompactibleFreeListSpace::getChunkFromIndexedFreeListHelper(size_t size,
ysr@1580 1584 bool replenish) {
duke@435 1585 assert_locked();
duke@435 1586 FreeChunk* fc = NULL;
duke@435 1587 if (size < SmallForDictionary) {
duke@435 1588 assert(_indexedFreeList[size].head() == NULL ||
duke@435 1589 _indexedFreeList[size].surplus() <= 0,
duke@435 1590 "List for this size should be empty or under populated");
duke@435 1591 // Try best fit in exact lists before replenishing the list
duke@435 1592 if (!bestFitFirst() || (fc = bestFitSmall(size)) == NULL) {
duke@435 1593 // Replenish list.
duke@435 1594 //
duke@435 1595 // Things tried that failed.
duke@435 1596 // Tried allocating out of the two LinAB's first before
duke@435 1597 // replenishing lists.
duke@435 1598 // Tried small linAB of size 256 (size in indexed list)
duke@435 1599 // and replenishing indexed lists from the small linAB.
duke@435 1600 //
duke@435 1601 FreeChunk* newFc = NULL;
ysr@1580 1602 const size_t replenish_size = CMSIndexedFreeListReplenish * size;
duke@435 1603 if (replenish_size < SmallForDictionary) {
duke@435 1604 // Do not replenish from an underpopulated size.
duke@435 1605 if (_indexedFreeList[replenish_size].surplus() > 0 &&
duke@435 1606 _indexedFreeList[replenish_size].head() != NULL) {
jmasa@3732 1607 newFc = _indexedFreeList[replenish_size].get_chunk_at_head();
ysr@1580 1608 } else if (bestFitFirst()) {
duke@435 1609 newFc = bestFitSmall(replenish_size);
duke@435 1610 }
duke@435 1611 }
ysr@1580 1612 if (newFc == NULL && replenish_size > size) {
ysr@1580 1613 assert(CMSIndexedFreeListReplenish > 1, "ctl pt invariant");
ysr@1580 1614 newFc = getChunkFromIndexedFreeListHelper(replenish_size, false);
ysr@1580 1615 }
ysr@1580 1616 // Note: The stats update re split-death of block obtained above
ysr@1580 1617 // will be recorded below precisely when we know we are going to
ysr@1580 1618     // be actually splitting it into more than one piece below.
duke@435 1619 if (newFc != NULL) {
ysr@1580 1620 if (replenish || CMSReplenishIntermediate) {
ysr@1580 1621 // Replenish this list and return one block to caller.
ysr@1580 1622 size_t i;
ysr@1580 1623 FreeChunk *curFc, *nextFc;
ysr@1580 1624 size_t num_blk = newFc->size() / size;
ysr@1580 1625 assert(num_blk >= 1, "Smaller than requested?");
ysr@1580 1626 assert(newFc->size() % size == 0, "Should be integral multiple of request");
ysr@1580 1627 if (num_blk > 1) {
ysr@1580 1628 // we are sure we will be splitting the block just obtained
ysr@1580 1629 // into multiple pieces; record the split-death of the original
ysr@1580 1630 splitDeath(replenish_size);
ysr@1580 1631 }
ysr@1580 1632 // carve up and link blocks 0, ..., num_blk - 2
ysr@1580 1633 // The last chunk is not added to the lists but is returned as the
ysr@1580 1634 // free chunk.
ysr@1580 1635 for (curFc = newFc, nextFc = (FreeChunk*)((HeapWord*)curFc + size),
ysr@1580 1636 i = 0;
ysr@1580 1637 i < (num_blk - 1);
ysr@1580 1638 curFc = nextFc, nextFc = (FreeChunk*)((HeapWord*)nextFc + size),
ysr@1580 1639 i++) {
jmasa@3732 1640 curFc->set_size(size);
ysr@1580 1641 // Don't record this as a return in order to try and
ysr@1580 1642 // determine the "returns" from a GC.
ysr@1580 1643 _bt.verify_not_unallocated((HeapWord*) fc, size);
jmasa@3732 1644 _indexedFreeList[size].return_chunk_at_tail(curFc, false);
ysr@1580 1645 _bt.mark_block((HeapWord*)curFc, size);
jmasa@3732 1646 split_birth(size);
ysr@1580 1647 // Don't record the initial population of the indexed list
ysr@1580 1648 // as a split birth.
ysr@1580 1649 }
ysr@1580 1650
ysr@1580 1651 // check that the arithmetic was OK above
ysr@1580 1652 assert((HeapWord*)nextFc == (HeapWord*)newFc + num_blk*size,
ysr@1580 1653 "inconsistency in carving newFc");
jmasa@3732 1654 curFc->set_size(size);
duke@435 1655 _bt.mark_block((HeapWord*)curFc, size);
jmasa@3732 1656 split_birth(size);
ysr@1580 1657 fc = curFc;
ysr@1580 1658 } else {
ysr@1580 1659 // Return entire block to caller
ysr@1580 1660 fc = newFc;
duke@435 1661 }
duke@435 1662 }
duke@435 1663 }
duke@435 1664 } else {
duke@435 1665 // Get a free chunk from the free chunk dictionary to be returned to
duke@435 1666 // replenish the indexed free list.
duke@435 1667 fc = getChunkFromDictionaryExact(size);
duke@435 1668 }
jmasa@3732 1669 // assert(fc == NULL || fc->is_free(), "Should be returning a free chunk");
duke@435 1670 return fc;
duke@435 1671 }
duke@435 1672
duke@435 1673 FreeChunk*
duke@435 1674 CompactibleFreeListSpace::getChunkFromDictionary(size_t size) {
duke@435 1675 assert_locked();
jmasa@4488 1676 FreeChunk* fc = _dictionary->get_chunk(size,
jmasa@4488 1677 FreeBlockDictionary<FreeChunk>::atLeast);
duke@435 1678 if (fc == NULL) {
duke@435 1679 return NULL;
duke@435 1680 }
duke@435 1681 _bt.allocated((HeapWord*)fc, fc->size());
duke@435 1682 if (fc->size() >= size + MinChunkSize) {
duke@435 1683 fc = splitChunkAndReturnRemainder(fc, size);
duke@435 1684 }
duke@435 1685 assert(fc->size() >= size, "chunk too small");
duke@435 1686 assert(fc->size() < size + MinChunkSize, "chunk too big");
duke@435 1687 _bt.verify_single_block((HeapWord*)fc, fc->size());
duke@435 1688 return fc;
duke@435 1689 }
duke@435 1690
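// Exact-size variant, roughly: if the dictionary hands back a chunk that is
// larger than 'size' but too small to leave a MinChunkSize remainder after
// splitting, the chunk is returned and one of at least size + MinChunkSize is
// requested instead, so the final split always yields exactly 'size' words.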
duke@435 1691 FreeChunk*
duke@435 1692 CompactibleFreeListSpace::getChunkFromDictionaryExact(size_t size) {
duke@435 1693 assert_locked();
jmasa@4488 1694 FreeChunk* fc = _dictionary->get_chunk(size,
jmasa@4488 1695 FreeBlockDictionary<FreeChunk>::atLeast);
duke@435 1696 if (fc == NULL) {
duke@435 1697 return fc;
duke@435 1698 }
duke@435 1699 _bt.allocated((HeapWord*)fc, fc->size());
duke@435 1700 if (fc->size() == size) {
duke@435 1701 _bt.verify_single_block((HeapWord*)fc, size);
duke@435 1702 return fc;
duke@435 1703 }
jmasa@3732 1704 assert(fc->size() > size, "get_chunk() guarantee");
duke@435 1705 if (fc->size() < size + MinChunkSize) {
duke@435 1706 // Return the chunk to the dictionary and go get a bigger one.
duke@435 1707 returnChunkToDictionary(fc);
jmasa@4488 1708 fc = _dictionary->get_chunk(size + MinChunkSize,
jmasa@4488 1709 FreeBlockDictionary<FreeChunk>::atLeast);
duke@435 1710 if (fc == NULL) {
duke@435 1711 return NULL;
duke@435 1712 }
duke@435 1713 _bt.allocated((HeapWord*)fc, fc->size());
duke@435 1714 }
duke@435 1715 assert(fc->size() >= size + MinChunkSize, "tautology");
duke@435 1716 fc = splitChunkAndReturnRemainder(fc, size);
duke@435 1717 assert(fc->size() == size, "chunk is wrong size");
duke@435 1718 _bt.verify_single_block((HeapWord*)fc, size);
duke@435 1719 return fc;
duke@435 1720 }
duke@435 1721
duke@435 1722 void
duke@435 1723 CompactibleFreeListSpace::returnChunkToDictionary(FreeChunk* chunk) {
duke@435 1724 assert_locked();
duke@435 1725
duke@435 1726 size_t size = chunk->size();
duke@435 1727 _bt.verify_single_block((HeapWord*)chunk, size);
duke@435 1728 // adjust _unallocated_block downward, as necessary
duke@435 1729 _bt.freed((HeapWord*)chunk, size);
jmasa@3732 1730 _dictionary->return_chunk(chunk);
ysr@1580 1731 #ifndef PRODUCT
ysr@1580 1732 if (CMSCollector::abstract_state() != CMSCollector::Sweeping) {
goetz@6337 1733 TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >* tc = TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >::as_TreeChunk(chunk);
goetz@6337 1734 TreeList<FreeChunk, AdaptiveFreeList<FreeChunk> >* tl = tc->list();
jmasa@4196 1735 tl->verify_stats();
ysr@1580 1736 }
ysr@1580 1737 #endif // PRODUCT
duke@435 1738 }
duke@435 1739
duke@435 1740 void
duke@435 1741 CompactibleFreeListSpace::returnChunkToFreeList(FreeChunk* fc) {
duke@435 1742 assert_locked();
duke@435 1743 size_t size = fc->size();
duke@435 1744 _bt.verify_single_block((HeapWord*) fc, size);
duke@435 1745 _bt.verify_not_unallocated((HeapWord*) fc, size);
duke@435 1746 if (_adaptive_freelists) {
jmasa@3732 1747 _indexedFreeList[size].return_chunk_at_tail(fc);
duke@435 1748 } else {
jmasa@3732 1749 _indexedFreeList[size].return_chunk_at_head(fc);
duke@435 1750 }
ysr@1580 1751 #ifndef PRODUCT
ysr@1580 1752 if (CMSCollector::abstract_state() != CMSCollector::Sweeping) {
ysr@1580 1753 _indexedFreeList[size].verify_stats();
ysr@1580 1754 }
ysr@1580 1755 #endif // PRODUCT
duke@435 1756 }
duke@435 1757
duke@435 1758 // Add chunk to end of last block -- if it's the largest
duke@435 1759 // block -- and update BOT and census data. We would
duke@435 1760 // of course have preferred to coalesce it with the
duke@435 1761 // last block, but it's currently less expensive to find the
duke@435 1762 // largest block than it is to find the last.
duke@435 1763 void
duke@435 1764 CompactibleFreeListSpace::addChunkToFreeListsAtEndRecordingStats(
duke@435 1765 HeapWord* chunk, size_t size) {
duke@435 1766 // check that the chunk does lie in this space!
duke@435 1767 assert(chunk != NULL && is_in_reserved(chunk), "Not in this space!");
duke@435 1768 // One of the parallel gc task threads may be here
duke@435 1769 // whilst others are allocating.
duke@435 1770 Mutex* lock = NULL;
duke@435 1771 if (ParallelGCThreads != 0) {
duke@435 1772 lock = &_parDictionaryAllocLock;
duke@435 1773 }
duke@435 1774 FreeChunk* ec;
duke@435 1775 {
duke@435 1776 MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
jmasa@3732 1777 ec = dictionary()->find_largest_dict(); // get largest block
jmasa@4196 1778 if (ec != NULL && ec->end() == (uintptr_t*) chunk) {
duke@435 1779 // It's a coterminal block - we can coalesce.
duke@435 1780 size_t old_size = ec->size();
duke@435 1781 coalDeath(old_size);
duke@435 1782 removeChunkFromDictionary(ec);
duke@435 1783 size += old_size;
duke@435 1784 } else {
duke@435 1785 ec = (FreeChunk*)chunk;
duke@435 1786 }
duke@435 1787 }
jmasa@3732 1788 ec->set_size(size);
duke@435 1789 debug_only(ec->mangleFreed(size));
brutisso@5166 1790 if (size < SmallForDictionary && ParallelGCThreads != 0) {
duke@435 1791 lock = _indexedFreeListParLocks[size];
duke@435 1792 }
duke@435 1793 MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
duke@435 1794 addChunkAndRepairOffsetTable((HeapWord*)ec, size, true);
duke@435 1795 // record the birth under the lock since the recording involves
duke@435 1796 // manipulation of the list on which the chunk lives and
duke@435 1797 // if the chunk is allocated and is the last on the list,
duke@435 1798 // the list can go away.
duke@435 1799 coalBirth(size);
duke@435 1800 }
duke@435 1801
duke@435 1802 void
duke@435 1803 CompactibleFreeListSpace::addChunkToFreeLists(HeapWord* chunk,
duke@435 1804 size_t size) {
duke@435 1805 // check that the chunk does lie in this space!
duke@435 1806 assert(chunk != NULL && is_in_reserved(chunk), "Not in this space!");
duke@435 1807 assert_locked();
duke@435 1808 _bt.verify_single_block(chunk, size);
duke@435 1809
duke@435 1810 FreeChunk* fc = (FreeChunk*) chunk;
jmasa@3732 1811 fc->set_size(size);
duke@435 1812 debug_only(fc->mangleFreed(size));
duke@435 1813 if (size < SmallForDictionary) {
duke@435 1814 returnChunkToFreeList(fc);
duke@435 1815 } else {
duke@435 1816 returnChunkToDictionary(fc);
duke@435 1817 }
duke@435 1818 }
duke@435 1819
duke@435 1820 void
duke@435 1821 CompactibleFreeListSpace::addChunkAndRepairOffsetTable(HeapWord* chunk,
duke@435 1822 size_t size, bool coalesced) {
duke@435 1823 assert_locked();
duke@435 1824 assert(chunk != NULL, "null chunk");
duke@435 1825 if (coalesced) {
duke@435 1826 // repair BOT
duke@435 1827 _bt.single_block(chunk, size);
duke@435 1828 }
duke@435 1829 addChunkToFreeLists(chunk, size);
duke@435 1830 }
duke@435 1831
duke@435 1832 // We _must_ find the purported chunk on our free lists;
duke@435 1833 // we assert if we don't.
duke@435 1834 void
duke@435 1835 CompactibleFreeListSpace::removeFreeChunkFromFreeLists(FreeChunk* fc) {
duke@435 1836 size_t size = fc->size();
duke@435 1837 assert_locked();
duke@435 1838 debug_only(verifyFreeLists());
duke@435 1839 if (size < SmallForDictionary) {
duke@435 1840 removeChunkFromIndexedFreeList(fc);
duke@435 1841 } else {
duke@435 1842 removeChunkFromDictionary(fc);
duke@435 1843 }
duke@435 1844 _bt.verify_single_block((HeapWord*)fc, size);
duke@435 1845 debug_only(verifyFreeLists());
duke@435 1846 }
duke@435 1847
duke@435 1848 void
duke@435 1849 CompactibleFreeListSpace::removeChunkFromDictionary(FreeChunk* fc) {
duke@435 1850 size_t size = fc->size();
duke@435 1851 assert_locked();
duke@435 1852 assert(fc != NULL, "null chunk");
duke@435 1853 _bt.verify_single_block((HeapWord*)fc, size);
jmasa@3732 1854 _dictionary->remove_chunk(fc);
duke@435 1855 // adjust _unallocated_block upward, as necessary
duke@435 1856 _bt.allocated((HeapWord*)fc, size);
duke@435 1857 }
duke@435 1858
duke@435 1859 void
duke@435 1860 CompactibleFreeListSpace::removeChunkFromIndexedFreeList(FreeChunk* fc) {
duke@435 1861 assert_locked();
duke@435 1862 size_t size = fc->size();
duke@435 1863 _bt.verify_single_block((HeapWord*)fc, size);
duke@435 1864 NOT_PRODUCT(
duke@435 1865 if (FLSVerifyIndexTable) {
duke@435 1866 verifyIndexedFreeList(size);
duke@435 1867 }
duke@435 1868 )
jmasa@3732 1869 _indexedFreeList[size].remove_chunk(fc);
duke@435 1870 NOT_PRODUCT(
duke@435 1871 if (FLSVerifyIndexTable) {
duke@435 1872 verifyIndexedFreeList(size);
duke@435 1873 }
duke@435 1874 )
duke@435 1875 }
duke@435 1876
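// Best-fit search over the indexed lists, roughly: each list's hint() names
// the next larger size believed to have a surplus, so the loop below follows
// the hint chain rather than scanning every size; setFLHints() further below
// rebuilds the hints at the end of each sweep census.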
duke@435 1877 FreeChunk* CompactibleFreeListSpace::bestFitSmall(size_t numWords) {
duke@435 1878 /* A hint is the next larger size that has a surplus.
duke@435 1879 Start search at a size large enough to guarantee that
duke@435 1880      the excess is >= MinChunkSize. */
duke@435 1881 size_t start = align_object_size(numWords + MinChunkSize);
duke@435 1882 if (start < IndexSetSize) {
jmasa@4196 1883 AdaptiveFreeList<FreeChunk>* it = _indexedFreeList;
duke@435 1884 size_t hint = _indexedFreeList[start].hint();
duke@435 1885 while (hint < IndexSetSize) {
duke@435 1886 assert(hint % MinObjAlignment == 0, "hint should be aligned");
jmasa@4196 1887 AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[hint];
duke@435 1888 if (fl->surplus() > 0 && fl->head() != NULL) {
duke@435 1889 // Found a list with surplus, reset original hint
duke@435 1890 // and split out a free chunk which is returned.
duke@435 1891 _indexedFreeList[start].set_hint(hint);
duke@435 1892 FreeChunk* res = getFromListGreater(fl, numWords);
jmasa@3732 1893 assert(res == NULL || res->is_free(),
duke@435 1894 "Should be returning a free chunk");
duke@435 1895 return res;
duke@435 1896 }
duke@435 1897 hint = fl->hint(); /* keep looking */
duke@435 1898 }
duke@435 1899 /* None found. */
duke@435 1900 it[start].set_hint(IndexSetSize);
duke@435 1901 }
duke@435 1902 return NULL;
duke@435 1903 }
duke@435 1904
duke@435 1905 /* Requires fl->size >= numWords + MinChunkSize */
jmasa@4196 1906 FreeChunk* CompactibleFreeListSpace::getFromListGreater(AdaptiveFreeList<FreeChunk>* fl,
duke@435 1907 size_t numWords) {
duke@435 1908   FreeChunk *curr = fl->head();
duke@435 1909   assert(curr != NULL, "List is empty");
duke@435 1910   assert(numWords >= MinChunkSize, "Word size is too small");
duke@435 1911   size_t oldNumWords = curr->size();
duke@435 1912 assert(oldNumWords >= numWords + MinChunkSize,
duke@435 1913 "Size of chunks in the list is too small");
duke@435 1914
jmasa@3732 1915 fl->remove_chunk(curr);
duke@435 1916 // recorded indirectly by splitChunkAndReturnRemainder -
duke@435 1917 // smallSplit(oldNumWords, numWords);
duke@435 1918 FreeChunk* new_chunk = splitChunkAndReturnRemainder(curr, numWords);
duke@435 1919 // Does anything have to be done for the remainder in terms of
duke@435 1920 // fixing the card table?
jmasa@3732 1921 assert(new_chunk == NULL || new_chunk->is_free(),
duke@435 1922 "Should be returning a free chunk");
duke@435 1923 return new_chunk;
duke@435 1924 }
duke@435 1925
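// Splitting, roughly: the remainder chunk is sized and marked free
// (link_prev(NULL)) before the block offset table is updated, with an
// OrderAccess::storestore() in between, so that other (parallel) GC threads
// consulting the BOT never observe a half-initialized chunk.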
duke@435 1926 FreeChunk*
duke@435 1927 CompactibleFreeListSpace::splitChunkAndReturnRemainder(FreeChunk* chunk,
duke@435 1928 size_t new_size) {
duke@435 1929 assert_locked();
duke@435 1930 size_t size = chunk->size();
duke@435 1931 assert(size > new_size, "Split from a smaller block?");
duke@435 1932 assert(is_aligned(chunk), "alignment problem");
duke@435 1933 assert(size == adjustObjectSize(size), "alignment problem");
duke@435 1934 size_t rem_size = size - new_size;
duke@435 1935 assert(rem_size == adjustObjectSize(rem_size), "alignment problem");
duke@435 1936 assert(rem_size >= MinChunkSize, "Free chunk smaller than minimum");
duke@435 1937 FreeChunk* ffc = (FreeChunk*)((HeapWord*)chunk + new_size);
duke@435 1938 assert(is_aligned(ffc), "alignment problem");
jmasa@3732 1939 ffc->set_size(rem_size);
jmasa@3732 1940 ffc->link_next(NULL);
jmasa@3732 1941 ffc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
duke@435 1942 // Above must occur before BOT is updated below.
duke@435 1943 // adjust block offset table
ysr@2071 1944 OrderAccess::storestore();
jmasa@3732 1945 assert(chunk->is_free() && ffc->is_free(), "Error");
duke@435 1946 _bt.split_block((HeapWord*)chunk, chunk->size(), new_size);
duke@435 1947 if (rem_size < SmallForDictionary) {
duke@435 1948 bool is_par = (SharedHeap::heap()->n_par_threads() > 0);
duke@435 1949 if (is_par) _indexedFreeListParLocks[rem_size]->lock();
jmasa@3294 1950 assert(!is_par ||
jmasa@3294 1951 (SharedHeap::heap()->n_par_threads() ==
jmasa@3294 1952 SharedHeap::heap()->workers()->active_workers()), "Mismatch");
duke@435 1953 returnChunkToFreeList(ffc);
duke@435 1954 split(size, rem_size);
duke@435 1955 if (is_par) _indexedFreeListParLocks[rem_size]->unlock();
duke@435 1956 } else {
duke@435 1957 returnChunkToDictionary(ffc);
duke@435 1958     split(size, rem_size);
duke@435 1959 }
jmasa@3732 1960 chunk->set_size(new_size);
duke@435 1961 return chunk;
duke@435 1962 }
duke@435 1963
duke@435 1964 void
duke@435 1965 CompactibleFreeListSpace::sweep_completed() {
duke@435 1966 // Now that space is probably plentiful, refill linear
duke@435 1967 // allocation blocks as needed.
duke@435 1968 refillLinearAllocBlocksIfNeeded();
duke@435 1969 }
duke@435 1970
duke@435 1971 void
duke@435 1972 CompactibleFreeListSpace::gc_prologue() {
duke@435 1973 assert_locked();
duke@435 1974 if (PrintFLSStatistics != 0) {
duke@435 1975 gclog_or_tty->print("Before GC:\n");
duke@435 1976 reportFreeListStatistics();
duke@435 1977 }
duke@435 1978 refillLinearAllocBlocksIfNeeded();
duke@435 1979 }
duke@435 1980
duke@435 1981 void
duke@435 1982 CompactibleFreeListSpace::gc_epilogue() {
duke@435 1983 assert_locked();
duke@435 1984 if (PrintGCDetails && Verbose && !_adaptive_freelists) {
duke@435 1985 if (_smallLinearAllocBlock._word_size == 0)
duke@435 1986 warning("CompactibleFreeListSpace(epilogue):: Linear allocation failure");
duke@435 1987 }
duke@435 1988 assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");
duke@435 1989 _promoInfo.stopTrackingPromotions();
duke@435 1990 repairLinearAllocationBlocks();
duke@435 1991 // Print Space's stats
duke@435 1992 if (PrintFLSStatistics != 0) {
duke@435 1993 gclog_or_tty->print("After GC:\n");
duke@435 1994 reportFreeListStatistics();
duke@435 1995 }
duke@435 1996 }
duke@435 1997
duke@435 1998 // Iteration support, mostly delegated from a CMS generation
duke@435 1999
duke@435 2000 void CompactibleFreeListSpace::save_marks() {
ysr@2825 2001 assert(Thread::current()->is_VM_thread(),
ysr@2825 2002 "Global variable should only be set when single-threaded");
ysr@2825 2003 // Mark the "end" of the used space at the time of this call;
duke@435 2004 // note, however, that promoted objects from this point
duke@435 2005 // on are tracked in the _promoInfo below.
ysr@2071 2006 set_saved_mark_word(unallocated_block());
ysr@2825 2007 #ifdef ASSERT
ysr@2825 2008 // Check the sanity of save_marks() etc.
ysr@2825 2009 MemRegion ur = used_region();
ysr@2825 2010 MemRegion urasm = used_region_at_save_marks();
ysr@2825 2011 assert(ur.contains(urasm),
ysr@2825 2012 err_msg(" Error at save_marks(): [" PTR_FORMAT "," PTR_FORMAT ")"
ysr@2825 2013 " should contain [" PTR_FORMAT "," PTR_FORMAT ")",
ysr@2825 2014 ur.start(), ur.end(), urasm.start(), urasm.end()));
ysr@2825 2015 #endif
duke@435 2016 // inform allocator that promotions should be tracked.
duke@435 2017 assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");
duke@435 2018 _promoInfo.startTrackingPromotions();
duke@435 2019 }
duke@435 2020
duke@435 2021 bool CompactibleFreeListSpace::no_allocs_since_save_marks() {
duke@435 2022 assert(_promoInfo.tracking(), "No preceding save_marks?");
ysr@2132 2023 assert(SharedHeap::heap()->n_par_threads() == 0,
ysr@2132 2024 "Shouldn't be called if using parallel gc.");
duke@435 2025 return _promoInfo.noPromotions();
duke@435 2026 }
duke@435 2027
duke@435 2028 #define CFLS_OOP_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix) \
duke@435 2029 \
duke@435 2030 void CompactibleFreeListSpace:: \
duke@435 2031 oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk) { \
duke@435 2032 assert(SharedHeap::heap()->n_par_threads() == 0, \
duke@435 2033 "Shouldn't be called (yet) during parallel part of gc."); \
duke@435 2034 _promoInfo.promoted_oops_iterate##nv_suffix(blk); \
duke@435 2035 /* \
duke@435 2036 * This also restores any displaced headers and removes the elements from \
duke@435 2037 * the iteration set as they are processed, so that we have a clean slate \
duke@435 2038 * at the end of the iteration. Note, thus, that if new objects are \
duke@435 2039 * promoted as a result of the iteration they are iterated over as well. \
duke@435 2040 */ \
duke@435 2041 assert(_promoInfo.noPromotions(), "_promoInfo inconsistency"); \
duke@435 2042 }
duke@435 2043
duke@435 2044 ALL_SINCE_SAVE_MARKS_CLOSURES(CFLS_OOP_SINCE_SAVE_MARKS_DEFN)
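// Illustrative expansion of the macro above (hypothetical closure type
// 'FooClosure' with suffix '_nv'):
//
//   void CompactibleFreeListSpace::oop_since_save_marks_iterate_nv(FooClosure* blk) {
//     assert(SharedHeap::heap()->n_par_threads() == 0, ...);
//     _promoInfo.promoted_oops_iterate_nv(blk);
//     assert(_promoInfo.noPromotions(), ...);
//   }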
duke@435 2045
ysr@447 2046 bool CompactibleFreeListSpace::linearAllocationWouldFail() const {
duke@435 2047 return _smallLinearAllocBlock._word_size == 0;
duke@435 2048 }
duke@435 2049
duke@435 2050 void CompactibleFreeListSpace::repairLinearAllocationBlocks() {
duke@435 2051 // Fix up linear allocation blocks to look like free blocks
duke@435 2052 repairLinearAllocBlock(&_smallLinearAllocBlock);
duke@435 2053 }
duke@435 2054
duke@435 2055 void CompactibleFreeListSpace::repairLinearAllocBlock(LinearAllocBlock* blk) {
duke@435 2056 assert_locked();
duke@435 2057 if (blk->_ptr != NULL) {
duke@435 2058 assert(blk->_word_size != 0 && blk->_word_size >= MinChunkSize,
duke@435 2059 "Minimum block size requirement");
duke@435 2060 FreeChunk* fc = (FreeChunk*)(blk->_ptr);
jmasa@3732 2061 fc->set_size(blk->_word_size);
jmasa@3732 2062 fc->link_prev(NULL); // mark as free
duke@435 2063 fc->dontCoalesce();
jmasa@3732 2064 assert(fc->is_free(), "just marked it free");
duke@435 2065 assert(fc->cantCoalesce(), "just marked it uncoalescable");
duke@435 2066 }
duke@435 2067 }
duke@435 2068
duke@435 2069 void CompactibleFreeListSpace::refillLinearAllocBlocksIfNeeded() {
duke@435 2070 assert_locked();
duke@435 2071 if (_smallLinearAllocBlock._ptr == NULL) {
duke@435 2072 assert(_smallLinearAllocBlock._word_size == 0,
duke@435 2073 "Size of linAB should be zero if the ptr is NULL");
duke@435 2074 // Reset the linAB refill and allocation size limit.
duke@435 2075 _smallLinearAllocBlock.set(0, 0, 1024*SmallForLinearAlloc, SmallForLinearAlloc);
duke@435 2076 }
duke@435 2077 refillLinearAllocBlockIfNeeded(&_smallLinearAllocBlock);
duke@435 2078 }
duke@435 2079
duke@435 2080 void
duke@435 2081 CompactibleFreeListSpace::refillLinearAllocBlockIfNeeded(LinearAllocBlock* blk) {
duke@435 2082 assert_locked();
duke@435 2083 assert((blk->_ptr == NULL && blk->_word_size == 0) ||
duke@435 2084 (blk->_ptr != NULL && blk->_word_size >= MinChunkSize),
duke@435 2085 "blk invariant");
duke@435 2086 if (blk->_ptr == NULL) {
duke@435 2087 refillLinearAllocBlock(blk);
duke@435 2088 }
duke@435 2089 if (PrintMiscellaneous && Verbose) {
duke@435 2090 if (blk->_word_size == 0) {
duke@435 2091 warning("CompactibleFreeListSpace(prologue):: Linear allocation failure");
duke@435 2092 }
duke@435 2093 }
duke@435 2094 }
duke@435 2095
duke@435 2096 void
duke@435 2097 CompactibleFreeListSpace::refillLinearAllocBlock(LinearAllocBlock* blk) {
duke@435 2098 assert_locked();
duke@435 2099 assert(blk->_word_size == 0 && blk->_ptr == NULL,
duke@435 2100 "linear allocation block should be empty");
duke@435 2101 FreeChunk* fc;
duke@435 2102 if (blk->_refillSize < SmallForDictionary &&
duke@435 2103 (fc = getChunkFromIndexedFreeList(blk->_refillSize)) != NULL) {
duke@435 2104 // A linAB's strategy might be to use small sizes to reduce
duke@435 2105 // fragmentation but still get the benefits of allocation from a
duke@435 2106 // linAB.
duke@435 2107 } else {
duke@435 2108 fc = getChunkFromDictionary(blk->_refillSize);
duke@435 2109 }
duke@435 2110 if (fc != NULL) {
duke@435 2111 blk->_ptr = (HeapWord*)fc;
duke@435 2112 blk->_word_size = fc->size();
duke@435 2113 fc->dontCoalesce(); // to prevent sweeper from sweeping us up
duke@435 2114 }
duke@435 2115 }
duke@435 2116
ysr@447 2117 // Support for concurrent collection policy decisions.
ysr@447 2118 bool CompactibleFreeListSpace::should_concurrent_collect() const {
ysr@447 2119   // In the future we might want to add in fragmentation stats --
ysr@447 2120 // including erosion of the "mountain" into this decision as well.
ysr@447 2121 return !adaptive_freelists() && linearAllocationWouldFail();
ysr@447 2122 }
ysr@447 2123
duke@435 2124 // Support for compaction
duke@435 2125
duke@435 2126 void CompactibleFreeListSpace::prepare_for_compaction(CompactPoint* cp) {
duke@435 2127 SCAN_AND_FORWARD(cp,end,block_is_obj,block_size);
duke@435 2128 // prepare_for_compaction() uses the space between live objects
duke@435 2129   // so that a later phase can skip dead space quickly.  As a result,
duke@435 2130   // verification of the free lists doesn't work after this point.
duke@435 2131 }
duke@435 2132
duke@435 2133 #define obj_size(q) adjustObjectSize(oop(q)->size())
duke@435 2134 #define adjust_obj_size(s) adjustObjectSize(s)
duke@435 2135
duke@435 2136 void CompactibleFreeListSpace::adjust_pointers() {
duke@435 2137 // In other versions of adjust_pointers(), a bail out
duke@435 2138 // based on the amount of live data in the generation
duke@435 2139 // (i.e., if 0, bail out) may be used.
duke@435 2140 // Cannot test used() == 0 here because the free lists have already
duke@435 2141 // been mangled by the compaction.
duke@435 2142
duke@435 2143 SCAN_AND_ADJUST_POINTERS(adjust_obj_size);
duke@435 2144 // See note about verification in prepare_for_compaction().
duke@435 2145 }
duke@435 2146
duke@435 2147 void CompactibleFreeListSpace::compact() {
duke@435 2148 SCAN_AND_COMPACT(obj_size);
duke@435 2149 }
duke@435 2150
duke@435 2151 // fragmentation_metric = 1 - [sum of (fbs**2) / (sum of fbs)**2]
duke@435 2152 // where fbs is free block sizes
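// For example: a single free block gives a metric of 0.0, while k equal-sized
// free blocks give 1 - 1/k, approaching 1.0 as free space becomes more
// fragmented.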
duke@435 2153 double CompactibleFreeListSpace::flsFrag() const {
duke@435 2154 size_t itabFree = totalSizeInIndexedFreeLists();
duke@435 2155 double frag = 0.0;
duke@435 2156 size_t i;
duke@435 2157
duke@435 2158 for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
duke@435 2159 double sz = i;
duke@435 2160 frag += _indexedFreeList[i].count() * (sz * sz);
duke@435 2161 }
duke@435 2162
duke@435 2163 double totFree = itabFree +
jmasa@3732 2164 _dictionary->total_chunk_size(DEBUG_ONLY(freelistLock()));
duke@435 2165 if (totFree > 0) {
duke@435 2166 frag = ((frag + _dictionary->sum_of_squared_block_sizes()) /
duke@435 2167 (totFree * totFree));
duke@435 2168 frag = (double)1.0 - frag;
duke@435 2169 } else {
duke@435 2170 assert(frag == 0.0, "Follows from totFree == 0");
duke@435 2171 }
duke@435 2172 return frag;
duke@435 2173 }
duke@435 2174
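// Free list census lifecycle, roughly: beginSweepFLCensus() records per-list
// demand estimates and coalescing targets before a sweep; endSweepFLCensus()
// then calls setFLSurplus(), setFLHints() and clearFLCensus() to roll the
// statistics over for the next cycle.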
duke@435 2175 void CompactibleFreeListSpace::beginSweepFLCensus(
duke@435 2176 float inter_sweep_current,
ysr@1580 2177 float inter_sweep_estimate,
ysr@1580 2178 float intra_sweep_estimate) {
duke@435 2179 assert_locked();
duke@435 2180 size_t i;
duke@435 2181 for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
jmasa@4196 2182 AdaptiveFreeList<FreeChunk>* fl = &_indexedFreeList[i];
ysr@1580 2183 if (PrintFLSStatistics > 1) {
ysr@1580 2184       gclog_or_tty->print("size[" SIZE_FORMAT "] : ", i);
ysr@1580 2185 }
ysr@1580 2186 fl->compute_desired(inter_sweep_current, inter_sweep_estimate, intra_sweep_estimate);
jmasa@3732 2187 fl->set_coal_desired((ssize_t)((double)fl->desired() * CMSSmallCoalSurplusPercent));
jmasa@3732 2188 fl->set_before_sweep(fl->count());
jmasa@3732 2189 fl->set_bfr_surp(fl->surplus());
duke@435 2190 }
jmasa@3732 2191 _dictionary->begin_sweep_dict_census(CMSLargeCoalSurplusPercent,
duke@435 2192 inter_sweep_current,
ysr@1580 2193 inter_sweep_estimate,
ysr@1580 2194 intra_sweep_estimate);
duke@435 2195 }
duke@435 2196
duke@435 2197 void CompactibleFreeListSpace::setFLSurplus() {
duke@435 2198 assert_locked();
duke@435 2199 size_t i;
duke@435 2200 for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
jmasa@4196 2201 AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
duke@435 2202 fl->set_surplus(fl->count() -
ysr@1580 2203 (ssize_t)((double)fl->desired() * CMSSmallSplitSurplusPercent));
duke@435 2204 }
duke@435 2205 }
duke@435 2206
duke@435 2207 void CompactibleFreeListSpace::setFLHints() {
duke@435 2208 assert_locked();
duke@435 2209 size_t i;
duke@435 2210 size_t h = IndexSetSize;
duke@435 2211 for (i = IndexSetSize - 1; i != 0; i -= IndexSetStride) {
jmasa@4196 2212 AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
duke@435 2213 fl->set_hint(h);
duke@435 2214 if (fl->surplus() > 0) {
duke@435 2215 h = i;
duke@435 2216 }
duke@435 2217 }
duke@435 2218 }
duke@435 2219
duke@435 2220 void CompactibleFreeListSpace::clearFLCensus() {
duke@435 2221 assert_locked();
ysr@3264 2222 size_t i;
duke@435 2223 for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
jmasa@4196 2224 AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
jmasa@3732 2225 fl->set_prev_sweep(fl->count());
jmasa@3732 2226 fl->set_coal_births(0);
jmasa@3732 2227 fl->set_coal_deaths(0);
jmasa@3732 2228 fl->set_split_births(0);
jmasa@3732 2229 fl->set_split_deaths(0);
duke@435 2230 }
duke@435 2231 }
duke@435 2232
ysr@447 2233 void CompactibleFreeListSpace::endSweepFLCensus(size_t sweep_count) {
ysr@1580 2234 if (PrintFLSStatistics > 0) {
jmasa@3732 2235 HeapWord* largestAddr = (HeapWord*) dictionary()->find_largest_dict();
ysr@1580 2236 gclog_or_tty->print_cr("CMS: Large block " PTR_FORMAT,
ysr@1580 2237 largestAddr);
ysr@1580 2238 }
duke@435 2239 setFLSurplus();
duke@435 2240 setFLHints();
duke@435 2241 if (PrintGC && PrintFLSCensus > 0) {
ysr@447 2242 printFLCensus(sweep_count);
duke@435 2243 }
duke@435 2244 clearFLCensus();
duke@435 2245 assert_locked();
jmasa@3732 2246 _dictionary->end_sweep_dict_census(CMSLargeSplitSurplusPercent);
duke@435 2247 }
duke@435 2248
duke@435 2249 bool CompactibleFreeListSpace::coalOverPopulated(size_t size) {
duke@435 2250 if (size < SmallForDictionary) {
jmasa@4196 2251 AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
jmasa@3732 2252 return (fl->coal_desired() < 0) ||
jmasa@3732 2253 ((int)fl->count() > fl->coal_desired());
duke@435 2254 } else {
jmasa@3732 2255 return dictionary()->coal_dict_over_populated(size);
duke@435 2256 }
duke@435 2257 }
duke@435 2258
duke@435 2259 void CompactibleFreeListSpace::smallCoalBirth(size_t size) {
duke@435 2260 assert(size < SmallForDictionary, "Size too large for indexed list");
jmasa@4196 2261 AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
jmasa@3732 2262 fl->increment_coal_births();
duke@435 2263 fl->increment_surplus();
duke@435 2264 }
duke@435 2265
duke@435 2266 void CompactibleFreeListSpace::smallCoalDeath(size_t size) {
duke@435 2267 assert(size < SmallForDictionary, "Size too large for indexed list");
jmasa@4196 2268 AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
jmasa@3732 2269 fl->increment_coal_deaths();
duke@435 2270 fl->decrement_surplus();
duke@435 2271 }
duke@435 2272
duke@435 2273 void CompactibleFreeListSpace::coalBirth(size_t size) {
duke@435 2274 if (size < SmallForDictionary) {
duke@435 2275 smallCoalBirth(size);
duke@435 2276 } else {
jmasa@4196 2277 dictionary()->dict_census_update(size,
duke@435 2278 false /* split */,
duke@435 2279 true /* birth */);
duke@435 2280 }
duke@435 2281 }
duke@435 2282
duke@435 2283 void CompactibleFreeListSpace::coalDeath(size_t size) {
duke@435 2284   if (size < SmallForDictionary) {
duke@435 2285 smallCoalDeath(size);
duke@435 2286 } else {
jmasa@4196 2287 dictionary()->dict_census_update(size,
duke@435 2288 false /* split */,
duke@435 2289 false /* birth */);
duke@435 2290 }
duke@435 2291 }
duke@435 2292
duke@435 2293 void CompactibleFreeListSpace::smallSplitBirth(size_t size) {
duke@435 2294 assert(size < SmallForDictionary, "Size too large for indexed list");
jmasa@4196 2295 AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
jmasa@3732 2296 fl->increment_split_births();
duke@435 2297 fl->increment_surplus();
duke@435 2298 }
duke@435 2299
duke@435 2300 void CompactibleFreeListSpace::smallSplitDeath(size_t size) {
duke@435 2301 assert(size < SmallForDictionary, "Size too large for indexed list");
jmasa@4196 2302 AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
jmasa@3732 2303 fl->increment_split_deaths();
duke@435 2304 fl->decrement_surplus();
duke@435 2305 }
duke@435 2306
jmasa@3732 2307 void CompactibleFreeListSpace::split_birth(size_t size) {
duke@435 2308 if (size < SmallForDictionary) {
duke@435 2309 smallSplitBirth(size);
duke@435 2310 } else {
jmasa@4196 2311 dictionary()->dict_census_update(size,
duke@435 2312 true /* split */,
duke@435 2313 true /* birth */);
duke@435 2314 }
duke@435 2315 }
duke@435 2316
duke@435 2317 void CompactibleFreeListSpace::splitDeath(size_t size) {
duke@435 2318 if (size < SmallForDictionary) {
duke@435 2319 smallSplitDeath(size);
duke@435 2320 } else {
jmasa@4196 2321 dictionary()->dict_census_update(size,
duke@435 2322 true /* split */,
duke@435 2323 false /* birth */);
duke@435 2324 }
duke@435 2325 }
duke@435 2326
duke@435 2327 void CompactibleFreeListSpace::split(size_t from, size_t to1) {
duke@435 2328 size_t to2 = from - to1;
duke@435 2329 splitDeath(from);
jmasa@3732 2330 split_birth(to1);
jmasa@3732 2331 split_birth(to2);
duke@435 2332 }
duke@435 2333
duke@435 2334 void CompactibleFreeListSpace::print() const {
ysr@2294 2335 print_on(tty);
duke@435 2336 }
duke@435 2337
duke@435 2338 void CompactibleFreeListSpace::prepare_for_verify() {
duke@435 2339 assert_locked();
duke@435 2340 repairLinearAllocationBlocks();
duke@435 2341 // Verify that the SpoolBlocks look like free blocks of
duke@435 2342 // appropriate sizes... To be done ...
duke@435 2343 }
duke@435 2344
duke@435 2345 class VerifyAllBlksClosure: public BlkClosure {
coleenp@548 2346 private:
duke@435 2347 const CompactibleFreeListSpace* _sp;
duke@435 2348 const MemRegion _span;
ysr@2071 2349 HeapWord* _last_addr;
ysr@2071 2350 size_t _last_size;
ysr@2071 2351 bool _last_was_obj;
ysr@2071 2352 bool _last_was_live;
duke@435 2353
duke@435 2354 public:
duke@435 2355 VerifyAllBlksClosure(const CompactibleFreeListSpace* sp,
ysr@2071 2356 MemRegion span) : _sp(sp), _span(span),
ysr@2071 2357 _last_addr(NULL), _last_size(0),
ysr@2071 2358 _last_was_obj(false), _last_was_live(false) { }
duke@435 2359
coleenp@548 2360 virtual size_t do_blk(HeapWord* addr) {
duke@435 2361 size_t res;
ysr@2071 2362 bool was_obj = false;
ysr@2071 2363 bool was_live = false;
duke@435 2364 if (_sp->block_is_obj(addr)) {
ysr@2071 2365 was_obj = true;
duke@435 2366 oop p = oop(addr);
duke@435 2367 guarantee(p->is_oop(), "Should be an oop");
duke@435 2368 res = _sp->adjustObjectSize(p->size());
duke@435 2369 if (_sp->obj_is_alive(addr)) {
ysr@2071 2370 was_live = true;
duke@435 2371 p->verify();
duke@435 2372 }
duke@435 2373 } else {
duke@435 2374 FreeChunk* fc = (FreeChunk*)addr;
duke@435 2375 res = fc->size();
duke@435 2376 if (FLSVerifyLists && !fc->cantCoalesce()) {
jmasa@3732 2377 guarantee(_sp->verify_chunk_in_free_list(fc),
duke@435 2378 "Chunk should be on a free list");
duke@435 2379 }
duke@435 2380 }
ysr@2071 2381 if (res == 0) {
ysr@2071 2382 gclog_or_tty->print_cr("Livelock: no rank reduction!");
ysr@2071 2383 gclog_or_tty->print_cr(
ysr@2071 2384 " Current: addr = " PTR_FORMAT ", size = " SIZE_FORMAT ", obj = %s, live = %s \n"
ysr@2071 2385 " Previous: addr = " PTR_FORMAT ", size = " SIZE_FORMAT ", obj = %s, live = %s \n",
ysr@2071 2386 addr, res, was_obj ?"true":"false", was_live ?"true":"false",
ysr@2071 2387 _last_addr, _last_size, _last_was_obj?"true":"false", _last_was_live?"true":"false");
ysr@2071 2388 _sp->print_on(gclog_or_tty);
ysr@2071 2389 guarantee(false, "Seppuku!");
ysr@2071 2390 }
ysr@2071 2391 _last_addr = addr;
ysr@2071 2392 _last_size = res;
ysr@2071 2393 _last_was_obj = was_obj;
ysr@2071 2394 _last_was_live = was_live;
duke@435 2395 return res;
duke@435 2396 }
duke@435 2397 };
duke@435 2398
duke@435 2399 class VerifyAllOopsClosure: public OopClosure {
coleenp@548 2400 private:
duke@435 2401 const CMSCollector* _collector;
duke@435 2402 const CompactibleFreeListSpace* _sp;
duke@435 2403 const MemRegion _span;
duke@435 2404 const bool _past_remark;
duke@435 2405 const CMSBitMap* _bit_map;
duke@435 2406
coleenp@548 2407 protected:
coleenp@548 2408 void do_oop(void* p, oop obj) {
coleenp@548 2409 if (_span.contains(obj)) { // the interior oop points into CMS heap
coleenp@548 2410 if (!_span.contains(p)) { // reference from outside CMS heap
coleenp@548 2411 // Should be a valid object; the first disjunct below allows
coleenp@548 2412 // us to sidestep an assertion in block_is_obj() that insists
coleenp@548    2413         // that its argument be in _sp. Note that several generations (and spaces)
coleenp@548 2414 // are spanned by _span (CMS heap) above.
coleenp@548 2415 guarantee(!_sp->is_in_reserved(obj) ||
coleenp@548 2416 _sp->block_is_obj((HeapWord*)obj),
coleenp@548 2417 "Should be an object");
coleenp@548 2418 guarantee(obj->is_oop(), "Should be an oop");
coleenp@548 2419 obj->verify();
coleenp@548 2420 if (_past_remark) {
coleenp@548 2421 // Remark has been completed, the object should be marked
coleenp@548 2422 _bit_map->isMarked((HeapWord*)obj);
coleenp@548 2423 }
coleenp@548 2424 } else { // reference within CMS heap
coleenp@548 2425 if (_past_remark) {
coleenp@548 2426 // Remark has been completed -- so the referent should have
coleenp@548 2427 // been marked, if referring object is.
coleenp@548 2428 if (_bit_map->isMarked(_collector->block_start(p))) {
coleenp@548 2429 guarantee(_bit_map->isMarked((HeapWord*)obj), "Marking error?");
coleenp@548 2430 }
coleenp@548 2431 }
coleenp@548 2432 }
coleenp@548 2433 } else if (_sp->is_in_reserved(p)) {
coleenp@548 2434 // the reference is from FLS, and points out of FLS
coleenp@548 2435 guarantee(obj->is_oop(), "Should be an oop");
coleenp@548 2436 obj->verify();
coleenp@548 2437 }
coleenp@548 2438 }
coleenp@548 2439
coleenp@548 2440 template <class T> void do_oop_work(T* p) {
coleenp@548 2441 T heap_oop = oopDesc::load_heap_oop(p);
coleenp@548 2442 if (!oopDesc::is_null(heap_oop)) {
coleenp@548 2443 oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
coleenp@548 2444 do_oop(p, obj);
coleenp@548 2445 }
coleenp@548 2446 }
coleenp@548 2447
duke@435 2448 public:
duke@435 2449 VerifyAllOopsClosure(const CMSCollector* collector,
duke@435 2450 const CompactibleFreeListSpace* sp, MemRegion span,
duke@435 2451 bool past_remark, CMSBitMap* bit_map) :
coleenp@4037 2452 _collector(collector), _sp(sp), _span(span),
duke@435 2453 _past_remark(past_remark), _bit_map(bit_map) { }
duke@435 2454
coleenp@548 2455 virtual void do_oop(oop* p) { VerifyAllOopsClosure::do_oop_work(p); }
coleenp@548 2456 virtual void do_oop(narrowOop* p) { VerifyAllOopsClosure::do_oop_work(p); }
duke@435 2457 };
duke@435 2458
brutisso@3711 2459 void CompactibleFreeListSpace::verify() const {
duke@435 2460 assert_lock_strong(&_freelistLock);
duke@435 2461 verify_objects_initialized();
duke@435 2462 MemRegion span = _collector->_span;
duke@435 2463 bool past_remark = (_collector->abstract_state() ==
duke@435 2464 CMSCollector::Sweeping);
duke@435 2465
duke@435 2466 ResourceMark rm;
duke@435 2467 HandleMark hm;
duke@435 2468
duke@435 2469 // Check integrity of CFL data structures
duke@435 2470 _promoInfo.verify();
duke@435 2471 _dictionary->verify();
duke@435 2472 if (FLSVerifyIndexTable) {
duke@435 2473 verifyIndexedFreeLists();
duke@435 2474 }
duke@435 2475 // Check integrity of all objects and free blocks in space
duke@435 2476 {
duke@435 2477 VerifyAllBlksClosure cl(this, span);
duke@435 2478 ((CompactibleFreeListSpace*)this)->blk_iterate(&cl); // cast off const
duke@435 2479 }
duke@435 2480 // Check that all references in the heap to FLS
duke@435 2481 // are to valid objects in FLS or that references in
duke@435 2482 // FLS are to valid objects elsewhere in the heap
duke@435 2483 if (FLSVerifyAllHeapReferences)
duke@435 2484 {
duke@435 2485 VerifyAllOopsClosure cl(_collector, this, span, past_remark,
duke@435 2486 _collector->markBitMap());
duke@435 2487 CollectedHeap* ch = Universe::heap();
coleenp@4037 2488
coleenp@4037 2489 // Iterate over all oops in the heap. Uses the _no_header version
coleenp@4037 2490 // since we are not interested in following the klass pointers.
coleenp@4037 2491 ch->oop_iterate_no_header(&cl);
duke@435 2492 }
duke@435 2493
duke@435 2494 if (VerifyObjectStartArray) {
duke@435 2495 // Verify the block offset table
duke@435 2496 _bt.verify();
duke@435 2497 }
duke@435 2498 }
duke@435 2499
duke@435 2500 #ifndef PRODUCT
duke@435 2501 void CompactibleFreeListSpace::verifyFreeLists() const {
duke@435 2502 if (FLSVerifyLists) {
duke@435 2503 _dictionary->verify();
duke@435 2504 verifyIndexedFreeLists();
duke@435 2505 } else {
duke@435 2506 if (FLSVerifyDictionary) {
duke@435 2507 _dictionary->verify();
duke@435 2508 }
duke@435 2509 if (FLSVerifyIndexTable) {
duke@435 2510 verifyIndexedFreeLists();
duke@435 2511 }
duke@435 2512 }
duke@435 2513 }
duke@435 2514 #endif
duke@435 2515
duke@435 2516 void CompactibleFreeListSpace::verifyIndexedFreeLists() const {
duke@435 2517 size_t i = 0;
ysr@3264 2518 for (; i < IndexSetStart; i++) {
duke@435 2519 guarantee(_indexedFreeList[i].head() == NULL, "should be NULL");
duke@435 2520 }
duke@435 2521 for (; i < IndexSetSize; i++) {
duke@435 2522 verifyIndexedFreeList(i);
duke@435 2523 }
duke@435 2524 }
duke@435 2525
duke@435 2526 void CompactibleFreeListSpace::verifyIndexedFreeList(size_t size) const {
ysr@1580 2527 FreeChunk* fc = _indexedFreeList[size].head();
ysr@1580 2528 FreeChunk* tail = _indexedFreeList[size].tail();
ysr@1580 2529 size_t num = _indexedFreeList[size].count();
ysr@1580 2530 size_t n = 0;
ysr@3264 2531 guarantee(((size >= IndexSetStart) && (size % IndexSetStride == 0)) || fc == NULL,
ysr@3220 2532 "Slot should have been empty");
ysr@1580 2533 for (; fc != NULL; fc = fc->next(), n++) {
duke@435 2534 guarantee(fc->size() == size, "Size inconsistency");
jmasa@3732 2535 guarantee(fc->is_free(), "!free?");
duke@435 2536 guarantee(fc->next() == NULL || fc->next()->prev() == fc, "Broken list");
ysr@1580 2537 guarantee((fc->next() == NULL) == (fc == tail), "Incorrect tail");
duke@435 2538 }
ysr@1580 2539 guarantee(n == num, "Incorrect count");
duke@435 2540 }
duke@435 2541
duke@435 2542 #ifndef PRODUCT
ysr@3220 2543 void CompactibleFreeListSpace::check_free_list_consistency() const {
goetz@6337 2544 assert((TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >::min_size() <= IndexSetSize),
duke@435 2545 "Some sizes can't be allocated without recourse to"
duke@435 2546 " linear allocation buffers");
goetz@6337 2547 assert((TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >::min_size()*HeapWordSize == sizeof(TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >)),
duke@435 2548 "else MIN_TREE_CHUNK_SIZE is wrong");
brutisso@3807 2549 assert(IndexSetStart != 0, "IndexSetStart not initialized");
brutisso@3807 2550 assert(IndexSetStride != 0, "IndexSetStride not initialized");
duke@435 2551 }
duke@435 2552 #endif
duke@435 2553
ysr@447 2554 void CompactibleFreeListSpace::printFLCensus(size_t sweep_count) const {
duke@435 2555 assert_lock_strong(&_freelistLock);
jmasa@4196 2556 AdaptiveFreeList<FreeChunk> total;
ysr@447 2557 gclog_or_tty->print("end sweep# " SIZE_FORMAT "\n", sweep_count);
jmasa@4196 2558 AdaptiveFreeList<FreeChunk>::print_labels_on(gclog_or_tty, "size");
jmasa@3732 2559 size_t total_free = 0;
duke@435 2560 for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
jmasa@4196 2561 const AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
jmasa@3732 2562 total_free += fl->count() * fl->size();
ysr@447 2563 if (i % (40*IndexSetStride) == 0) {
jmasa@4196 2564 AdaptiveFreeList<FreeChunk>::print_labels_on(gclog_or_tty, "size");
ysr@447 2565 }
ysr@447 2566 fl->print_on(gclog_or_tty);
jmasa@3732 2567 total.set_bfr_surp( total.bfr_surp() + fl->bfr_surp() );
ysr@447 2568 total.set_surplus( total.surplus() + fl->surplus() );
ysr@447 2569 total.set_desired( total.desired() + fl->desired() );
jmasa@3732 2570 total.set_prev_sweep( total.prev_sweep() + fl->prev_sweep() );
jmasa@3732 2571 total.set_before_sweep(total.before_sweep() + fl->before_sweep());
ysr@447 2572 total.set_count( total.count() + fl->count() );
jmasa@3732 2573 total.set_coal_births( total.coal_births() + fl->coal_births() );
jmasa@3732 2574 total.set_coal_deaths( total.coal_deaths() + fl->coal_deaths() );
jmasa@3732 2575 total.set_split_births(total.split_births() + fl->split_births());
jmasa@3732 2576 total.set_split_deaths(total.split_deaths() + fl->split_deaths());
duke@435 2577 }
ysr@447 2578 total.print_on(gclog_or_tty, "TOTAL");
ysr@447 2579 gclog_or_tty->print_cr("Total free in indexed lists "
jmasa@3732 2580 SIZE_FORMAT " words", total_free);
duke@435 2581 gclog_or_tty->print("growth: %8.5f deficit: %8.5f\n",
jmasa@3732 2582 (double)(total.split_births()+total.coal_births()-total.split_deaths()-total.coal_deaths())/
jmasa@3732 2583 (total.prev_sweep() != 0 ? (double)total.prev_sweep() : 1.0),
ysr@447 2584 (double)(total.desired() - total.count())/(total.desired() != 0 ? (double)total.desired() : 1.0));
jmasa@3732 2585 _dictionary->print_dict_census();
duke@435 2586 }
duke@435 2587
ysr@1580 2588 ///////////////////////////////////////////////////////////////////////////
ysr@1580 2589 // CFLS_LAB
ysr@1580 2590 ///////////////////////////////////////////////////////////////////////////
ysr@1580 2591
ysr@1580 2592 #define VECTOR_257(x) \
ysr@1580 2593 /* 1 2 3 4 5 6 7 8 9 1x 11 12 13 14 15 16 17 18 19 2x 21 22 23 24 25 26 27 28 29 3x 31 32 */ \
ysr@1580 2594 { x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, \
ysr@1580 2595 x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, \
ysr@1580 2596 x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, \
ysr@1580 2597 x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, \
ysr@1580 2598 x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, \
ysr@1580 2599 x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, \
ysr@1580 2600 x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, \
ysr@1580 2601 x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, \
ysr@1580 2602 x }
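// The macro above expands to an aggregate initializer with 8*32 + 1 = 257
// entries, one per indexed free list slot; the CFLS_LAB constructor below
// asserts that IndexSetSize is still 257 so the two stay in sync.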
ysr@1580 2603
ysr@1580 2604 // Initialize with default setting of CMSParPromoteBlocksToClaim, _not_
ysr@1580 2605 // OldPLABSize, whose static default is different; if overridden at the
ysr@1580 2606 // command-line, this will get reinitialized via a call to
ysr@1580 2607 // modify_initialization() below.
ysr@1580 2608 AdaptiveWeightedAverage CFLS_LAB::_blocks_to_claim[] =
ysr@1580 2609 VECTOR_257(AdaptiveWeightedAverage(OldPLABWeight, (float)CMSParPromoteBlocksToClaim));
ysr@1580 2610 size_t CFLS_LAB::_global_num_blocks[] = VECTOR_257(0);
jmasa@3357 2611 uint CFLS_LAB::_global_num_workers[] = VECTOR_257(0);
duke@435 2612
duke@435 2613 CFLS_LAB::CFLS_LAB(CompactibleFreeListSpace* cfls) :
duke@435 2614 _cfls(cfls)
duke@435 2615 {
ysr@1580 2616 assert(CompactibleFreeListSpace::IndexSetSize == 257, "Modify VECTOR_257() macro above");
duke@435 2617 for (size_t i = CompactibleFreeListSpace::IndexSetStart;
duke@435 2618 i < CompactibleFreeListSpace::IndexSetSize;
duke@435 2619 i += CompactibleFreeListSpace::IndexSetStride) {
duke@435 2620 _indexedFreeList[i].set_size(i);
ysr@1580 2621 _num_blocks[i] = 0;
ysr@1580 2622 }
ysr@1580 2623 }
ysr@1580 2624
ysr@1580 2625 static bool _CFLS_LAB_modified = false;
ysr@1580 2626
ysr@1580 2627 void CFLS_LAB::modify_initialization(size_t n, unsigned wt) {
ysr@1580 2628 assert(!_CFLS_LAB_modified, "Call only once");
ysr@1580 2629 _CFLS_LAB_modified = true;
ysr@1580 2630 for (size_t i = CompactibleFreeListSpace::IndexSetStart;
ysr@1580 2631 i < CompactibleFreeListSpace::IndexSetSize;
ysr@1580 2632 i += CompactibleFreeListSpace::IndexSetStride) {
ysr@1580 2633 _blocks_to_claim[i].modify(n, wt, true /* force */);
duke@435 2634 }
duke@435 2635 }
duke@435 2636
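// Allocate word_sz heap words for promotion. Requests of IndexSetSize words
// or more are carved out of the shared dictionary under parDictionaryAllocLock;
// smaller requests are served from this thread's private indexed free list,
// refilled in bulk from the shared pool when it runs dry. Returns NULL if no
// suitable chunk can be found.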
duke@435 2637 HeapWord* CFLS_LAB::alloc(size_t word_sz) {
duke@435 2638 FreeChunk* res;
ysr@2132 2639 assert(word_sz == _cfls->adjustObjectSize(word_sz), "Error");
duke@435 2640 if (word_sz >= CompactibleFreeListSpace::IndexSetSize) {
duke@435 2641 // This locking manages sync with other large object allocations.
duke@435 2642 MutexLockerEx x(_cfls->parDictionaryAllocLock(),
duke@435 2643 Mutex::_no_safepoint_check_flag);
duke@435 2644 res = _cfls->getChunkFromDictionaryExact(word_sz);
duke@435 2645 if (res == NULL) return NULL;
duke@435 2646 } else {
jmasa@4196 2647 AdaptiveFreeList<FreeChunk>* fl = &_indexedFreeList[word_sz];
duke@435 2648 if (fl->count() == 0) {
duke@435 2649 // Attempt to refill this local free list.
ysr@1580 2650 get_from_global_pool(word_sz, fl);
duke@435 2651 // If it didn't work, give up.
duke@435 2652 if (fl->count() == 0) return NULL;
duke@435 2653 }
jmasa@3732 2654 res = fl->get_chunk_at_head();
duke@435 2655 assert(res != NULL, "Why was count non-zero?");
duke@435 2656 }
duke@435 2657 res->markNotFree();
jmasa@3732 2658 assert(!res->is_free(), "shouldn't be marked free");
coleenp@622 2659 assert(oop(res)->klass_or_null() == NULL, "should look uninitialized");
duke@435 2660 // mangle a just allocated object with a distinct pattern.
duke@435 2661 debug_only(res->mangleAllocated(word_sz));
duke@435 2662 return (HeapWord*)res;
duke@435 2663 }
duke@435 2664
ysr@1580 2665 // Get a chunk of blocks of the right size and update related
ysr@1580 2666 // book-keeping stats
jmasa@4196 2667 void CFLS_LAB::get_from_global_pool(size_t word_sz, AdaptiveFreeList<FreeChunk>* fl) {
ysr@1580 2668 // Get the #blocks we want to claim
ysr@1580 2669 size_t n_blks = (size_t)_blocks_to_claim[word_sz].average();
ysr@1580 2670 assert(n_blks > 0, "Error");
ysr@1580    2671   assert(ResizeOldPLAB || n_blks == OldPLABSize, "Error");
ysr@1580 2672 // In some cases, when the application has a phase change,
ysr@1580 2673 // there may be a sudden and sharp shift in the object survival
ysr@1580 2674 // profile, and updating the counts at the end of a scavenge
ysr@1580 2675 // may not be quick enough, giving rise to large scavenge pauses
ysr@1580 2676 // during these phase changes. It is beneficial to detect such
ysr@1580 2677 // changes on-the-fly during a scavenge and avoid such a phase-change
ysr@1580 2678 // pothole. The following code is a heuristic attempt to do that.
ysr@1580 2679 // It is protected by a product flag until we have gained
ysr@1580 2680 // enough experience with this heuristic and fine-tuned its behaviour.
ysr@1580 2681 // WARNING: This might increase fragmentation if we overreact to
ysr@1580 2682 // small spikes, so some kind of historical smoothing based on
ysr@1580 2683 // previous experience with the greater reactivity might be useful.
ysr@1580 2684 // Lacking sufficient experience, CMSOldPLABResizeQuicker is disabled by
ysr@1580 2685 // default.
ysr@1580 2686 if (ResizeOldPLAB && CMSOldPLABResizeQuicker) {
ysr@1580 2687 size_t multiple = _num_blocks[word_sz]/(CMSOldPLABToleranceFactor*CMSOldPLABNumRefills*n_blks);
ysr@1580 2688 n_blks += CMSOldPLABReactivityFactor*multiple*n_blks;
ysr@1580 2689 n_blks = MIN2(n_blks, CMSOldPLABMax);
ysr@1580 2690 }
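// Illustrative arithmetic for the adjustment above (hypothetical counts): if the
// blocks of this size consumed so far reach 8x the tolerated budget of
// CMSOldPLABToleranceFactor * CMSOldPLABNumRefills * n_blks, then multiple == 8
// and n_blks grows by CMSOldPLABReactivityFactor * 8 * n_blks, capped at
// CMSOldPLABMax.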
ysr@1580 2691 assert(n_blks > 0, "Error");
ysr@1580 2692 _cfls->par_get_chunk_of_blocks(word_sz, n_blks, fl);
ysr@1580 2693 // Update stats table entry for this block size
ysr@1580 2694 _num_blocks[word_sz] += fl->count();
ysr@1580 2695 }
ysr@1580 2696
ysr@1580 2697 void CFLS_LAB::compute_desired_plab_size() {
ysr@1580 2698 for (size_t i = CompactibleFreeListSpace::IndexSetStart;
duke@435 2699 i < CompactibleFreeListSpace::IndexSetSize;
duke@435 2700 i += CompactibleFreeListSpace::IndexSetStride) {
ysr@1580 2701 assert((_global_num_workers[i] == 0) == (_global_num_blocks[i] == 0),
ysr@1580 2702 "Counter inconsistency");
ysr@1580 2703 if (_global_num_workers[i] > 0) {
ysr@1580 2704 // Need to smooth wrt historical average
ysr@1580 2705 if (ResizeOldPLAB) {
ysr@1580 2706 _blocks_to_claim[i].sample(
ysr@1580 2707 MAX2((size_t)CMSOldPLABMin,
ysr@1580 2708 MIN2((size_t)CMSOldPLABMax,
ysr@1580 2709 _global_num_blocks[i]/(_global_num_workers[i]*CMSOldPLABNumRefills))));
ysr@1580 2710 }
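// Illustrative sample (hypothetical numbers): 4 workers that together consumed
// 3200 blocks of this size, with CMSOldPLABNumRefills == 4, contribute
// 3200 / (4 * 4) == 200 blocks per refill to the weighted average, clamped to
// the [CMSOldPLABMin, CMSOldPLABMax] range.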
ysr@1580 2711 // Reset counters for next round
ysr@1580 2712 _global_num_workers[i] = 0;
ysr@1580 2713 _global_num_blocks[i] = 0;
ysr@1580 2714 if (PrintOldPLAB) {
ysr@1580    2715         gclog_or_tty->print_cr("[" SIZE_FORMAT "]: " SIZE_FORMAT, i, (size_t)_blocks_to_claim[i].average());
ysr@1580 2716 }
duke@435 2717 }
duke@435 2718 }
duke@435 2719 }
duke@435 2720
ysr@3220 2721 // If this is changed in the future to allow parallel
ysr@3220 2722 // access, one would need to take the FL locks and,
ysr@3220 2723 // depending on how it is used, stagger access from
ysr@3220 2724 // parallel threads to reduce contention.
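// Flush this thread's unused blocks back to the shared indexed free lists and
// fold its per-size consumption counts into the global statistics used by
// compute_desired_plab_size() above.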
ysr@1580 2725 void CFLS_LAB::retire(int tid) {
ysr@1580 2726 // We run this single threaded with the world stopped;
ysr@1580 2727 // so no need for locks and such.
ysr@1580 2728 NOT_PRODUCT(Thread* t = Thread::current();)
ysr@1580 2729 assert(Thread::current()->is_VM_thread(), "Error");
ysr@1580 2730 for (size_t i = CompactibleFreeListSpace::IndexSetStart;
ysr@1580 2731 i < CompactibleFreeListSpace::IndexSetSize;
ysr@1580 2732 i += CompactibleFreeListSpace::IndexSetStride) {
ysr@1580 2733 assert(_num_blocks[i] >= (size_t)_indexedFreeList[i].count(),
ysr@1580 2734 "Can't retire more than what we obtained");
ysr@1580 2735 if (_num_blocks[i] > 0) {
ysr@1580 2736 size_t num_retire = _indexedFreeList[i].count();
ysr@1580 2737 assert(_num_blocks[i] > num_retire, "Should have used at least one");
ysr@1580 2738 {
ysr@3220 2739 // MutexLockerEx x(_cfls->_indexedFreeListParLocks[i],
ysr@3220 2740 // Mutex::_no_safepoint_check_flag);
ysr@3220 2741
ysr@1580 2742 // Update globals stats for num_blocks used
ysr@1580 2743 _global_num_blocks[i] += (_num_blocks[i] - num_retire);
ysr@1580 2744 _global_num_workers[i]++;
jmasa@3357 2745 assert(_global_num_workers[i] <= ParallelGCThreads, "Too big");
ysr@1580 2746 if (num_retire > 0) {
ysr@1580 2747 _cfls->_indexedFreeList[i].prepend(&_indexedFreeList[i]);
ysr@1580 2748 // Reset this list.
jmasa@4196 2749 _indexedFreeList[i] = AdaptiveFreeList<FreeChunk>();
ysr@1580 2750 _indexedFreeList[i].set_size(i);
ysr@1580 2751 }
ysr@1580 2752 }
ysr@1580 2753 if (PrintOldPLAB) {
ysr@1580    2754       gclog_or_tty->print_cr("%d[" SIZE_FORMAT "]: " SIZE_FORMAT "/" SIZE_FORMAT "/" SIZE_FORMAT,
ysr@1580    2755                              tid, i, num_retire, _num_blocks[i], (size_t)_blocks_to_claim[i].average());
ysr@1580 2756 }
ysr@1580 2757 // Reset stats for next round
ysr@1580 2758 _num_blocks[i] = 0;
ysr@1580 2759 }
ysr@1580 2760 }
ysr@1580 2761 }
ysr@1580 2762
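// Fill "fl" with up to "n" free chunks of exactly "word_sz" words: first by
// splitting chunks already on the indexed free lists and, failing that, by
// carving up a single larger block taken from the dictionary.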
jmasa@4196    2763 void CompactibleFreeListSpace::par_get_chunk_of_blocks(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl) {
duke@435 2764 assert(fl->count() == 0, "Precondition.");
duke@435 2765 assert(word_sz < CompactibleFreeListSpace::IndexSetSize,
duke@435 2766 "Precondition");
duke@435 2767
ysr@1580    2768   // We'll try all multiples of word_sz in the indexed set, starting with
ysr@1580    2769   // word_sz itself and, only if CMSSplitIndexedFreeListBlocks, larger multiples;
ysr@1580    2770   // if that fails, we fall back to getting a big chunk from the dictionary and splitting it.
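// For example (illustrative sizes): with word_sz == 8 and
// CMSSplitIndexedFreeListBlocks enabled, we consider cur_sz == 8, 16, 24, ...
// below IndexSetSize; a 24-word chunk taken from the indexed lists is split
// k == 3 ways into the 8-word chunks being requested.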
ysr@1580 2771 {
ysr@1580 2772 bool found;
ysr@1580 2773 int k;
ysr@1580 2774 size_t cur_sz;
ysr@1580 2775 for (k = 1, cur_sz = k * word_sz, found = false;
ysr@1580 2776 (cur_sz < CompactibleFreeListSpace::IndexSetSize) &&
ysr@1580 2777 (CMSSplitIndexedFreeListBlocks || k <= 1);
ysr@1580 2778 k++, cur_sz = k * word_sz) {
jmasa@4196 2779 AdaptiveFreeList<FreeChunk> fl_for_cur_sz; // Empty.
ysr@1580 2780 fl_for_cur_sz.set_size(cur_sz);
ysr@1580 2781 {
ysr@1580 2782 MutexLockerEx x(_indexedFreeListParLocks[cur_sz],
ysr@1580 2783 Mutex::_no_safepoint_check_flag);
jmasa@4196 2784 AdaptiveFreeList<FreeChunk>* gfl = &_indexedFreeList[cur_sz];
ysr@1580 2785 if (gfl->count() != 0) {
ysr@1580 2786 // nn is the number of chunks of size cur_sz that
ysr@1580 2787 // we'd need to split k-ways each, in order to create
ysr@1580 2788 // "n" chunks of size word_sz each.
ysr@1580 2789 const size_t nn = MAX2(n/k, (size_t)1);
ysr@1580 2790 gfl->getFirstNChunksFromList(nn, &fl_for_cur_sz);
ysr@1580 2791 found = true;
ysr@1580 2792 if (k > 1) {
ysr@1580 2793 // Update split death stats for the cur_sz-size blocks list:
ysr@1580 2794 // we increment the split death count by the number of blocks
ysr@1580 2795 // we just took from the cur_sz-size blocks list and which
ysr@1580 2796 // we will be splitting below.
jmasa@3732 2797 ssize_t deaths = gfl->split_deaths() +
ysr@1580 2798 fl_for_cur_sz.count();
jmasa@3732 2799 gfl->set_split_deaths(deaths);
ysr@1580 2800 }
ysr@1580 2801 }
ysr@1580 2802 }
ysr@1580 2803 // Now transfer fl_for_cur_sz to fl. Common case, we hope, is k = 1.
ysr@1580 2804 if (found) {
ysr@1580 2805 if (k == 1) {
ysr@1580 2806 fl->prepend(&fl_for_cur_sz);
ysr@1580 2807 } else {
ysr@1580 2808 // Divide each block on fl_for_cur_sz up k ways.
ysr@1580 2809 FreeChunk* fc;
jmasa@3732 2810 while ((fc = fl_for_cur_sz.get_chunk_at_head()) != NULL) {
ysr@1580 2811 // Must do this in reverse order, so that anybody attempting to
ysr@1580 2812 // access the main chunk sees it as a single free block until we
ysr@1580 2813 // change it.
ysr@1580 2814 size_t fc_size = fc->size();
jmasa@3732 2815 assert(fc->is_free(), "Error");
ysr@1580 2816 for (int i = k-1; i >= 0; i--) {
ysr@1580 2817 FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz);
ysr@2071 2818 assert((i != 0) ||
jmasa@3732 2819 ((fc == ffc) && ffc->is_free() &&
ysr@2071 2820 (ffc->size() == k*word_sz) && (fc_size == word_sz)),
ysr@2071 2821 "Counting error");
jmasa@3732 2822 ffc->set_size(word_sz);
jmasa@3732 2823 ffc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
jmasa@3732 2824 ffc->link_next(NULL);
ysr@1580 2825 // Above must occur before BOT is updated below.
ysr@2071 2826 OrderAccess::storestore();
ysr@2071 2827 // splitting from the right, fc_size == i * word_sz
ysr@2071 2828 _bt.mark_block((HeapWord*)ffc, word_sz, true /* reducing */);
ysr@1580 2829 fc_size -= word_sz;
ysr@2071 2830 assert(fc_size == i*word_sz, "Error");
ysr@2071 2831 _bt.verify_not_unallocated((HeapWord*)ffc, word_sz);
ysr@1580 2832 _bt.verify_single_block((HeapWord*)fc, fc_size);
ysr@2071 2833 _bt.verify_single_block((HeapWord*)ffc, word_sz);
ysr@1580 2834 // Push this on "fl".
jmasa@3732 2835 fl->return_chunk_at_head(ffc);
ysr@1580 2836 }
ysr@1580 2837 // TRAP
ysr@1580 2838 assert(fl->tail()->next() == NULL, "List invariant.");
ysr@1580 2839 }
ysr@1580 2840 }
ysr@1580 2841 // Update birth stats for this block size.
ysr@1580 2842 size_t num = fl->count();
ysr@1580 2843 MutexLockerEx x(_indexedFreeListParLocks[word_sz],
ysr@1580 2844 Mutex::_no_safepoint_check_flag);
jmasa@3732 2845 ssize_t births = _indexedFreeList[word_sz].split_births() + num;
jmasa@3732 2846 _indexedFreeList[word_sz].set_split_births(births);
ysr@1580 2847 return;
duke@435 2848 }
duke@435 2849 }
duke@435 2850 }
duke@435 2851 // Otherwise, we'll split a block from the dictionary.
duke@435 2852 FreeChunk* fc = NULL;
duke@435 2853 FreeChunk* rem_fc = NULL;
duke@435 2854 size_t rem;
duke@435 2855 {
duke@435 2856 MutexLockerEx x(parDictionaryAllocLock(),
duke@435 2857 Mutex::_no_safepoint_check_flag);
duke@435 2858 while (n > 0) {
jmasa@4196 2859 fc = dictionary()->get_chunk(MAX2(n * word_sz, _dictionary->min_size()),
jmasa@3730 2860 FreeBlockDictionary<FreeChunk>::atLeast);
duke@435 2861 if (fc != NULL) {
ysr@2071 2862 _bt.allocated((HeapWord*)fc, fc->size(), true /* reducing */); // update _unallocated_blk
jmasa@4196 2863 dictionary()->dict_census_update(fc->size(),
duke@435 2864 true /*split*/,
duke@435 2865 false /*birth*/);
duke@435 2866 break;
duke@435 2867 } else {
duke@435 2868 n--;
duke@435 2869 }
duke@435 2870 }
duke@435 2871 if (fc == NULL) return;
ysr@2071 2872 // Otherwise, split up that block.
ysr@1580 2873 assert((ssize_t)n >= 1, "Control point invariant");
jmasa@3732 2874 assert(fc->is_free(), "Error: should be a free block");
ysr@2071 2875 _bt.verify_single_block((HeapWord*)fc, fc->size());
ysr@1580 2876 const size_t nn = fc->size() / word_sz;
duke@435 2877 n = MIN2(nn, n);
ysr@1580 2878 assert((ssize_t)n >= 1, "Control point invariant");
duke@435 2879 rem = fc->size() - n * word_sz;
duke@435 2880 // If there is a remainder, and it's too small, allocate one fewer.
duke@435 2881 if (rem > 0 && rem < MinChunkSize) {
duke@435 2882 n--; rem += word_sz;
duke@435 2883 }
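// Illustrative (hypothetical sizes): word_sz == 4, n == 50 and a 205-word chunk
// give rem == 5; if MinChunkSize is 7, that remainder is unusable, so n drops
// to 49 and rem grows to 9, which can be returned as a free chunk below.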
jmasa@1583 2884 // Note that at this point we may have n == 0.
jmasa@1583 2885 assert((ssize_t)n >= 0, "Control point invariant");
jmasa@1583 2886
jmasa@1583 2887 // If n is 0, the chunk fc that was found is not large
jmasa@1583 2888 // enough to leave a viable remainder. We are unable to
jmasa@1583 2889 // allocate even one block. Return fc to the
jmasa@1583 2890 // dictionary and return, leaving "fl" empty.
jmasa@1583 2891 if (n == 0) {
jmasa@1583 2892 returnChunkToDictionary(fc);
ysr@2071 2893 assert(fl->count() == 0, "We never allocated any blocks");
jmasa@1583 2894 return;
jmasa@1583 2895 }
jmasa@1583 2896
duke@435 2897 // First return the remainder, if any.
duke@435 2898 // Note that we hold the lock until we decide if we're going to give
ysr@1580 2899 // back the remainder to the dictionary, since a concurrent allocation
duke@435 2900 // may otherwise see the heap as empty. (We're willing to take that
duke@435 2901 // hit if the block is a small block.)
duke@435 2902 if (rem > 0) {
duke@435 2903 size_t prefix_size = n * word_sz;
duke@435 2904 rem_fc = (FreeChunk*)((HeapWord*)fc + prefix_size);
jmasa@3732 2905 rem_fc->set_size(rem);
jmasa@3732 2906 rem_fc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
jmasa@3732 2907 rem_fc->link_next(NULL);
duke@435 2908 // Above must occur before BOT is updated below.
ysr@1580 2909 assert((ssize_t)n > 0 && prefix_size > 0 && rem_fc > fc, "Error");
ysr@2071 2910 OrderAccess::storestore();
duke@435 2911 _bt.split_block((HeapWord*)fc, fc->size(), prefix_size);
jmasa@3732 2912 assert(fc->is_free(), "Error");
jmasa@3732 2913 fc->set_size(prefix_size);
duke@435 2914 if (rem >= IndexSetSize) {
duke@435 2915 returnChunkToDictionary(rem_fc);
jmasa@4196 2916 dictionary()->dict_census_update(rem, true /*split*/, true /*birth*/);
duke@435 2917 rem_fc = NULL;
duke@435 2918 }
duke@435 2919 // Otherwise, return it to the small list below.
duke@435 2920 }
duke@435 2921 }
duke@435 2922 if (rem_fc != NULL) {
duke@435 2923 MutexLockerEx x(_indexedFreeListParLocks[rem],
duke@435 2924 Mutex::_no_safepoint_check_flag);
duke@435 2925 _bt.verify_not_unallocated((HeapWord*)rem_fc, rem_fc->size());
jmasa@3732 2926 _indexedFreeList[rem].return_chunk_at_head(rem_fc);
duke@435 2927 smallSplitBirth(rem);
duke@435 2928 }
ysr@1580 2929 assert((ssize_t)n > 0 && fc != NULL, "Consistency");
duke@435 2930 // Now do the splitting up.
duke@435 2931 // Must do this in reverse order, so that anybody attempting to
duke@435 2932 // access the main chunk sees it as a single free block until we
duke@435 2933 // change it.
duke@435 2934 size_t fc_size = n * word_sz;
duke@435 2935 // All but first chunk in this loop
duke@435 2936 for (ssize_t i = n-1; i > 0; i--) {
duke@435 2937 FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz);
jmasa@3732 2938 ffc->set_size(word_sz);
jmasa@3732 2939 ffc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
jmasa@3732 2940 ffc->link_next(NULL);
duke@435 2941 // Above must occur before BOT is updated below.
ysr@2071 2942 OrderAccess::storestore();
duke@435    2943     // splitting from the right: fc_size == (i + 1) * word_sz here, i * word_sz after the decrement below
ysr@2071 2944 _bt.mark_block((HeapWord*)ffc, word_sz, true /* reducing */);
duke@435 2945 fc_size -= word_sz;
duke@435 2946 _bt.verify_not_unallocated((HeapWord*)ffc, ffc->size());
duke@435 2947 _bt.verify_single_block((HeapWord*)ffc, ffc->size());
duke@435 2948 _bt.verify_single_block((HeapWord*)fc, fc_size);
duke@435 2949 // Push this on "fl".
jmasa@3732 2950 fl->return_chunk_at_head(ffc);
duke@435 2951 }
duke@435 2952 // First chunk
jmasa@3732 2953 assert(fc->is_free() && fc->size() == n*word_sz, "Error: should still be a free block");
ysr@2071 2954 // The blocks above should show their new sizes before the first block below
jmasa@3732 2955 fc->set_size(word_sz);
jmasa@3732 2956 fc->link_prev(NULL); // idempotent wrt free-ness, see assert above
jmasa@3732 2957 fc->link_next(NULL);
duke@435 2958 _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
duke@435 2959 _bt.verify_single_block((HeapWord*)fc, fc->size());
jmasa@3732 2960 fl->return_chunk_at_head(fc);
duke@435 2961
ysr@1580 2962 assert((ssize_t)n > 0 && (ssize_t)n == fl->count(), "Incorrect number of blocks");
duke@435 2963 {
ysr@1580 2964 // Update the stats for this block size.
duke@435 2965 MutexLockerEx x(_indexedFreeListParLocks[word_sz],
duke@435 2966 Mutex::_no_safepoint_check_flag);
jmasa@3732 2967 const ssize_t births = _indexedFreeList[word_sz].split_births() + n;
jmasa@3732 2968 _indexedFreeList[word_sz].set_split_births(births);
ysr@1580 2969 // ssize_t new_surplus = _indexedFreeList[word_sz].surplus() + n;
ysr@1580 2970 // _indexedFreeList[word_sz].set_surplus(new_surplus);
duke@435 2971 }
duke@435 2972
duke@435 2973 // TRAP
duke@435 2974 assert(fl->tail()->next() == NULL, "List invariant.");
duke@435 2975 }
duke@435 2976
duke@435 2977 // Set up the space's par_seq_tasks structure for work claiming
duke@435 2978 // for parallel rescan. See CMSParRemarkTask where this is currently used.
duke@435 2979 // XXX Need to suitably abstract and generalize this and the next
duke@435 2980 // method into one.
duke@435 2981 void
duke@435 2982 CompactibleFreeListSpace::
duke@435 2983 initialize_sequential_subtasks_for_rescan(int n_threads) {
duke@435 2984 // The "size" of each task is fixed according to rescan_task_size.
duke@435 2985 assert(n_threads > 0, "Unexpected n_threads argument");
duke@435 2986 const size_t task_size = rescan_task_size();
duke@435 2987 size_t n_tasks = (used_region().word_size() + task_size - 1)/task_size;
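// Ceiling division (illustrative): a 10000-word used region with a 4096-word
// task size yields n_tasks == 3; the final task may be shorter than the rest.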
ysr@775 2988 assert((n_tasks == 0) == used_region().is_empty(), "n_tasks incorrect");
ysr@775 2989 assert(n_tasks == 0 ||
ysr@775 2990 ((used_region().start() + (n_tasks - 1)*task_size < used_region().end()) &&
ysr@775 2991 (used_region().start() + n_tasks*task_size >= used_region().end())),
ysr@775 2992 "n_tasks calculation incorrect");
duke@435 2993 SequentialSubTasksDone* pst = conc_par_seq_tasks();
duke@435 2994 assert(!pst->valid(), "Clobbering existing data?");
jmasa@2188 2995 // Sets the condition for completion of the subtask (how many threads
jmasa@2188 2996 // need to finish in order to be done).
jmasa@2188 2997 pst->set_n_threads(n_threads);
duke@435 2998 pst->set_n_tasks((int)n_tasks);
duke@435 2999 }
duke@435 3000
duke@435 3001 // Set up the space's par_seq_tasks structure for work claiming
duke@435 3002 // for parallel concurrent marking. See CMSConcMarkTask where this is currently used.
duke@435 3003 void
duke@435 3004 CompactibleFreeListSpace::
duke@435 3005 initialize_sequential_subtasks_for_marking(int n_threads,
duke@435 3006 HeapWord* low) {
duke@435 3007 // The "size" of each task is fixed according to rescan_task_size.
duke@435 3008 assert(n_threads > 0, "Unexpected n_threads argument");
duke@435 3009 const size_t task_size = marking_task_size();
duke@435 3010 assert(task_size > CardTableModRefBS::card_size_in_words &&
duke@435 3011 (task_size % CardTableModRefBS::card_size_in_words == 0),
duke@435 3012 "Otherwise arithmetic below would be incorrect");
duke@435 3013 MemRegion span = _gen->reserved();
duke@435 3014 if (low != NULL) {
duke@435 3015 if (span.contains(low)) {
duke@435 3016 // Align low down to a card boundary so that
duke@435 3017 // we can use block_offset_careful() on span boundaries.
duke@435 3018 HeapWord* aligned_low = (HeapWord*)align_size_down((uintptr_t)low,
duke@435 3019 CardTableModRefBS::card_size);
duke@435 3020 // Clip span prefix at aligned_low
duke@435 3021 span = span.intersection(MemRegion(aligned_low, span.end()));
duke@435 3022 } else if (low > span.end()) {
duke@435 3023 span = MemRegion(low, low); // Null region
duke@435 3024 } // else use entire span
duke@435 3025 }
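// Illustrative: if "low" falls partway into a card, aligned_low backs up to the
// start of that card, so the clipped span still begins on a card boundary
// (checked below) while covering everything from "low" upward.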
duke@435 3026 assert(span.is_empty() ||
duke@435 3027 ((uintptr_t)span.start() % CardTableModRefBS::card_size == 0),
duke@435 3028 "span should start at a card boundary");
duke@435 3029 size_t n_tasks = (span.word_size() + task_size - 1)/task_size;
duke@435 3030 assert((n_tasks == 0) == span.is_empty(), "Inconsistency");
duke@435 3031 assert(n_tasks == 0 ||
duke@435 3032 ((span.start() + (n_tasks - 1)*task_size < span.end()) &&
duke@435 3033 (span.start() + n_tasks*task_size >= span.end())),
ysr@775 3034 "n_tasks calculation incorrect");
duke@435 3035 SequentialSubTasksDone* pst = conc_par_seq_tasks();
duke@435 3036 assert(!pst->valid(), "Clobbering existing data?");
jmasa@2188 3037 // Sets the condition for completion of the subtask (how many threads
jmasa@2188 3038 // need to finish in order to be done).
jmasa@2188 3039 pst->set_n_threads(n_threads);
duke@435 3040 pst->set_n_tasks((int)n_tasks);
duke@435 3041 }
