src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp

author:      asaha
date:        Thu, 23 Oct 2014 12:02:08 -0700
changeset:   7476:c2844108a708
parents:     7234:4001310db3f5, 7474:6e56d7f1634f
child:       7485:9fa3bf3043a2
permissions: -rw-r--r--

Merge

duke@435 1 /*
drchase@6680 2 * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
duke@435 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
duke@435 4 *
duke@435 5 * This code is free software; you can redistribute it and/or modify it
duke@435 6 * under the terms of the GNU General Public License version 2 only, as
duke@435 7 * published by the Free Software Foundation.
duke@435 8 *
duke@435 9 * This code is distributed in the hope that it will be useful, but WITHOUT
duke@435 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
duke@435 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
duke@435 12 * version 2 for more details (a copy is included in the LICENSE file that
duke@435 13 * accompanied this code).
duke@435 14 *
duke@435 15 * You should have received a copy of the GNU General Public License version
duke@435 16 * 2 along with this work; if not, write to the Free Software Foundation,
duke@435 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
duke@435 18 *
trims@1907 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
trims@1907 20 * or visit www.oracle.com if you need additional information or have any
trims@1907 21 * questions.
duke@435 22 *
duke@435 23 */
duke@435 24
stefank@2314 25 #include "precompiled.hpp"
stefank@2314 26 #include "gc_implementation/concurrentMarkSweep/cmsLockVerifier.hpp"
stefank@2314 27 #include "gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp"
stefank@2314 28 #include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.inline.hpp"
stefank@2314 29 #include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.hpp"
stefank@2314 30 #include "gc_implementation/shared/liveRange.hpp"
stefank@2314 31 #include "gc_implementation/shared/spaceDecorator.hpp"
coleenp@4037 32 #include "gc_interface/collectedHeap.inline.hpp"
stefank@2314 33 #include "memory/allocation.inline.hpp"
stefank@2314 34 #include "memory/blockOffsetTable.inline.hpp"
stefank@2314 35 #include "memory/resourceArea.hpp"
goetz@6912 36 #include "memory/space.inline.hpp"
stefank@2314 37 #include "memory/universe.inline.hpp"
stefank@2314 38 #include "oops/oop.inline.hpp"
stefank@2314 39 #include "runtime/globals.hpp"
stefank@2314 40 #include "runtime/handles.inline.hpp"
stefank@2314 41 #include "runtime/init.hpp"
stefank@2314 42 #include "runtime/java.hpp"
goetz@6911 43 #include "runtime/orderAccess.inline.hpp"
stefank@2314 44 #include "runtime/vmThread.hpp"
stefank@2314 45 #include "utilities/copy.hpp"
duke@435 46
duke@435 47 /////////////////////////////////////////////////////////////////////////
duke@435 48 //// CompactibleFreeListSpace
duke@435 49 /////////////////////////////////////////////////////////////////////////
duke@435 50
duke@435 51 // highest ranked free list lock rank
duke@435 52 int CompactibleFreeListSpace::_lockRank = Mutex::leaf + 3;
duke@435 53
kvn@1926 54 // Defaults are 0 so things will break badly if incorrectly initialized.
ysr@3264 55 size_t CompactibleFreeListSpace::IndexSetStart = 0;
ysr@3264 56 size_t CompactibleFreeListSpace::IndexSetStride = 0;
kvn@1926 57
kvn@1926 58 size_t MinChunkSize = 0;
kvn@1926 59
kvn@1926 60 void CompactibleFreeListSpace::set_cms_values() {
kvn@1926 61 // Set CMS global values
kvn@1926 62 assert(MinChunkSize == 0, "already set");
brutisso@3807 63
brutisso@3807 64 // MinChunkSize should be a multiple of MinObjAlignment and be large enough
brutisso@3807 65 // for chunks to contain a FreeChunk.
brutisso@3807 66 size_t min_chunk_size_in_bytes = align_size_up(sizeof(FreeChunk), MinObjAlignmentInBytes);
brutisso@3807 67 MinChunkSize = min_chunk_size_in_bytes / BytesPerWord;
kvn@1926 68
kvn@1926 69 assert(IndexSetStart == 0 && IndexSetStride == 0, "already set");
ysr@3264 70 IndexSetStart = MinChunkSize;
kvn@1926 71 IndexSetStride = MinObjAlignment;
kvn@1926 72 }
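// Illustrative note (not part of the original source): on a 64-bit VM with
// MinObjAlignmentInBytes == 8 and, hypothetically, sizeof(FreeChunk) == 24,
// the computation above gives
//   min_chunk_size_in_bytes = align_size_up(24, 8) = 24
//   MinChunkSize            = 24 / BytesPerWord    = 3 heap words
// so the indexed free lists start at size MinChunkSize and are strided by
// MinObjAlignment.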
kvn@1926 73
duke@435 74 // Constructor
duke@435 75 CompactibleFreeListSpace::CompactibleFreeListSpace(BlockOffsetSharedArray* bs,
duke@435 76 MemRegion mr, bool use_adaptive_freelists,
jmasa@3730 77 FreeBlockDictionary<FreeChunk>::DictionaryChoice dictionaryChoice) :
duke@435 78 _dictionaryChoice(dictionaryChoice),
duke@435 79 _adaptive_freelists(use_adaptive_freelists),
duke@435 80 _bt(bs, mr),
duke@435 81 // Free list locks are in the range of values taken by _lockRank.
duke@435 82 // This range currently is [_leaf+2, _leaf+3].
duke@435 83 // Note: this requires that CFLspace c'tors
duke@435 84 // are called serially in the order in which the locks
duke@435 85 // are acquired in the program text. This is true today.
duke@435 86 _freelistLock(_lockRank--, "CompactibleFreeListSpace._lock", true),
duke@435 87 _parDictionaryAllocLock(Mutex::leaf - 1, // == rank(ExpandHeap_lock) - 1
duke@435 88 "CompactibleFreeListSpace._dict_par_lock", true),
duke@435 89 _rescan_task_size(CardTableModRefBS::card_size_in_words * BitsPerWord *
duke@435 90 CMSRescanMultiple),
duke@435 91 _marking_task_size(CardTableModRefBS::card_size_in_words * BitsPerWord *
duke@435 92 CMSConcMarkMultiple),
duke@435 93 _collector(NULL)
duke@435 94 {
jmasa@3730 95 assert(sizeof(FreeChunk) / BytesPerWord <= MinChunkSize,
jmasa@4196 96 "FreeChunk is larger than expected");
duke@435 97 _bt.set_space(this);
jmasa@698 98 initialize(mr, SpaceDecorator::Clear, SpaceDecorator::Mangle);
duke@435 99 // We have all of "mr", all of which we place in the dictionary
duke@435 100 // as one big chunk. We'll need to decide here which of several
duke@435 101 // possible alternative dictionary implementations to use. For
duke@435 102 // now the choice is easy, since we have only one working
duke@435 103 // implementation, namely, the simple binary tree (splaying
duke@435 104 // temporarily disabled).
duke@435 105 switch (dictionaryChoice) {
jmasa@4196 106 case FreeBlockDictionary<FreeChunk>::dictionaryBinaryTree:
jmasa@4488 107 _dictionary = new AFLBinaryTreeDictionary(mr);
jmasa@4196 108 break;
jmasa@3730 109 case FreeBlockDictionary<FreeChunk>::dictionarySplayTree:
jmasa@3730 110 case FreeBlockDictionary<FreeChunk>::dictionarySkipList:
duke@435 111 default:
duke@435 112 warning("dictionaryChoice: selected option not understood; using"
duke@435 113 " default BinaryTreeDictionary implementation instead.");
duke@435 114 }
duke@435 115 assert(_dictionary != NULL, "CMS dictionary initialization");
duke@435 116 // The indexed free lists are initially all empty and are lazily
duke@435 117 // filled in on demand. Initialize the array elements to NULL.
duke@435 118 initializeIndexedFreeListArray();
duke@435 119
duke@435 120 // Not using adaptive free lists assumes that allocation is first
duke@435 121 // from the linAB's. Also a cms perm gen which can be compacted
duke@435 122 // has to have the klass's klassKlass allocated at a lower
duke@435 123 // address in the heap than the klass so that the klassKlass is
duke@435 124 // moved to its new location before the klass is moved.
duke@435 125 // Set the _refillSize for the linear allocation blocks
duke@435 126 if (!use_adaptive_freelists) {
jmasa@4488 127 FreeChunk* fc = _dictionary->get_chunk(mr.word_size(),
jmasa@4488 128 FreeBlockDictionary<FreeChunk>::atLeast);
duke@435 129 // The small linAB initially has all the space and will allocate
duke@435 130 // a chunk of any size.
duke@435 131 HeapWord* addr = (HeapWord*) fc;
duke@435 132 _smallLinearAllocBlock.set(addr, fc->size() ,
duke@435 133 1024*SmallForLinearAlloc, fc->size());
duke@435 134 // Note that _unallocated_block is not updated here.
duke@435 135 // Allocations from the linear allocation block should
duke@435 136 // update it.
duke@435 137 } else {
duke@435 138 _smallLinearAllocBlock.set(0, 0, 1024*SmallForLinearAlloc,
duke@435 139 SmallForLinearAlloc);
duke@435 140 }
duke@435 141 // CMSIndexedFreeListReplenish should be at least 1
duke@435 142 CMSIndexedFreeListReplenish = MAX2((uintx)1, CMSIndexedFreeListReplenish);
duke@435 143 _promoInfo.setSpace(this);
duke@435 144 if (UseCMSBestFit) {
duke@435 145 _fitStrategy = FreeBlockBestFitFirst;
duke@435 146 } else {
duke@435 147 _fitStrategy = FreeBlockStrategyNone;
duke@435 148 }
ysr@3220 149 check_free_list_consistency();
duke@435 150
duke@435 151 // Initialize locks for parallel case.
jmasa@2188 152
jmasa@2188 153 if (CollectedHeap::use_parallel_gc_threads()) {
duke@435 154 for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
duke@435 155 _indexedFreeListParLocks[i] = new Mutex(Mutex::leaf - 1, // == ExpandHeap_lock - 1
duke@435 156 "a freelist par lock",
duke@435 157 true);
duke@435 158 DEBUG_ONLY(
duke@435 159 _indexedFreeList[i].set_protecting_lock(_indexedFreeListParLocks[i]);
duke@435 160 )
duke@435 161 }
duke@435 162 _dictionary->set_par_lock(&_parDictionaryAllocLock);
duke@435 163 }
duke@435 164 }
duke@435 165
duke@435 166 // Like CompactibleSpace forward() but always calls cross_threshold() to
duke@435 167 // update the block offset table. Removed initialize_threshold call because
duke@435 168 // CFLS does not use a block offset array for contiguous spaces.
duke@435 169 HeapWord* CompactibleFreeListSpace::forward(oop q, size_t size,
duke@435 170 CompactPoint* cp, HeapWord* compact_top) {
duke@435 171 // q is alive
duke@435 172 // First check if we should switch compaction space
duke@435 173 assert(this == cp->space, "'this' should be current compaction space.");
duke@435 174 size_t compaction_max_size = pointer_delta(end(), compact_top);
duke@435 175 assert(adjustObjectSize(size) == cp->space->adjust_object_size_v(size),
duke@435 176 "virtual adjustObjectSize_v() method is not correct");
duke@435 177 size_t adjusted_size = adjustObjectSize(size);
duke@435 178 assert(compaction_max_size >= MinChunkSize || compaction_max_size == 0,
duke@435 179 "no small fragments allowed");
duke@435 180 assert(minimum_free_block_size() == MinChunkSize,
duke@435 181 "for de-virtualized reference below");
duke@435 182 // Can't leave a nonzero-size residual fragment smaller than MinChunkSize
duke@435 183 if (adjusted_size + MinChunkSize > compaction_max_size &&
duke@435 184 adjusted_size != compaction_max_size) {
duke@435 185 do {
duke@435 186 // switch to next compaction space
duke@435 187 cp->space->set_compaction_top(compact_top);
duke@435 188 cp->space = cp->space->next_compaction_space();
duke@435 189 if (cp->space == NULL) {
duke@435 190 cp->gen = GenCollectedHeap::heap()->prev_gen(cp->gen);
duke@435 191 assert(cp->gen != NULL, "compaction must succeed");
duke@435 192 cp->space = cp->gen->first_compaction_space();
duke@435 193 assert(cp->space != NULL, "generation must have a first compaction space");
duke@435 194 }
duke@435 195 compact_top = cp->space->bottom();
duke@435 196 cp->space->set_compaction_top(compact_top);
duke@435 197 // The correct adjusted_size may not be the same as that for this method
duke@435 198 // (i.e., cp->space may no longer be "this"), so adjust the size again.
duke@435 199 // Use the virtual method here; it was avoided above to save the virtual
duke@435 200 // dispatch.
duke@435 201 adjusted_size = cp->space->adjust_object_size_v(size);
duke@435 202 compaction_max_size = pointer_delta(cp->space->end(), compact_top);
duke@435 203 assert(cp->space->minimum_free_block_size() == 0, "just checking");
duke@435 204 } while (adjusted_size > compaction_max_size);
duke@435 205 }
duke@435 206
duke@435 207 // store the forwarding pointer into the mark word
duke@435 208 if ((HeapWord*)q != compact_top) {
duke@435 209 q->forward_to(oop(compact_top));
duke@435 210 assert(q->is_gc_marked(), "encoding the pointer should preserve the mark");
duke@435 211 } else {
duke@435 212 // if the object isn't moving we can just set the mark to the default
duke@435 213 // mark and handle it specially later on.
duke@435 214 q->init_mark();
duke@435 215 assert(q->forwardee() == NULL, "should be forwarded to NULL");
duke@435 216 }
duke@435 217
duke@435 218 compact_top += adjusted_size;
duke@435 219
duke@435 220 // we need to update the offset table so that the beginnings of objects can be
duke@435 221 // found during scavenge. Note that we are updating the offset table based on
duke@435 222 // where the object will be once the compaction phase finishes.
duke@435 223
duke@435 224 // Always call cross_threshold(). A contiguous space would call it only when
duke@435 225 // the compaction_top exceeds the current threshold, but that does not hold
duke@435 226 // for a non-contiguous space.
duke@435 227 cp->threshold =
duke@435 228 cp->space->cross_threshold(compact_top - adjusted_size, compact_top);
duke@435 229 return compact_top;
duke@435 230 }
duke@435 231
duke@435 232 // A modified copy of OffsetTableContigSpace::cross_threshold() with _offsets -> _bt
duke@435 233 // and use of single_block instead of alloc_block. The name here is not really
duke@435 234 // appropriate - maybe a more general name could be invented for both the
duke@435 235 // contiguous and noncontiguous spaces.
duke@435 236
duke@435 237 HeapWord* CompactibleFreeListSpace::cross_threshold(HeapWord* start, HeapWord* the_end) {
duke@435 238 _bt.single_block(start, the_end);
duke@435 239 return end();
duke@435 240 }
duke@435 241
duke@435 242 // Initialize them to NULL.
duke@435 243 void CompactibleFreeListSpace::initializeIndexedFreeListArray() {
duke@435 244 for (size_t i = 0; i < IndexSetSize; i++) {
duke@435 245 // Note that on platforms where objects are double word aligned,
duke@435 246 // the odd array elements are not used. It is convenient, however,
duke@435 247 // to map directly from the object size to the array element.
duke@435 248 _indexedFreeList[i].reset(IndexSetSize);
duke@435 249 _indexedFreeList[i].set_size(i);
duke@435 250 assert(_indexedFreeList[i].count() == 0, "reset check failed");
duke@435 251 assert(_indexedFreeList[i].head() == NULL, "reset check failed");
duke@435 252 assert(_indexedFreeList[i].tail() == NULL, "reset check failed");
duke@435 253 assert(_indexedFreeList[i].hint() == IndexSetSize, "reset check failed");
duke@435 254 }
duke@435 255 }
duke@435 256
duke@435 257 void CompactibleFreeListSpace::resetIndexedFreeListArray() {
ysr@3264 258 for (size_t i = 1; i < IndexSetSize; i++) {
duke@435 259 assert(_indexedFreeList[i].size() == (size_t) i,
duke@435 260 "Indexed free list sizes are incorrect");
duke@435 261 _indexedFreeList[i].reset(IndexSetSize);
duke@435 262 assert(_indexedFreeList[i].count() == 0, "reset check failed");
duke@435 263 assert(_indexedFreeList[i].head() == NULL, "reset check failed");
duke@435 264 assert(_indexedFreeList[i].tail() == NULL, "reset check failed");
duke@435 265 assert(_indexedFreeList[i].hint() == IndexSetSize, "reset check failed");
duke@435 266 }
duke@435 267 }
duke@435 268
duke@435 269 void CompactibleFreeListSpace::reset(MemRegion mr) {
duke@435 270 resetIndexedFreeListArray();
duke@435 271 dictionary()->reset();
duke@435 272 if (BlockOffsetArrayUseUnallocatedBlock) {
duke@435 273 assert(end() == mr.end(), "We are compacting to the bottom of CMS gen");
duke@435 274 // Everything's allocated until proven otherwise.
duke@435 275 _bt.set_unallocated_block(end());
duke@435 276 }
duke@435 277 if (!mr.is_empty()) {
duke@435 278 assert(mr.word_size() >= MinChunkSize, "Chunk size is too small");
duke@435 279 _bt.single_block(mr.start(), mr.word_size());
duke@435 280 FreeChunk* fc = (FreeChunk*) mr.start();
jmasa@3732 281 fc->set_size(mr.word_size());
duke@435 282 if (mr.word_size() >= IndexSetSize ) {
duke@435 283 returnChunkToDictionary(fc);
duke@435 284 } else {
duke@435 285 _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
jmasa@3732 286 _indexedFreeList[mr.word_size()].return_chunk_at_head(fc);
duke@435 287 }
brutisso@5163 288 coalBirth(mr.word_size());
duke@435 289 }
duke@435 290 _promoInfo.reset();
duke@435 291 _smallLinearAllocBlock._ptr = NULL;
duke@435 292 _smallLinearAllocBlock._word_size = 0;
duke@435 293 }
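// Illustrative note (not part of the original source) on the routing above:
// a residual region of at least IndexSetSize words (257 in typical builds,
// an assumption here) is returned to the dictionary as a single chunk, while
// a smaller region of, say, 100 words becomes the head of
// _indexedFreeList[100], the list holding chunks of exactly that size.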
duke@435 294
duke@435 295 void CompactibleFreeListSpace::reset_after_compaction() {
duke@435 296 // Reset the space to the new reality - one free chunk.
duke@435 297 MemRegion mr(compaction_top(), end());
duke@435 298 reset(mr);
duke@435 299 // Now refill the linear allocation block(s) if possible.
duke@435 300 if (_adaptive_freelists) {
duke@435 301 refillLinearAllocBlocksIfNeeded();
duke@435 302 } else {
duke@435 303 // Place as much of mr in the linAB as we can get,
duke@435 304 // provided it was big enough to go into the dictionary.
jmasa@3732 305 FreeChunk* fc = dictionary()->find_largest_dict();
duke@435 306 if (fc != NULL) {
duke@435 307 assert(fc->size() == mr.word_size(),
duke@435 308 "Why was the chunk broken up?");
duke@435 309 removeChunkFromDictionary(fc);
duke@435 310 HeapWord* addr = (HeapWord*) fc;
duke@435 311 _smallLinearAllocBlock.set(addr, fc->size() ,
duke@435 312 1024*SmallForLinearAlloc, fc->size());
duke@435 313 // Note that _unallocated_block is not updated here.
duke@435 314 }
duke@435 315 }
duke@435 316 }
duke@435 317
duke@435 318 // Walks the entire dictionary, returning a coterminal
duke@435 319 // chunk, if it exists. Use with caution since it involves
duke@435 320 // a potentially complete walk of a potentially large tree.
duke@435 321 FreeChunk* CompactibleFreeListSpace::find_chunk_at_end() {
duke@435 322
duke@435 323 assert_lock_strong(&_freelistLock);
duke@435 324
duke@435 325 return dictionary()->find_chunk_ends_at(end());
duke@435 326 }
duke@435 327
duke@435 328
duke@435 329 #ifndef PRODUCT
duke@435 330 void CompactibleFreeListSpace::initializeIndexedFreeListArrayReturnedBytes() {
duke@435 331 for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
jmasa@3732 332 _indexedFreeList[i].allocation_stats()->set_returned_bytes(0);
duke@435 333 }
duke@435 334 }
duke@435 335
duke@435 336 size_t CompactibleFreeListSpace::sumIndexedFreeListArrayReturnedBytes() {
duke@435 337 size_t sum = 0;
duke@435 338 for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
jmasa@3732 339 sum += _indexedFreeList[i].allocation_stats()->returned_bytes();
duke@435 340 }
duke@435 341 return sum;
duke@435 342 }
duke@435 343
duke@435 344 size_t CompactibleFreeListSpace::totalCountInIndexedFreeLists() const {
duke@435 345 size_t count = 0;
ysr@3264 346 for (size_t i = IndexSetStart; i < IndexSetSize; i++) {
duke@435 347 debug_only(
duke@435 348 ssize_t total_list_count = 0;
duke@435 349 for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
duke@435 350 fc = fc->next()) {
duke@435 351 total_list_count++;
duke@435 352 }
duke@435 353 assert(total_list_count == _indexedFreeList[i].count(),
duke@435 354 "Count in list is incorrect");
duke@435 355 )
duke@435 356 count += _indexedFreeList[i].count();
duke@435 357 }
duke@435 358 return count;
duke@435 359 }
duke@435 360
duke@435 361 size_t CompactibleFreeListSpace::totalCount() {
duke@435 362 size_t num = totalCountInIndexedFreeLists();
jmasa@3732 363 num += dictionary()->total_count();
duke@435 364 if (_smallLinearAllocBlock._word_size != 0) {
duke@435 365 num++;
duke@435 366 }
duke@435 367 return num;
duke@435 368 }
duke@435 369 #endif
duke@435 370
duke@435 371 bool CompactibleFreeListSpace::is_free_block(const HeapWord* p) const {
duke@435 372 FreeChunk* fc = (FreeChunk*) p;
jmasa@3732 373 return fc->is_free();
duke@435 374 }
duke@435 375
duke@435 376 size_t CompactibleFreeListSpace::used() const {
duke@435 377 return capacity() - free();
duke@435 378 }
duke@435 379
duke@435 380 size_t CompactibleFreeListSpace::free() const {
duke@435 381 // "MT-safe, but not MT-precise"(TM), if you will: i.e.
duke@435 382 // if you do this while the structures are in flux you
duke@435 383 // may get an approximate answer only; for instance
duke@435 384 // because there is concurrent allocation either
duke@435 385 // directly by mutators or for promotion during a GC.
duke@435 386 // It's "MT-safe", however, in the sense that you are guaranteed
duke@435 387 // not to crash and burn, for instance, because of walking
duke@435 388 // pointers that could disappear as you were walking them.
duke@435 389 // The approximation is because the various components
duke@435 390 // that are read below are not read atomically (and
duke@435 391 // further the computation of totalSizeInIndexedFreeLists()
duke@435 392 // is itself a non-atomic computation). The normal use of
duke@435 393 // this is during a resize operation at the end of GC
duke@435 394 // and at that time you are guaranteed to get the
duke@435 395 // correct actual value. However, for instance, this is
duke@435 396 // also read completely asynchronously by the "perf-sampler"
duke@435 397 // that supports jvmstat, and you are apt to see the values
duke@435 398 // flicker in such cases.
duke@435 399 assert(_dictionary != NULL, "No _dictionary?");
jmasa@3732 400 return (_dictionary->total_chunk_size(DEBUG_ONLY(freelistLock())) +
duke@435 401 totalSizeInIndexedFreeLists() +
duke@435 402 _smallLinearAllocBlock._word_size) * HeapWordSize;
duke@435 403 }
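// Worked example (not part of the original source), assuming a snapshot with
// 1000 words in the dictionary, 200 words across the indexed free lists and a
// 64-word linear allocation block:
//   free() == (1000 + 200 + 64) * HeapWordSize bytes
// Because the three terms are not read atomically, the sum is only exact when
// the space is quiescent, e.g. during a resize at the end of a GC.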
duke@435 404
duke@435 405 size_t CompactibleFreeListSpace::max_alloc_in_words() const {
duke@435 406 assert(_dictionary != NULL, "No _dictionary?");
duke@435 407 assert_locked();
jmasa@3732 408 size_t res = _dictionary->max_chunk_size();
duke@435 409 res = MAX2(res, MIN2(_smallLinearAllocBlock._word_size,
duke@435 410 (size_t) SmallForLinearAlloc - 1));
duke@435 411 // XXX the following could potentially be pretty slow;
duke@435 412 // should one, pessimally for the rare cases when res
duke@435 413 // calculated above is less than IndexSetSize,
duke@435 414 // just return res calculated above? My reasoning was that
duke@435 415 // those cases will be so rare that the extra time spent doesn't
duke@435 416 // really matter....
duke@435 417 // Note: do not change the loop test i >= res + IndexSetStride
duke@435 418 // to i > res below, because i is unsigned and res may be zero.
duke@435 419 for (size_t i = IndexSetSize - 1; i >= res + IndexSetStride;
duke@435 420 i -= IndexSetStride) {
duke@435 421 if (_indexedFreeList[i].head() != NULL) {
duke@435 422 assert(_indexedFreeList[i].count() != 0, "Inconsistent FreeList");
duke@435 423 return i;
duke@435 424 }
duke@435 425 }
duke@435 426 return res;
duke@435 427 }
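// Illustrative note (not part of the original source) on the loop test above:
// with "i > res" and res == 0, the loop keeps running for any nonzero i; if i
// ever drops below IndexSetStride while still nonzero (possible whenever
// (IndexSetSize - 1) is not a multiple of the stride), "i -= IndexSetStride"
// wraps the unsigned index around instead of terminating. The test
// "i >= res + IndexSetStride" stops the loop before that can happen.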
duke@435 428
ysr@2071 429 void LinearAllocBlock::print_on(outputStream* st) const {
ysr@2071 430 st->print_cr(" LinearAllocBlock: ptr = " PTR_FORMAT ", word_size = " SIZE_FORMAT
ysr@2071 431 ", refillsize = " SIZE_FORMAT ", allocation_size_limit = " SIZE_FORMAT,
drchase@6680 432 p2i(_ptr), _word_size, _refillSize, _allocation_size_limit);
ysr@2071 433 }
ysr@2071 434
ysr@2071 435 void CompactibleFreeListSpace::print_on(outputStream* st) const {
ysr@2071 436 st->print_cr("COMPACTIBLE FREELIST SPACE");
ysr@2071 437 st->print_cr(" Space:");
ysr@2071 438 Space::print_on(st);
ysr@2071 439
ysr@2071 440 st->print_cr("promoInfo:");
ysr@2071 441 _promoInfo.print_on(st);
ysr@2071 442
ysr@2071 443 st->print_cr("_smallLinearAllocBlock");
ysr@2071 444 _smallLinearAllocBlock.print_on(st);
ysr@2071 445
ysr@2071 446 // dump_memory_block(_smallLinearAllocBlock->_ptr, 128);
ysr@2071 447
ysr@2071 448 st->print_cr(" _fitStrategy = %s, _adaptive_freelists = %s",
ysr@2071 449 _fitStrategy?"true":"false", _adaptive_freelists?"true":"false");
ysr@2071 450 }
ysr@2071 451
ysr@1580 452 void CompactibleFreeListSpace::print_indexed_free_lists(outputStream* st)
ysr@1580 453 const {
ysr@1580 454 reportIndexedFreeListStatistics();
ysr@1580 455 gclog_or_tty->print_cr("Layout of Indexed Freelists");
ysr@1580 456 gclog_or_tty->print_cr("---------------------------");
jmasa@4196 457 AdaptiveFreeList<FreeChunk>::print_labels_on(st, "size");
ysr@1580 458 for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
ysr@1580 459 _indexedFreeList[i].print_on(gclog_or_tty);
ysr@1580 460 for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
ysr@1580 461 fc = fc->next()) {
ysr@1580 462 gclog_or_tty->print_cr("\t[" PTR_FORMAT "," PTR_FORMAT ") %s",
drchase@6680 463 p2i(fc), p2i((HeapWord*)fc + i),
ysr@1580 464 fc->cantCoalesce() ? "\t CC" : "");
ysr@1580 465 }
ysr@1580 466 }
ysr@1580 467 }
ysr@1580 468
ysr@1580 469 void CompactibleFreeListSpace::print_promo_info_blocks(outputStream* st)
ysr@1580 470 const {
ysr@1580 471 _promoInfo.print_on(st);
ysr@1580 472 }
ysr@1580 473
ysr@1580 474 void CompactibleFreeListSpace::print_dictionary_free_lists(outputStream* st)
ysr@1580 475 const {
jmasa@3732 476 _dictionary->report_statistics();
ysr@1580 477 st->print_cr("Layout of Freelists in Tree");
ysr@1580 478 st->print_cr("---------------------------");
ysr@1580 479 _dictionary->print_free_lists(st);
ysr@1580 480 }
ysr@1580 481
ysr@1580 482 class BlkPrintingClosure: public BlkClosure {
ysr@1580 483 const CMSCollector* _collector;
ysr@1580 484 const CompactibleFreeListSpace* _sp;
ysr@1580 485 const CMSBitMap* _live_bit_map;
ysr@1580 486 const bool _post_remark;
ysr@1580 487 outputStream* _st;
ysr@1580 488 public:
ysr@1580 489 BlkPrintingClosure(const CMSCollector* collector,
ysr@1580 490 const CompactibleFreeListSpace* sp,
ysr@1580 491 const CMSBitMap* live_bit_map,
ysr@1580 492 outputStream* st):
ysr@1580 493 _collector(collector),
ysr@1580 494 _sp(sp),
ysr@1580 495 _live_bit_map(live_bit_map),
ysr@1580 496 _post_remark(collector->abstract_state() > CMSCollector::FinalMarking),
ysr@1580 497 _st(st) { }
ysr@1580 498 size_t do_blk(HeapWord* addr);
ysr@1580 499 };
ysr@1580 500
ysr@1580 501 size_t BlkPrintingClosure::do_blk(HeapWord* addr) {
ysr@1580 502 size_t sz = _sp->block_size_no_stall(addr, _collector);
ysr@1580 503 assert(sz != 0, "Should always be able to compute a size");
ysr@1580 504 if (_sp->block_is_obj(addr)) {
ysr@1580 505 const bool dead = _post_remark && !_live_bit_map->isMarked(addr);
ysr@1580 506 _st->print_cr(PTR_FORMAT ": %s object of size " SIZE_FORMAT "%s",
drchase@6680 507 p2i(addr),
ysr@1580 508 dead ? "dead" : "live",
ysr@1580 509 sz,
ysr@1580 510 (!dead && CMSPrintObjectsInDump) ? ":" : ".");
ysr@1580 511 if (CMSPrintObjectsInDump && !dead) {
ysr@1580 512 oop(addr)->print_on(_st);
ysr@1580 513 _st->print_cr("--------------------------------------");
ysr@1580 514 }
ysr@1580 515 } else { // free block
ysr@1580 516 _st->print_cr(PTR_FORMAT ": free block of size " SIZE_FORMAT "%s",
drchase@6680 517 p2i(addr), sz, CMSPrintChunksInDump ? ":" : ".");
ysr@1580 518 if (CMSPrintChunksInDump) {
ysr@1580 519 ((FreeChunk*)addr)->print_on(_st);
ysr@1580 520 _st->print_cr("--------------------------------------");
ysr@1580 521 }
ysr@1580 522 }
ysr@1580 523 return sz;
ysr@1580 524 }
ysr@1580 525
ysr@1580 526 void CompactibleFreeListSpace::dump_at_safepoint_with_locks(CMSCollector* c,
ysr@1580 527 outputStream* st) {
ysr@1580 528 st->print_cr("\n=========================");
ysr@1580 529 st->print_cr("Block layout in CMS Heap:");
ysr@1580 530 st->print_cr("=========================");
ysr@1580 531 BlkPrintingClosure bpcl(c, this, c->markBitMap(), st);
ysr@1580 532 blk_iterate(&bpcl);
ysr@1580 533
ysr@1580 534 st->print_cr("\n=======================================");
ysr@1580 535 st->print_cr("Order & Layout of Promotion Info Blocks");
ysr@1580 536 st->print_cr("=======================================");
ysr@1580 537 print_promo_info_blocks(st);
ysr@1580 538
ysr@1580 539 st->print_cr("\n===========================");
ysr@1580 540 st->print_cr("Order of Indexed Free Lists");
ysr@1580 541 st->print_cr("===========================");
ysr@1580 542 print_indexed_free_lists(st);
ysr@1580 543
ysr@1580 544 st->print_cr("\n=================================");
ysr@1580 545 st->print_cr("Order of Free Lists in Dictionary");
ysr@1580 546 st->print_cr("=================================");
ysr@1580 547 print_dictionary_free_lists(st);
ysr@1580 548 }
ysr@1580 549
ysr@1580 550
duke@435 551 void CompactibleFreeListSpace::reportFreeListStatistics() const {
duke@435 552 assert_lock_strong(&_freelistLock);
duke@435 553 assert(PrintFLSStatistics != 0, "Reporting error");
jmasa@3732 554 _dictionary->report_statistics();
duke@435 555 if (PrintFLSStatistics > 1) {
duke@435 556 reportIndexedFreeListStatistics();
jmasa@3732 557 size_t total_size = totalSizeInIndexedFreeLists() +
jmasa@3732 558 _dictionary->total_chunk_size(DEBUG_ONLY(freelistLock()));
hseigel@4465 559 gclog_or_tty->print(" free=" SIZE_FORMAT " frag=%1.4f\n", total_size, flsFrag());
duke@435 560 }
duke@435 561 }
duke@435 562
duke@435 563 void CompactibleFreeListSpace::reportIndexedFreeListStatistics() const {
duke@435 564 assert_lock_strong(&_freelistLock);
duke@435 565 gclog_or_tty->print("Statistics for IndexedFreeLists:\n"
duke@435 566 "--------------------------------\n");
jmasa@3732 567 size_t total_size = totalSizeInIndexedFreeLists();
jmasa@3732 568 size_t free_blocks = numFreeBlocksInIndexedFreeLists();
drchase@6680 569 gclog_or_tty->print("Total Free Space: " SIZE_FORMAT "\n", total_size);
drchase@6680 570 gclog_or_tty->print("Max Chunk Size: " SIZE_FORMAT "\n", maxChunkSizeInIndexedFreeLists());
drchase@6680 571 gclog_or_tty->print("Number of Blocks: " SIZE_FORMAT "\n", free_blocks);
jmasa@3732 572 if (free_blocks != 0) {
drchase@6680 573 gclog_or_tty->print("Av. Block Size: " SIZE_FORMAT "\n", total_size/free_blocks);
duke@435 574 }
duke@435 575 }
duke@435 576
duke@435 577 size_t CompactibleFreeListSpace::numFreeBlocksInIndexedFreeLists() const {
duke@435 578 size_t res = 0;
duke@435 579 for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
duke@435 580 debug_only(
duke@435 581 ssize_t recount = 0;
duke@435 582 for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
duke@435 583 fc = fc->next()) {
duke@435 584 recount += 1;
duke@435 585 }
duke@435 586 assert(recount == _indexedFreeList[i].count(),
duke@435 587 "Incorrect count in list");
duke@435 588 )
duke@435 589 res += _indexedFreeList[i].count();
duke@435 590 }
duke@435 591 return res;
duke@435 592 }
duke@435 593
duke@435 594 size_t CompactibleFreeListSpace::maxChunkSizeInIndexedFreeLists() const {
duke@435 595 for (size_t i = IndexSetSize - 1; i != 0; i -= IndexSetStride) {
duke@435 596 if (_indexedFreeList[i].head() != NULL) {
duke@435 597 assert(_indexedFreeList[i].count() != 0, "Inconsistent FreeList");
duke@435 598 return (size_t)i;
duke@435 599 }
duke@435 600 }
duke@435 601 return 0;
duke@435 602 }
duke@435 603
duke@435 604 void CompactibleFreeListSpace::set_end(HeapWord* value) {
duke@435 605 HeapWord* prevEnd = end();
duke@435 606 assert(prevEnd != value, "unnecessary set_end call");
ysr@2071 607 assert(prevEnd == NULL || !BlockOffsetArrayUseUnallocatedBlock || value >= unallocated_block(),
ysr@2071 608 "New end is below unallocated block");
duke@435 609 _end = value;
duke@435 610 if (prevEnd != NULL) {
duke@435 611 // Resize the underlying block offset table.
duke@435 612 _bt.resize(pointer_delta(value, bottom()));
ysr@1580 613 if (value <= prevEnd) {
ysr@2071 614 assert(!BlockOffsetArrayUseUnallocatedBlock || value >= unallocated_block(),
ysr@2071 615 "New end is below unallocated block");
ysr@1580 616 } else {
ysr@1580 617 // Now, take this new chunk and add it to the free blocks.
ysr@1580 618 // Note that the BOT has not yet been updated for this block.
ysr@1580 619 size_t newFcSize = pointer_delta(value, prevEnd);
ysr@1580 620 // XXX This is REALLY UGLY and should be fixed up. XXX
ysr@1580 621 if (!_adaptive_freelists && _smallLinearAllocBlock._ptr == NULL) {
ysr@1580 622 // Mark the boundary of the new block in BOT
ysr@1580 623 _bt.mark_block(prevEnd, value);
ysr@1580 624 // put it all in the linAB
ysr@1580 625 if (ParallelGCThreads == 0) {
ysr@1580 626 _smallLinearAllocBlock._ptr = prevEnd;
ysr@1580 627 _smallLinearAllocBlock._word_size = newFcSize;
ysr@1580 628 repairLinearAllocBlock(&_smallLinearAllocBlock);
ysr@1580 629 } else { // ParallelGCThreads > 0
ysr@1580 630 MutexLockerEx x(parDictionaryAllocLock(),
ysr@1580 631 Mutex::_no_safepoint_check_flag);
ysr@1580 632 _smallLinearAllocBlock._ptr = prevEnd;
ysr@1580 633 _smallLinearAllocBlock._word_size = newFcSize;
ysr@1580 634 repairLinearAllocBlock(&_smallLinearAllocBlock);
ysr@1580 635 }
ysr@1580 636 // Births of chunks put into a LinAB are not recorded. Births
ysr@1580 637 // of chunks as they are allocated out of a LinAB are.
ysr@1580 638 } else {
ysr@1580 639 // Add the block to the free lists, if possible coalescing it
ysr@1580 640 // with the last free block, and update the BOT and census data.
ysr@1580 641 addChunkToFreeListsAtEndRecordingStats(prevEnd, newFcSize);
duke@435 642 }
duke@435 643 }
duke@435 644 }
duke@435 645 }
duke@435 646
duke@435 647 class FreeListSpace_DCTOC : public Filtering_DCTOC {
duke@435 648 CompactibleFreeListSpace* _cfls;
duke@435 649 CMSCollector* _collector;
duke@435 650 protected:
duke@435 651 // Override.
duke@435 652 #define walk_mem_region_with_cl_DECL(ClosureType) \
duke@435 653 virtual void walk_mem_region_with_cl(MemRegion mr, \
duke@435 654 HeapWord* bottom, HeapWord* top, \
duke@435 655 ClosureType* cl); \
duke@435 656 void walk_mem_region_with_cl_par(MemRegion mr, \
duke@435 657 HeapWord* bottom, HeapWord* top, \
duke@435 658 ClosureType* cl); \
duke@435 659 void walk_mem_region_with_cl_nopar(MemRegion mr, \
duke@435 660 HeapWord* bottom, HeapWord* top, \
duke@435 661 ClosureType* cl)
coleenp@4037 662 walk_mem_region_with_cl_DECL(ExtendedOopClosure);
duke@435 663 walk_mem_region_with_cl_DECL(FilteringClosure);
duke@435 664
duke@435 665 public:
duke@435 666 FreeListSpace_DCTOC(CompactibleFreeListSpace* sp,
duke@435 667 CMSCollector* collector,
coleenp@4037 668 ExtendedOopClosure* cl,
duke@435 669 CardTableModRefBS::PrecisionStyle precision,
duke@435 670 HeapWord* boundary) :
duke@435 671 Filtering_DCTOC(sp, cl, precision, boundary),
duke@435 672 _cfls(sp), _collector(collector) {}
duke@435 673 };
duke@435 674
duke@435 675 // We de-virtualize the block-related calls below, since we know that our
duke@435 676 // space is a CompactibleFreeListSpace.
jmasa@3294 677
duke@435 678 #define FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(ClosureType) \
duke@435 679 void FreeListSpace_DCTOC::walk_mem_region_with_cl(MemRegion mr, \
duke@435 680 HeapWord* bottom, \
duke@435 681 HeapWord* top, \
duke@435 682 ClosureType* cl) { \
jmasa@3294 683 bool is_par = SharedHeap::heap()->n_par_threads() > 0; \
jmasa@3294 684 if (is_par) { \
jmasa@3294 685 assert(SharedHeap::heap()->n_par_threads() == \
jmasa@3294 686 SharedHeap::heap()->workers()->active_workers(), "Mismatch"); \
duke@435 687 walk_mem_region_with_cl_par(mr, bottom, top, cl); \
duke@435 688 } else { \
duke@435 689 walk_mem_region_with_cl_nopar(mr, bottom, top, cl); \
duke@435 690 } \
duke@435 691 } \
duke@435 692 void FreeListSpace_DCTOC::walk_mem_region_with_cl_par(MemRegion mr, \
duke@435 693 HeapWord* bottom, \
duke@435 694 HeapWord* top, \
duke@435 695 ClosureType* cl) { \
duke@435 696 /* Skip parts that are before "mr", in case "block_start" sent us \
duke@435 697 back too far. */ \
duke@435 698 HeapWord* mr_start = mr.start(); \
duke@435 699 size_t bot_size = _cfls->CompactibleFreeListSpace::block_size(bottom); \
duke@435 700 HeapWord* next = bottom + bot_size; \
duke@435 701 while (next < mr_start) { \
duke@435 702 bottom = next; \
duke@435 703 bot_size = _cfls->CompactibleFreeListSpace::block_size(bottom); \
duke@435 704 next = bottom + bot_size; \
duke@435 705 } \
duke@435 706 \
duke@435 707 while (bottom < top) { \
duke@435 708 if (_cfls->CompactibleFreeListSpace::block_is_obj(bottom) && \
duke@435 709 !_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks( \
duke@435 710 oop(bottom)) && \
duke@435 711 !_collector->CMSCollector::is_dead_obj(oop(bottom))) { \
duke@435 712 size_t word_sz = oop(bottom)->oop_iterate(cl, mr); \
duke@435 713 bottom += _cfls->adjustObjectSize(word_sz); \
duke@435 714 } else { \
duke@435 715 bottom += _cfls->CompactibleFreeListSpace::block_size(bottom); \
duke@435 716 } \
duke@435 717 } \
duke@435 718 } \
duke@435 719 void FreeListSpace_DCTOC::walk_mem_region_with_cl_nopar(MemRegion mr, \
duke@435 720 HeapWord* bottom, \
duke@435 721 HeapWord* top, \
duke@435 722 ClosureType* cl) { \
duke@435 723 /* Skip parts that are before "mr", in case "block_start" sent us \
duke@435 724 back too far. */ \
duke@435 725 HeapWord* mr_start = mr.start(); \
duke@435 726 size_t bot_size = _cfls->CompactibleFreeListSpace::block_size_nopar(bottom); \
duke@435 727 HeapWord* next = bottom + bot_size; \
duke@435 728 while (next < mr_start) { \
duke@435 729 bottom = next; \
duke@435 730 bot_size = _cfls->CompactibleFreeListSpace::block_size_nopar(bottom); \
duke@435 731 next = bottom + bot_size; \
duke@435 732 } \
duke@435 733 \
duke@435 734 while (bottom < top) { \
duke@435 735 if (_cfls->CompactibleFreeListSpace::block_is_obj_nopar(bottom) && \
duke@435 736 !_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks( \
duke@435 737 oop(bottom)) && \
duke@435 738 !_collector->CMSCollector::is_dead_obj(oop(bottom))) { \
duke@435 739 size_t word_sz = oop(bottom)->oop_iterate(cl, mr); \
duke@435 740 bottom += _cfls->adjustObjectSize(word_sz); \
duke@435 741 } else { \
duke@435 742 bottom += _cfls->CompactibleFreeListSpace::block_size_nopar(bottom); \
duke@435 743 } \
duke@435 744 } \
duke@435 745 }
duke@435 746
duke@435 747 // (There are only two of these, rather than N, because the split is due
duke@435 748 // only to the introduction of the FilteringClosure, a local part of the
duke@435 749 // impl of this abstraction.)
coleenp@4037 750 FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(ExtendedOopClosure)
duke@435 751 FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(FilteringClosure)
duke@435 752
duke@435 753 DirtyCardToOopClosure*
coleenp@4037 754 CompactibleFreeListSpace::new_dcto_cl(ExtendedOopClosure* cl,
duke@435 755 CardTableModRefBS::PrecisionStyle precision,
duke@435 756 HeapWord* boundary) {
duke@435 757 return new FreeListSpace_DCTOC(this, _collector, cl, precision, boundary);
duke@435 758 }
duke@435 759
duke@435 760
duke@435 761 // Note on locking for the space iteration functions:
duke@435 762 // since the collector's iteration activities are concurrent with
duke@435 763 // allocation activities by mutators, absent a suitable mutual exclusion
duke@435 764 // mechanism the iterators may go awry. For instance, a block being iterated
duke@435 765 // may suddenly be allocated or divided up and part of it allocated and
duke@435 766 // so on.
duke@435 767
duke@435 768 // Apply the given closure to each block in the space.
duke@435 769 void CompactibleFreeListSpace::blk_iterate_careful(BlkClosureCareful* cl) {
duke@435 770 assert_lock_strong(freelistLock());
duke@435 771 HeapWord *cur, *limit;
duke@435 772 for (cur = bottom(), limit = end(); cur < limit;
duke@435 773 cur += cl->do_blk_careful(cur));
duke@435 774 }
duke@435 775
duke@435 776 // Apply the given closure to each block in the space.
duke@435 777 void CompactibleFreeListSpace::blk_iterate(BlkClosure* cl) {
duke@435 778 assert_lock_strong(freelistLock());
duke@435 779 HeapWord *cur, *limit;
duke@435 780 for (cur = bottom(), limit = end(); cur < limit;
duke@435 781 cur += cl->do_blk(cur));
duke@435 782 }
duke@435 783
duke@435 784 // Apply the given closure to each oop in the space.
coleenp@4037 785 void CompactibleFreeListSpace::oop_iterate(ExtendedOopClosure* cl) {
duke@435 786 assert_lock_strong(freelistLock());
duke@435 787 HeapWord *cur, *limit;
duke@435 788 size_t curSize;
duke@435 789 for (cur = bottom(), limit = end(); cur < limit;
duke@435 790 cur += curSize) {
duke@435 791 curSize = block_size(cur);
duke@435 792 if (block_is_obj(cur)) {
duke@435 793 oop(cur)->oop_iterate(cl);
duke@435 794 }
duke@435 795 }
duke@435 796 }
duke@435 797
duke@435 798 // NOTE: In the following methods, in order to safely be able to
duke@435 799 // apply the closure to an object, we need to be sure that the
duke@435 800 // object has been initialized. We are guaranteed that an object
duke@435 801 // is initialized if we are holding the Heap_lock with the
duke@435 802 // world stopped.
duke@435 803 void CompactibleFreeListSpace::verify_objects_initialized() const {
duke@435 804 if (is_init_completed()) {
duke@435 805 assert_locked_or_safepoint(Heap_lock);
duke@435 806 if (Universe::is_fully_initialized()) {
duke@435 807 guarantee(SafepointSynchronize::is_at_safepoint(),
duke@435 808 "Required for objects to be initialized");
duke@435 809 }
duke@435 810 } // else make a concession at vm start-up
duke@435 811 }
duke@435 812
duke@435 813 // Apply the given closure to each object in the space
duke@435 814 void CompactibleFreeListSpace::object_iterate(ObjectClosure* blk) {
duke@435 815 assert_lock_strong(freelistLock());
duke@435 816 NOT_PRODUCT(verify_objects_initialized());
duke@435 817 HeapWord *cur, *limit;
duke@435 818 size_t curSize;
duke@435 819 for (cur = bottom(), limit = end(); cur < limit;
duke@435 820 cur += curSize) {
duke@435 821 curSize = block_size(cur);
duke@435 822 if (block_is_obj(cur)) {
duke@435 823 blk->do_object(oop(cur));
duke@435 824 }
duke@435 825 }
duke@435 826 }
duke@435 827
jmasa@952 828 // Apply the given closure to each live object in the space
jmasa@952 829 // The use of CompactibleFreeListSpace
jmasa@952 830 // by the ConcurrentMarkSweepGeneration for concurrent GC's allows
jmasa@952 831 // objects in the space to hold references to objects that are no
jmasa@952 832 // longer valid. For example, an object may reference another object
jmasa@952 833 // that has already been swept up (collected). This method uses
jmasa@952 834 // obj_is_alive() to determine whether it is safe to apply the closure to
jmasa@952 835 // an object. See obj_is_alive() for details on how liveness of an
jmasa@952 836 // object is decided.
jmasa@952 837
jmasa@952 838 void CompactibleFreeListSpace::safe_object_iterate(ObjectClosure* blk) {
jmasa@952 839 assert_lock_strong(freelistLock());
jmasa@952 840 NOT_PRODUCT(verify_objects_initialized());
jmasa@952 841 HeapWord *cur, *limit;
jmasa@952 842 size_t curSize;
jmasa@952 843 for (cur = bottom(), limit = end(); cur < limit;
jmasa@952 844 cur += curSize) {
jmasa@952 845 curSize = block_size(cur);
jmasa@952 846 if (block_is_obj(cur) && obj_is_alive(cur)) {
jmasa@952 847 blk->do_object(oop(cur));
jmasa@952 848 }
jmasa@952 849 }
jmasa@952 850 }
jmasa@952 851
duke@435 852 void CompactibleFreeListSpace::object_iterate_mem(MemRegion mr,
duke@435 853 UpwardsObjectClosure* cl) {
ysr@1580 854 assert_locked(freelistLock());
duke@435 855 NOT_PRODUCT(verify_objects_initialized());
mgerdin@6979 856 assert(!mr.is_empty(), "Should be non-empty");
mgerdin@6979 857 // We use MemRegion(bottom(), end()) rather than used_region() below
mgerdin@6979 858 // because the two are not necessarily equal for some kinds of
mgerdin@6979 859 // spaces, in particular, certain kinds of free list spaces.
mgerdin@6979 860 // We could use the more complicated but more precise:
mgerdin@6979 861 // MemRegion(used_region().start(), round_to(used_region().end(), CardSize))
mgerdin@6979 862 // but the slight imprecision seems acceptable in the assertion check.
mgerdin@6979 863 assert(MemRegion(bottom(), end()).contains(mr),
mgerdin@6979 864 "Should be within used space");
mgerdin@6979 865 HeapWord* prev = cl->previous(); // max address from last time
mgerdin@6979 866 if (prev >= mr.end()) { // nothing to do
mgerdin@6979 867 return;
mgerdin@6979 868 }
mgerdin@6979 869 // This assert will not work when we go from cms space to perm
mgerdin@6979 870 // space, and use same closure. Easy fix deferred for later. XXX YSR
mgerdin@6979 871 // assert(prev == NULL || contains(prev), "Should be within space");
mgerdin@6979 872
mgerdin@6979 873 bool last_was_obj_array = false;
mgerdin@6979 874 HeapWord *blk_start_addr, *region_start_addr;
mgerdin@6979 875 if (prev > mr.start()) {
mgerdin@6979 876 region_start_addr = prev;
mgerdin@6979 877 blk_start_addr = prev;
mgerdin@6979 878 // The previous invocation may have pushed "prev" beyond the
mgerdin@6979 879 // last allocated block, yet there may still be blocks
mgerdin@6979 880 // in this region due to a particular coalescing policy.
mgerdin@6979 881 // Relax the assertion so that the case where the unallocated
mgerdin@6979 882 // block is maintained and "prev" is beyond the unallocated
mgerdin@6979 883 // block does not cause the assertion to fire.
mgerdin@6979 884 assert((BlockOffsetArrayUseUnallocatedBlock &&
mgerdin@6979 885 (!is_in(prev))) ||
mgerdin@6979 886 (blk_start_addr == block_start(region_start_addr)), "invariant");
mgerdin@6979 887 } else {
mgerdin@6979 888 region_start_addr = mr.start();
mgerdin@6979 889 blk_start_addr = block_start(region_start_addr);
mgerdin@6979 890 }
mgerdin@6979 891 HeapWord* region_end_addr = mr.end();
mgerdin@6979 892 MemRegion derived_mr(region_start_addr, region_end_addr);
mgerdin@6979 893 while (blk_start_addr < region_end_addr) {
mgerdin@6979 894 const size_t size = block_size(blk_start_addr);
mgerdin@6979 895 if (block_is_obj(blk_start_addr)) {
mgerdin@6979 896 last_was_obj_array = cl->do_object_bm(oop(blk_start_addr), derived_mr);
mgerdin@6979 897 } else {
mgerdin@6979 898 last_was_obj_array = false;
mgerdin@6979 899 }
mgerdin@6979 900 blk_start_addr += size;
mgerdin@6979 901 }
mgerdin@6979 902 if (!last_was_obj_array) {
mgerdin@6979 903 assert((bottom() <= blk_start_addr) && (blk_start_addr <= end()),
mgerdin@6979 904 "Should be within (closed) used space");
mgerdin@6979 905 assert(blk_start_addr > prev, "Invariant");
mgerdin@6979 906 cl->set_previous(blk_start_addr); // min address for next time
mgerdin@6979 907 }
duke@435 908 }
duke@435 909
duke@435 910
duke@435 911 // Callers of this iterator beware: The closure application should
duke@435 912 // be robust in the face of uninitialized objects and should (always)
duke@435 913 // return a correct size so that the next addr + size below gives us a
duke@435 914 // valid block boundary. [See for instance,
duke@435 915 // ScanMarkedObjectsAgainCarefullyClosure::do_object_careful()
duke@435 916 // in ConcurrentMarkSweepGeneration.cpp.]
duke@435 917 HeapWord*
duke@435 918 CompactibleFreeListSpace::object_iterate_careful_m(MemRegion mr,
duke@435 919 ObjectClosureCareful* cl) {
duke@435 920 assert_lock_strong(freelistLock());
duke@435 921 // Can't use used_region() below because it may not necessarily
duke@435 922 // be the same as [bottom(),end()); although we could
duke@435 923 // use [used_region().start(),round_to(used_region().end(),CardSize)),
duke@435 924 // that appears too cumbersome, so we just do the simpler check
duke@435 925 // in the assertion below.
duke@435 926 assert(!mr.is_empty() && MemRegion(bottom(),end()).contains(mr),
duke@435 927 "mr should be non-empty and within used space");
duke@435 928 HeapWord *addr, *end;
duke@435 929 size_t size;
duke@435 930 for (addr = block_start_careful(mr.start()), end = mr.end();
duke@435 931 addr < end; addr += size) {
duke@435 932 FreeChunk* fc = (FreeChunk*)addr;
jmasa@3732 933 if (fc->is_free()) {
duke@435 934 // Since we hold the free list lock, which protects direct
duke@435 935 // allocation in this generation by mutators, a free object
duke@435 936 // will remain free throughout this iteration code.
duke@435 937 size = fc->size();
duke@435 938 } else {
duke@435 939 // Note that the object need not necessarily be initialized,
duke@435 940 // because (for instance) the free list lock does NOT protect
duke@435 941 // object initialization. The closure application below must
duke@435 942 // therefore be correct in the face of uninitialized objects.
duke@435 943 size = cl->do_object_careful_m(oop(addr), mr);
duke@435 944 if (size == 0) {
duke@435 945 // An unparsable object found. Signal early termination.
duke@435 946 return addr;
duke@435 947 }
duke@435 948 }
duke@435 949 }
duke@435 950 return NULL;
duke@435 951 }
duke@435 952
duke@435 953
ysr@777 954 HeapWord* CompactibleFreeListSpace::block_start_const(const void* p) const {
duke@435 955 NOT_PRODUCT(verify_objects_initialized());
duke@435 956 return _bt.block_start(p);
duke@435 957 }
duke@435 958
duke@435 959 HeapWord* CompactibleFreeListSpace::block_start_careful(const void* p) const {
duke@435 960 return _bt.block_start_careful(p);
duke@435 961 }
duke@435 962
duke@435 963 size_t CompactibleFreeListSpace::block_size(const HeapWord* p) const {
duke@435 964 NOT_PRODUCT(verify_objects_initialized());
duke@435 965 // This must be volatile, or else there is a danger that the compiler
duke@435 966 // will compile the code below into a sometimes-infinite loop, by keeping
duke@435 967 // the value read the first time in a register.
duke@435 968 while (true) {
duke@435 969 // We must do this until we get a consistent view of the object.
coleenp@622 970 if (FreeChunk::indicatesFreeChunk(p)) {
coleenp@622 971 volatile FreeChunk* fc = (volatile FreeChunk*)p;
coleenp@622 972 size_t res = fc->size();
goetz@6493 973
goetz@6493 974 // Bugfix for systems with weak memory model (PPC64/IA64). The
goetz@6493 975 // block's free bit was set and we have read the size of the
goetz@6493 976 // block. Acquire and check the free bit again. If the block is
goetz@6493 977 // still free, the read size is correct.
goetz@6493 978 OrderAccess::acquire();
goetz@6493 979
coleenp@622 980 // If the object is still a free chunk, return the size, else it
coleenp@622 981 // has been allocated so try again.
coleenp@622 982 if (FreeChunk::indicatesFreeChunk(p)) {
duke@435 983 assert(res != 0, "Block size should not be 0");
duke@435 984 return res;
duke@435 985 }
coleenp@622 986 } else {
coleenp@622 987 // must read from what 'p' points to in each loop.
coleenp@4037 988 Klass* k = ((volatile oopDesc*)p)->klass_or_null();
coleenp@622 989 if (k != NULL) {
coleenp@4037 990 assert(k->is_klass(), "Should really be klass oop.");
coleenp@622 991 oop o = (oop)p;
coleenp@622 992 assert(o->is_oop(true /* ignore mark word */), "Should be an oop.");
goetz@6493 993
goetz@6493 994 // Bugfix for systems with weak memory model (PPC64/IA64).
goetz@6493 995 // The object o may be an array. Acquire to make sure that the array
goetz@6493 996 // size (third word) is consistent.
goetz@6493 997 OrderAccess::acquire();
goetz@6493 998
coleenp@4037 999 size_t res = o->size_given_klass(k);
coleenp@622 1000 res = adjustObjectSize(res);
coleenp@622 1001 assert(res != 0, "Block size should not be 0");
coleenp@622 1002 return res;
coleenp@622 1003 }
duke@435 1004 }
duke@435 1005 }
duke@435 1006 }
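// A minimal sketch (not part of the original source) of the check/re-check
// publication pattern used above, with hypothetical helpers is_free_bit_set()
// and read_size() standing in for FreeChunk::indicatesFreeChunk() and
// FreeChunk::size():
//   if (is_free_bit_set(p)) {     // first read of the free indicator
//     size_t sz = read_size(p);   // speculative read of the size field
//     OrderAccess::acquire();     // order the re-read after the size read
//     if (is_free_bit_set(p)) {   // still free => sz was a valid chunk size
//       return sz;
//     }
//   }
//   // otherwise p points at an object; size it via its klass and retry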
duke@435 1007
coleenp@4037 1008 // TODO: Now that is_parsable is gone, we should combine these two functions.
duke@435 1009 // A variant of the above that uses the Printezis bits for
duke@435 1010 // unparsable but allocated objects. This avoids any possible
duke@435 1011 // stalls waiting for mutators to initialize objects, and is
duke@435 1012 // thus potentially faster than the variant above. However,
duke@435 1013 // this variant may return a zero size for a block that is
duke@435 1014 // under mutation and for which a consistent size cannot be
duke@435 1015 // inferred without stalling; see CMSCollector::block_size_if_printezis_bits().
duke@435 1016 size_t CompactibleFreeListSpace::block_size_no_stall(HeapWord* p,
duke@435 1017 const CMSCollector* c)
duke@435 1018 const {
duke@435 1019 assert(MemRegion(bottom(), end()).contains(p), "p not in space");
duke@435 1020 // This must be volatile, or else there is a danger that the compiler
duke@435 1021 // will compile the code below into a sometimes-infinite loop, by keeping
duke@435 1022 // the value read the first time in a register.
duke@435 1023 DEBUG_ONLY(uint loops = 0;)
duke@435 1024 while (true) {
duke@435 1025 // We must do this until we get a consistent view of the object.
coleenp@622 1026 if (FreeChunk::indicatesFreeChunk(p)) {
coleenp@622 1027 volatile FreeChunk* fc = (volatile FreeChunk*)p;
coleenp@622 1028 size_t res = fc->size();
goetz@6493 1029
goetz@6493 1030 // Bugfix for systems with weak memory model (PPC64/IA64). The
goetz@6493 1031 // free bit of the block was set and we have read the size of
goetz@6493 1032 // the block. Acquire and check the free bit again. If the
goetz@6493 1033 // block is still free, the read size is correct.
goetz@6493 1034 OrderAccess::acquire();
goetz@6493 1035
coleenp@622 1036 if (FreeChunk::indicatesFreeChunk(p)) {
duke@435 1037 assert(res != 0, "Block size should not be 0");
duke@435 1038 assert(loops == 0, "Should be 0");
duke@435 1039 return res;
duke@435 1040 }
duke@435 1041 } else {
coleenp@622 1042 // must read from what 'p' points to in each loop.
coleenp@4037 1043 Klass* k = ((volatile oopDesc*)p)->klass_or_null();
ysr@2533 1044 // We trust the size of any object that has a non-NULL
ysr@2533 1045 // klass and (for those in the perm gen) is parsable
ysr@2533 1046 // -- irrespective of its conc_safe-ty.
coleenp@4037 1047 if (k != NULL) {
coleenp@4037 1048 assert(k->is_klass(), "Should really be klass oop.");
coleenp@622 1049 oop o = (oop)p;
coleenp@622 1050 assert(o->is_oop(), "Should be an oop");
goetz@6493 1051
goetz@6493 1052 // Bugfix for systems with weak memory model (PPC64/IA64).
goetz@6493 1053 // The object o may be an array. Acquire to make sure that the array
goetz@6493 1054 // size (third word) is consistent.
goetz@6493 1055 OrderAccess::acquire();
goetz@6493 1056
coleenp@4037 1057 size_t res = o->size_given_klass(k);
coleenp@622 1058 res = adjustObjectSize(res);
coleenp@622 1059 assert(res != 0, "Block size should not be 0");
coleenp@622 1060 return res;
coleenp@622 1061 } else {
ysr@2533 1062 // May return 0 if P-bits not present.
coleenp@622 1063 return c->block_size_if_printezis_bits(p);
coleenp@622 1064 }
duke@435 1065 }
duke@435 1066 assert(loops == 0, "Can loop at most once");
duke@435 1067 DEBUG_ONLY(loops++;)
duke@435 1068 }
duke@435 1069 }
duke@435 1070
duke@435 1071 size_t CompactibleFreeListSpace::block_size_nopar(const HeapWord* p) const {
duke@435 1072 NOT_PRODUCT(verify_objects_initialized());
duke@435 1073 assert(MemRegion(bottom(), end()).contains(p), "p not in space");
duke@435 1074 FreeChunk* fc = (FreeChunk*)p;
jmasa@3732 1075 if (fc->is_free()) {
duke@435 1076 return fc->size();
duke@435 1077 } else {
duke@435 1078 // Ignore mark word because this may be a recently promoted
duke@435 1079 // object whose mark word is used to chain together grey
duke@435 1080 // objects (the last one would have a null value).
duke@435 1081 assert(oop(p)->is_oop(true), "Should be an oop");
duke@435 1082 return adjustObjectSize(oop(p)->size());
duke@435 1083 }
duke@435 1084 }
duke@435 1085
duke@435 1086 // This implementation assumes that the property of "being an object" is
duke@435 1087 // stable. But being a free chunk may not be (because of parallel
duke@435 1088 // promotion).
duke@435 1089 bool CompactibleFreeListSpace::block_is_obj(const HeapWord* p) const {
duke@435 1090 FreeChunk* fc = (FreeChunk*)p;
duke@435 1091 assert(is_in_reserved(p), "Should be in space");
duke@435 1092 // When doing a mark-sweep-compact of the CMS generation, this
duke@435 1093 // assertion may fail because prepare_for_compaction() uses
duke@435 1094 // space that is garbage to maintain information on ranges of
duke@435 1095 // live objects so that these live ranges can be moved as a whole.
duke@435 1096 // Comment out this assertion until that problem can be solved
duke@435 1097 // (i.e., that the block start calculation may look at objects
duke@435 1098 // at addresses below "p" in finding the object that contains "p"
duke@435 1099 // and those objects (if garbage) may have been modified to hold
duke@435 1100 // live range information.)
jmasa@2188 1101 // assert(CollectedHeap::use_parallel_gc_threads() || _bt.block_start(p) == p,
jmasa@2188 1102 // "Should be a block boundary");
coleenp@622 1103 if (FreeChunk::indicatesFreeChunk(p)) return false;
coleenp@4037 1104 Klass* k = oop(p)->klass_or_null();
duke@435 1105 if (k != NULL) {
duke@435 1106 // Ignore mark word because it may have been used to
duke@435 1107 // chain together promoted objects (the last one
duke@435 1108 // would have a null value).
duke@435 1109 assert(oop(p)->is_oop(true), "Should be an oop");
duke@435 1110 return true;
duke@435 1111 } else {
duke@435 1112 return false; // Was not an object at the start of collection.
duke@435 1113 }
duke@435 1114 }
duke@435 1115
duke@435 1116 // Check if the object is alive. This fact is checked either by consulting
duke@435 1117 // the main marking bitmap in the sweeping phase or, if it's a permanent
duke@435 1118 // generation and we're not in the sweeping phase, by checking the
duke@435 1119 // perm_gen_verify_bit_map where we store the "deadness" information if
duke@435 1120 // we did not sweep the perm gen in the most recent previous GC cycle.
duke@435 1121 bool CompactibleFreeListSpace::obj_is_alive(const HeapWord* p) const {
ysr@2301 1122 assert(SafepointSynchronize::is_at_safepoint() || !is_init_completed(),
ysr@2301 1123 "Else races are possible");
ysr@2293 1124 assert(block_is_obj(p), "The address should point to an object");
duke@435 1125
duke@435 1126 // If we're sweeping, we use object liveness information from the main bit map
duke@435 1127 // for both perm gen and old gen.
duke@435 1128 // We don't need to lock the bitmap (live_map or dead_map below), because
duke@435 1129 // EITHER we are in the middle of the sweeping phase, and the
duke@435 1130 // main marking bit map (live_map below) is locked,
duke@435 1131 // OR we're in other phases and perm_gen_verify_bit_map (dead_map below)
duke@435 1132 // is stable, because it's mutated only in the sweeping phase.
ysr@2293 1133 // NOTE: This method is also used by jmap where, if class unloading is
ysr@2293 1134 // off, the results can return "false" for legitimate perm objects,
ysr@2293 1135 // when we are not in the midst of a sweeping phase, which can result
ysr@2293 1136 // in jmap not reporting certain perm gen objects. This will be moot
ysr@2293 1137 // if/when the perm gen goes away in the future.
duke@435 1138 if (_collector->abstract_state() == CMSCollector::Sweeping) {
duke@435 1139 CMSBitMap* live_map = _collector->markBitMap();
ysr@2293 1140 return live_map->par_isMarked((HeapWord*) p);
duke@435 1141 }
duke@435 1142 return true;
duke@435 1143 }
duke@435 1144
duke@435 1145 bool CompactibleFreeListSpace::block_is_obj_nopar(const HeapWord* p) const {
duke@435 1146 FreeChunk* fc = (FreeChunk*)p;
duke@435 1147 assert(is_in_reserved(p), "Should be in space");
duke@435 1148 assert(_bt.block_start(p) == p, "Should be a block boundary");
jmasa@3732 1149 if (!fc->is_free()) {
duke@435 1150 // Ignore mark word because it may have been used to
duke@435 1151 // chain together promoted objects (the last one
duke@435 1152 // would have a null value).
duke@435 1153 assert(oop(p)->is_oop(true), "Should be an oop");
duke@435 1154 return true;
duke@435 1155 }
duke@435 1156 return false;
duke@435 1157 }
duke@435 1158
duke@435 1159 // "MT-safe but not guaranteed MT-precise" (TM); you may get an
duke@435 1160 // approximate answer if you don't hold the freelistlock when you call this.
duke@435 1161 size_t CompactibleFreeListSpace::totalSizeInIndexedFreeLists() const {
duke@435 1162 size_t size = 0;
duke@435 1163 for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
duke@435 1164 debug_only(
duke@435 1165 // We may be calling here without the lock in which case we
duke@435 1166 // won't do this modest sanity check.
duke@435 1167 if (freelistLock()->owned_by_self()) {
duke@435 1168 size_t total_list_size = 0;
duke@435 1169 for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
duke@435 1170 fc = fc->next()) {
duke@435 1171 total_list_size += i;
duke@435 1172 }
duke@435 1173 assert(total_list_size == i * _indexedFreeList[i].count(),
duke@435 1174 "Count in list is incorrect");
duke@435 1175 }
duke@435 1176 )
duke@435 1177 size += i * _indexedFreeList[i].count();
duke@435 1178 }
duke@435 1179 return size;
duke@435 1180 }
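
// The loop above is a weighted sum over the segregated bins: bin i holds only
// chunks of exactly i words, so its contribution is i * count(i). A tiny
// standalone sketch of that bookkeeping, with illustrative names that are not
// part of the CMS sources (counts[] is assumed indexed by chunk size in words):
#include <stddef.h>
static size_t example_total_indexed_size(const size_t* counts,
                                         size_t index_set_start,
                                         size_t index_set_size,
                                         size_t index_set_stride) {
  size_t total = 0;
  for (size_t i = index_set_start; i < index_set_size; i += index_set_stride) {
    total += i * counts[i];   // every chunk in bin i contributes i words
  }
  return total;
}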
duke@435 1181
duke@435 1182 HeapWord* CompactibleFreeListSpace::par_allocate(size_t size) {
duke@435 1183 MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
duke@435 1184 return allocate(size);
duke@435 1185 }
duke@435 1186
duke@435 1187 HeapWord*
duke@435 1188 CompactibleFreeListSpace::getChunkFromSmallLinearAllocBlockRemainder(size_t size) {
duke@435 1189 return getChunkFromLinearAllocBlockRemainder(&_smallLinearAllocBlock, size);
duke@435 1190 }
duke@435 1191
duke@435 1192 HeapWord* CompactibleFreeListSpace::allocate(size_t size) {
duke@435 1193 assert_lock_strong(freelistLock());
duke@435 1194 HeapWord* res = NULL;
duke@435 1195 assert(size == adjustObjectSize(size),
duke@435 1196 "use adjustObjectSize() before calling into allocate()");
duke@435 1197
duke@435 1198 if (_adaptive_freelists) {
duke@435 1199 res = allocate_adaptive_freelists(size);
duke@435 1200 } else { // non-adaptive free lists
duke@435 1201 res = allocate_non_adaptive_freelists(size);
duke@435 1202 }
duke@435 1203
duke@435 1204 if (res != NULL) {
duke@435 1205 // check that res does lie in this space!
duke@435 1206 assert(is_in_reserved(res), "Not in this space!");
duke@435 1207 assert(is_aligned((void*)res), "alignment check");
duke@435 1208
duke@435 1209 FreeChunk* fc = (FreeChunk*)res;
duke@435 1210 fc->markNotFree();
jmasa@3732 1211 assert(!fc->is_free(), "shouldn't be marked free");
coleenp@622 1212 assert(oop(fc)->klass_or_null() == NULL, "should look uninitialized");
duke@435 1213 // Verify that the block offset table shows this to
duke@435 1214 // be a single block, but not one which is unallocated.
duke@435 1215 _bt.verify_single_block(res, size);
duke@435 1216 _bt.verify_not_unallocated(res, size);
duke@435 1217 // mangle a just allocated object with a distinct pattern.
duke@435 1218 debug_only(fc->mangleAllocated(size));
duke@435 1219 }
duke@435 1220
duke@435 1221 return res;
duke@435 1222 }
duke@435 1223
duke@435 1224 HeapWord* CompactibleFreeListSpace::allocate_non_adaptive_freelists(size_t size) {
duke@435 1225 HeapWord* res = NULL;
duke@435 1226 // try and use linear allocation for smaller blocks
duke@435 1227 if (size < _smallLinearAllocBlock._allocation_size_limit) {
duke@435 1228 // if successful, the following also adjusts block offset table
duke@435 1229 res = getChunkFromSmallLinearAllocBlock(size);
duke@435 1230 }
duke@435 1231 // Else triage to indexed lists for smaller sizes
duke@435 1232 if (res == NULL) {
duke@435 1233 if (size < SmallForDictionary) {
duke@435 1234 res = (HeapWord*) getChunkFromIndexedFreeList(size);
duke@435 1235 } else {
duke@435 1236 // else get it from the big dictionary; if even this doesn't
duke@435 1237 // work we are out of luck.
duke@435 1238 res = (HeapWord*)getChunkFromDictionaryExact(size);
duke@435 1239 }
duke@435 1240 }
duke@435 1241
duke@435 1242 return res;
duke@435 1243 }
duke@435 1244
duke@435 1245 HeapWord* CompactibleFreeListSpace::allocate_adaptive_freelists(size_t size) {
duke@435 1246 assert_lock_strong(freelistLock());
duke@435 1247 HeapWord* res = NULL;
duke@435 1248 assert(size == adjustObjectSize(size),
duke@435 1249 "use adjustObjectSize() before calling into allocate()");
duke@435 1250
duke@435 1251 // Strategy
duke@435 1252 //   if small
duke@435 1253 //     exact size from the small-object indexed list
duke@435 1254 //     small or large linear allocation block (linAB) as appropriate
duke@435 1255 //     take from lists of greater sized chunks
duke@435 1256 //   else
duke@435 1257 //     dictionary
duke@435 1258 //     small or large linear allocation block if it has the space
duke@435 1259 // Try allocating exact size from indexTable first
duke@435 1260 if (size < IndexSetSize) {
duke@435 1261 res = (HeapWord*) getChunkFromIndexedFreeList(size);
duke@435 1262 if (res != NULL) {
duke@435 1263 assert(res != (HeapWord*)_indexedFreeList[size].head(),
duke@435 1264 "Not removed from free list");
duke@435 1265 // no block offset table adjustment is necessary on blocks in
duke@435 1266 // the indexed lists.
duke@435 1267
duke@435 1268 // Try allocating from the small LinAB
duke@435 1269 } else if (size < _smallLinearAllocBlock._allocation_size_limit &&
duke@435 1270 (res = getChunkFromSmallLinearAllocBlock(size)) != NULL) {
duke@435 1271 // if successful, the above also adjusts block offset table
duke@435 1272 // Note that this call will refill the LinAB to
duke@435 1273 // satisfy the request. This is different than
duke@435 1274 // evm.
duke@435 1275 // Don't record chunk off a LinAB? smallSplitBirth(size);
duke@435 1276 } else {
duke@435 1277 // Raid the exact free lists larger than size, even if they are not
duke@435 1278 // overpopulated.
duke@435 1279 res = (HeapWord*) getChunkFromGreater(size);
duke@435 1280 }
duke@435 1281 } else {
duke@435 1282 // Big objects get allocated directly from the dictionary.
duke@435 1283 res = (HeapWord*) getChunkFromDictionaryExact(size);
duke@435 1284 if (res == NULL) {
duke@435 1285 // Try hard not to fail since an allocation failure will likely
duke@435 1286 // trigger a synchronous GC. Try to get the space from the
duke@435 1287 // allocation blocks.
duke@435 1288 res = getChunkFromSmallLinearAllocBlockRemainder(size);
duke@435 1289 }
duke@435 1290 }
duke@435 1291
duke@435 1292 return res;
duke@435 1293 }
duke@435 1294
duke@435 1295 // A worst-case estimate of the space required (in HeapWords) to expand the heap
duke@435 1296 // when promoting obj.
duke@435 1297 size_t CompactibleFreeListSpace::expansionSpaceRequired(size_t obj_size) const {
duke@435 1298 // Depending on the object size, expansion may require refilling either a
duke@435 1299 // bigLAB or a smallLAB plus refilling a PromotionInfo object. MinChunkSize
duke@435 1300 // is added because the dictionary may over-allocate to avoid fragmentation.
duke@435 1301 size_t space = obj_size;
duke@435 1302 if (!_adaptive_freelists) {
duke@435 1303 space = MAX2(space, _smallLinearAllocBlock._refillSize);
duke@435 1304 }
duke@435 1305 space += _promoInfo.refillSize() + 2 * MinChunkSize;
duke@435 1306 return space;
duke@435 1307 }
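
// A minimal standalone sketch of the worst-case estimate computed above, with
// hypothetical word counts; the function and parameter names are illustrative
// only and simply mirror _smallLinearAllocBlock._refillSize,
// _promoInfo.refillSize() and MinChunkSize.
#include <stddef.h>
static size_t example_expansion_estimate(size_t obj_size,
                                         size_t lin_ab_refill,   // linAB refill size
                                         size_t promo_refill,    // promotion info refill size
                                         size_t min_chunk,       // minimum chunk size
                                         bool   adaptive_freelists) {
  size_t space = obj_size;
  if (!adaptive_freelists) {
    space = (space > lin_ab_refill) ? space : lin_ab_refill;  // MAX2 analogue
  }
  // plus 2 * min_chunk because the dictionary may over-allocate to avoid fragmentation
  return space + promo_refill + 2 * min_chunk;
}
// For instance, obj_size = 8, lin_ab_refill = 2048, promo_refill = 64 and
// min_chunk = 4 with non-adaptive free lists would yield 2048 + 64 + 8 = 2120 words.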
duke@435 1308
duke@435 1309 FreeChunk* CompactibleFreeListSpace::getChunkFromGreater(size_t numWords) {
duke@435 1310 FreeChunk* ret;
duke@435 1311
duke@435 1312 assert(numWords >= MinChunkSize, "Size is less than minimum");
duke@435 1313 assert(linearAllocationWouldFail() || bestFitFirst(),
duke@435 1314 "Should not be here");
duke@435 1315
duke@435 1316 size_t i;
duke@435 1317 size_t currSize = numWords + MinChunkSize;
duke@435 1318 assert(currSize % MinObjAlignment == 0, "currSize should be aligned");
duke@435 1319 for (i = currSize; i < IndexSetSize; i += IndexSetStride) {
jmasa@4196 1320 AdaptiveFreeList<FreeChunk>* fl = &_indexedFreeList[i];
duke@435 1321 if (fl->head()) {
duke@435 1322 ret = getFromListGreater(fl, numWords);
jmasa@3732 1323 assert(ret == NULL || ret->is_free(), "Should be returning a free chunk");
duke@435 1324 return ret;
duke@435 1325 }
duke@435 1326 }
duke@435 1327
duke@435 1328 currSize = MAX2((size_t)SmallForDictionary,
duke@435 1329 (size_t)(numWords + MinChunkSize));
duke@435 1330
duke@435 1331 /* Try to get a chunk that satisfies request, while avoiding
duke@435 1332 fragmentation that can't be handled. */
duke@435 1333 {
jmasa@3732 1334 ret = dictionary()->get_chunk(currSize);
duke@435 1335 if (ret != NULL) {
duke@435 1336 assert(ret->size() - numWords >= MinChunkSize,
duke@435 1337 "Chunk is too small");
duke@435 1338 _bt.allocated((HeapWord*)ret, ret->size());
duke@435 1339 /* Carve returned chunk. */
duke@435 1340 (void) splitChunkAndReturnRemainder(ret, numWords);
duke@435 1341 /* Label this as no longer a free chunk. */
jmasa@3732 1342 assert(ret->is_free(), "This chunk should be free");
jmasa@3732 1343 ret->link_prev(NULL);
duke@435 1344 }
jmasa@3732 1345 assert(ret == NULL || ret->is_free(), "Should be returning a free chunk");
duke@435 1346 return ret;
duke@435 1347 }
duke@435 1348 ShouldNotReachHere();
duke@435 1349 }
duke@435 1350
ysr@3220 1351 bool CompactibleFreeListSpace::verifyChunkInIndexedFreeLists(FreeChunk* fc) const {
duke@435 1352 assert(fc->size() < IndexSetSize, "Size of chunk is too large");
jmasa@3732 1353 return _indexedFreeList[fc->size()].verify_chunk_in_free_list(fc);
duke@435 1354 }
duke@435 1355
ysr@3220 1356 bool CompactibleFreeListSpace::verify_chunk_is_linear_alloc_block(FreeChunk* fc) const {
ysr@3220 1357 assert((_smallLinearAllocBlock._ptr != (HeapWord*)fc) ||
ysr@3220 1358 (_smallLinearAllocBlock._word_size == fc->size()),
ysr@3220 1359 "Linear allocation block shows incorrect size");
ysr@3220 1360 return ((_smallLinearAllocBlock._ptr == (HeapWord*)fc) &&
ysr@3220 1361 (_smallLinearAllocBlock._word_size == fc->size()));
ysr@3220 1362 }
ysr@3220 1363
ysr@3220 1364 // Check if the purported free chunk is present either as a linear
ysr@3220 1365 // allocation block, the size-indexed table of (smaller) free blocks,
ysr@3220 1366 // or the larger free blocks kept in the binary tree dictionary.
jmasa@3732 1367 bool CompactibleFreeListSpace::verify_chunk_in_free_list(FreeChunk* fc) const {
ysr@3220 1368 if (verify_chunk_is_linear_alloc_block(fc)) {
ysr@3220 1369 return true;
ysr@3220 1370 } else if (fc->size() < IndexSetSize) {
ysr@3220 1371 return verifyChunkInIndexedFreeLists(fc);
ysr@3220 1372 } else {
jmasa@3732 1373 return dictionary()->verify_chunk_in_free_list(fc);
duke@435 1374 }
duke@435 1375 }
duke@435 1376
duke@435 1377 #ifndef PRODUCT
duke@435 1378 void CompactibleFreeListSpace::assert_locked() const {
duke@435 1379 CMSLockVerifier::assert_locked(freelistLock(), parDictionaryAllocLock());
duke@435 1380 }
ysr@1580 1381
ysr@1580 1382 void CompactibleFreeListSpace::assert_locked(const Mutex* lock) const {
ysr@1580 1383 CMSLockVerifier::assert_locked(lock);
ysr@1580 1384 }
duke@435 1385 #endif
duke@435 1386
duke@435 1387 FreeChunk* CompactibleFreeListSpace::allocateScratch(size_t size) {
duke@435 1388 // In the parallel case, the main thread holds the free list lock
duke@435 1389 // on behalf of the parallel threads.
duke@435 1390 FreeChunk* fc;
duke@435 1391 {
duke@435 1392 // If GC is parallel, this might be called by several threads.
duke@435 1393 // This should be rare enough that the locking overhead won't affect
duke@435 1394 // the sequential code.
duke@435 1395 MutexLockerEx x(parDictionaryAllocLock(),
duke@435 1396 Mutex::_no_safepoint_check_flag);
duke@435 1397 fc = getChunkFromDictionary(size);
duke@435 1398 }
duke@435 1399 if (fc != NULL) {
duke@435 1400 fc->dontCoalesce();
jmasa@3732 1401 assert(fc->is_free(), "Should be free, but not coalescable");
duke@435 1402 // Verify that the block offset table shows this to
duke@435 1403 // be a single block, but not one which is unallocated.
duke@435 1404 _bt.verify_single_block((HeapWord*)fc, fc->size());
duke@435 1405 _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
duke@435 1406 }
duke@435 1407 return fc;
duke@435 1408 }
duke@435 1409
coleenp@548 1410 oop CompactibleFreeListSpace::promote(oop obj, size_t obj_size) {
duke@435 1411 assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
duke@435 1412 assert_locked();
duke@435 1413
duke@435 1414 // If we are tracking promotions, then first ensure space for
duke@435 1415 // promotion (including spooling space for saving the header if necessary),
duke@435 1416 // then allocate and copy, then track promoted info if needed.
duke@435 1417 // When tracking (see PromotionInfo::track()), the mark word may
duke@435 1418 // be displaced and in this case restoration of the mark word
duke@435 1419 // occurs in the (oop_since_save_marks_)iterate phase.
duke@435 1420 if (_promoInfo.tracking() && !_promoInfo.ensure_spooling_space()) {
duke@435 1421 return NULL;
duke@435 1422 }
duke@435 1423 // Call the allocate(size_t, bool) form directly to avoid the
duke@435 1424 // additional call through the allocate(size_t) form. Having
duke@435 1425 // the compiler inline the call is problematic because allocate(size_t)
duke@435 1426 // is a virtual method.
duke@435 1427 HeapWord* res = allocate(adjustObjectSize(obj_size));
duke@435 1428 if (res != NULL) {
duke@435 1429 Copy::aligned_disjoint_words((HeapWord*)obj, res, obj_size);
duke@435 1430 // if we should be tracking promotions, do so.
duke@435 1431 if (_promoInfo.tracking()) {
duke@435 1432 _promoInfo.track((PromotedObject*)res);
duke@435 1433 }
duke@435 1434 }
duke@435 1435 return oop(res);
duke@435 1436 }
duke@435 1437
duke@435 1438 HeapWord*
duke@435 1439 CompactibleFreeListSpace::getChunkFromSmallLinearAllocBlock(size_t size) {
duke@435 1440 assert_locked();
duke@435 1441 assert(size >= MinChunkSize, "minimum chunk size");
duke@435 1442 assert(size < _smallLinearAllocBlock._allocation_size_limit,
duke@435 1443 "maximum from smallLinearAllocBlock");
duke@435 1444 return getChunkFromLinearAllocBlock(&_smallLinearAllocBlock, size);
duke@435 1445 }
duke@435 1446
duke@435 1447 HeapWord*
duke@435 1448 CompactibleFreeListSpace::getChunkFromLinearAllocBlock(LinearAllocBlock *blk,
duke@435 1449 size_t size) {
duke@435 1450 assert_locked();
duke@435 1451 assert(size >= MinChunkSize, "too small");
duke@435 1452 HeapWord* res = NULL;
duke@435 1453 // Try to do linear allocation from blk, making sure that it has not been exhausted.
duke@435 1454 if (blk->_word_size == 0) {
duke@435 1455 // We have probably been unable to fill this either in the prologue or
duke@435 1456 // when it was exhausted at the last linear allocation. Bail out until
duke@435 1457 // next time.
duke@435 1458 assert(blk->_ptr == NULL, "consistency check");
duke@435 1459 return NULL;
duke@435 1460 }
duke@435 1461 assert(blk->_word_size != 0 && blk->_ptr != NULL, "consistency check");
duke@435 1462 res = getChunkFromLinearAllocBlockRemainder(blk, size);
duke@435 1463 if (res != NULL) return res;
duke@435 1464
duke@435 1465 // about to exhaust this linear allocation block
duke@435 1466 if (blk->_word_size == size) { // exactly satisfied
duke@435 1467 res = blk->_ptr;
duke@435 1468 _bt.allocated(res, blk->_word_size);
duke@435 1469 } else if (size + MinChunkSize <= blk->_refillSize) {
ysr@1580 1470 size_t sz = blk->_word_size;
duke@435 1471 // Update _unallocated_block if the size is such that chunk would be
duke@435 1472 // returned to the indexed free list. All other chunks in the indexed
duke@435 1473 // free lists are allocated from the dictionary so that _unallocated_block
duke@435 1474 // has already been adjusted for them. Do it here so that the cost
duke@435 1475 // is paid for all chunks added back to the indexed free lists.
ysr@1580 1476 if (sz < SmallForDictionary) {
ysr@1580 1477 _bt.allocated(blk->_ptr, sz);
duke@435 1478 }
duke@435 1479 // Return the chunk that isn't big enough, and then refill below.
ysr@1580 1480 addChunkToFreeLists(blk->_ptr, sz);
jmasa@3732 1481 split_birth(sz);
duke@435 1482 // Don't keep statistics on adding back chunk from a LinAB.
duke@435 1483 } else {
duke@435 1484 // A refilled block would not satisfy the request.
duke@435 1485 return NULL;
duke@435 1486 }
duke@435 1487
duke@435 1488 blk->_ptr = NULL; blk->_word_size = 0;
duke@435 1489 refillLinearAllocBlock(blk);
duke@435 1490 assert(blk->_ptr == NULL || blk->_word_size >= size + MinChunkSize,
duke@435 1491 "block was replenished");
duke@435 1492 if (res != NULL) {
jmasa@3732 1493 split_birth(size);
duke@435 1494 repairLinearAllocBlock(blk);
duke@435 1495 } else if (blk->_ptr != NULL) {
duke@435 1496 res = blk->_ptr;
duke@435 1497 size_t blk_size = blk->_word_size;
duke@435 1498 blk->_word_size -= size;
duke@435 1499 blk->_ptr += size;
jmasa@3732 1500 split_birth(size);
duke@435 1501 repairLinearAllocBlock(blk);
duke@435 1502 // Update BOT last so that other (parallel) GC threads see a consistent
duke@435 1503 // view of the BOT and free blocks.
duke@435 1504 // Above must occur before BOT is updated below.
ysr@2071 1505 OrderAccess::storestore();
duke@435 1506 _bt.split_block(res, blk_size, size); // adjust block offset table
duke@435 1507 }
duke@435 1508 return res;
duke@435 1509 }
duke@435 1510
duke@435 1511 HeapWord* CompactibleFreeListSpace::getChunkFromLinearAllocBlockRemainder(
duke@435 1512 LinearAllocBlock* blk,
duke@435 1513 size_t size) {
duke@435 1514 assert_locked();
duke@435 1515 assert(size >= MinChunkSize, "too small");
duke@435 1516
duke@435 1517 HeapWord* res = NULL;
duke@435 1518 // This is the common case. Keep it simple.
duke@435 1519 if (blk->_word_size >= size + MinChunkSize) {
duke@435 1520 assert(blk->_ptr != NULL, "consistency check");
duke@435 1521 res = blk->_ptr;
duke@435 1522 // Note that the BOT is up-to-date for the linAB before allocation. It
duke@435 1523 // indicates the start of the linAB. The split_block() updates the
duke@435 1524 // BOT for the linAB after the allocation (indicates the start of the
duke@435 1525 // next chunk to be allocated).
duke@435 1526 size_t blk_size = blk->_word_size;
duke@435 1527 blk->_word_size -= size;
duke@435 1528 blk->_ptr += size;
jmasa@3732 1529 split_birth(size);
duke@435 1530 repairLinearAllocBlock(blk);
duke@435 1531 // Update BOT last so that other (parallel) GC threads see a consistent
duke@435 1532 // view of the BOT and free blocks.
duke@435 1533 // Above must occur before BOT is updated below.
ysr@2071 1534 OrderAccess::storestore();
duke@435 1535 _bt.split_block(res, blk_size, size); // adjust block offset table
duke@435 1536 _bt.allocated(res, size);
duke@435 1537 }
duke@435 1538 return res;
duke@435 1539 }
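
// The common case above (blk->_word_size >= size + MinChunkSize) hands out
// space from the front of the block with a bump-pointer step, and only when
// the leftover would still be a legal free chunk. A self-contained sketch of
// that carve, using a hypothetical block descriptor rather than the real
// LinearAllocBlock:
#include <stddef.h>
struct ExampleLinAB {
  char*  ptr;        // start of the unallocated tail of the block
  size_t word_size;  // words remaining in the block
};
static char* example_carve_from_linab(ExampleLinAB* blk, size_t size,
                                      size_t min_chunk, size_t bytes_per_word) {
  if (blk->word_size < size + min_chunk) {
    return NULL;     // refusing keeps the remainder large enough to stay a free chunk
  }
  char* res = blk->ptr;                     // allocate from the front ...
  blk->ptr       += size * bytes_per_word;  // ... and bump the block past it
  blk->word_size -= size;
  return res;
}
// The real code additionally records a split birth and updates the block
// offset table only after a storestore barrier, so that other (parallel) GC
// threads see a consistent view of the BOT and free blocks.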
duke@435 1540
duke@435 1541 FreeChunk*
duke@435 1542 CompactibleFreeListSpace::getChunkFromIndexedFreeList(size_t size) {
duke@435 1543 assert_locked();
duke@435 1544 assert(size < SmallForDictionary, "just checking");
duke@435 1545 FreeChunk* res;
jmasa@3732 1546 res = _indexedFreeList[size].get_chunk_at_head();
duke@435 1547 if (res == NULL) {
duke@435 1548 res = getChunkFromIndexedFreeListHelper(size);
duke@435 1549 }
duke@435 1550 _bt.verify_not_unallocated((HeapWord*) res, size);
ysr@1580 1551 assert(res == NULL || res->size() == size, "Incorrect block size");
duke@435 1552 return res;
duke@435 1553 }
duke@435 1554
duke@435 1555 FreeChunk*
ysr@1580 1556 CompactibleFreeListSpace::getChunkFromIndexedFreeListHelper(size_t size,
ysr@1580 1557 bool replenish) {
duke@435 1558 assert_locked();
duke@435 1559 FreeChunk* fc = NULL;
duke@435 1560 if (size < SmallForDictionary) {
duke@435 1561 assert(_indexedFreeList[size].head() == NULL ||
duke@435 1562 _indexedFreeList[size].surplus() <= 0,
duke@435 1563 "List for this size should be empty or under populated");
duke@435 1564 // Try best fit in exact lists before replenishing the list
duke@435 1565 if (!bestFitFirst() || (fc = bestFitSmall(size)) == NULL) {
duke@435 1566 // Replenish list.
duke@435 1567 //
duke@435 1568 // Things tried that failed.
duke@435 1569 // Tried allocating out of the two LinAB's first before
duke@435 1570 // replenishing lists.
duke@435 1571 // Tried small linAB of size 256 (size in indexed list)
duke@435 1572 // and replenishing indexed lists from the small linAB.
duke@435 1573 //
duke@435 1574 FreeChunk* newFc = NULL;
ysr@1580 1575 const size_t replenish_size = CMSIndexedFreeListReplenish * size;
duke@435 1576 if (replenish_size < SmallForDictionary) {
duke@435 1577 // Do not replenish from an underpopulated size.
duke@435 1578 if (_indexedFreeList[replenish_size].surplus() > 0 &&
duke@435 1579 _indexedFreeList[replenish_size].head() != NULL) {
jmasa@3732 1580 newFc = _indexedFreeList[replenish_size].get_chunk_at_head();
ysr@1580 1581 } else if (bestFitFirst()) {
duke@435 1582 newFc = bestFitSmall(replenish_size);
duke@435 1583 }
duke@435 1584 }
ysr@1580 1585 if (newFc == NULL && replenish_size > size) {
ysr@1580 1586 assert(CMSIndexedFreeListReplenish > 1, "ctl pt invariant");
ysr@1580 1587 newFc = getChunkFromIndexedFreeListHelper(replenish_size, false);
ysr@1580 1588 }
ysr@1580 1589 // Note: The stats update re split-death of the block obtained above
ysr@1580 1590 // will be recorded below precisely when we know we are going to
ysr@1580 1591 // be actually splitting it into more than one piece.
duke@435 1592 if (newFc != NULL) {
ysr@1580 1593 if (replenish || CMSReplenishIntermediate) {
ysr@1580 1594 // Replenish this list and return one block to caller.
ysr@1580 1595 size_t i;
ysr@1580 1596 FreeChunk *curFc, *nextFc;
ysr@1580 1597 size_t num_blk = newFc->size() / size;
ysr@1580 1598 assert(num_blk >= 1, "Smaller than requested?");
ysr@1580 1599 assert(newFc->size() % size == 0, "Should be integral multiple of request");
ysr@1580 1600 if (num_blk > 1) {
ysr@1580 1601 // we are sure we will be splitting the block just obtained
ysr@1580 1602 // into multiple pieces; record the split-death of the original
ysr@1580 1603 splitDeath(replenish_size);
ysr@1580 1604 }
ysr@1580 1605 // carve up and link blocks 0, ..., num_blk - 2
ysr@1580 1606 // The last chunk is not added to the lists but is returned as the
ysr@1580 1607 // free chunk.
ysr@1580 1608 for (curFc = newFc, nextFc = (FreeChunk*)((HeapWord*)curFc + size),
ysr@1580 1609 i = 0;
ysr@1580 1610 i < (num_blk - 1);
ysr@1580 1611 curFc = nextFc, nextFc = (FreeChunk*)((HeapWord*)nextFc + size),
ysr@1580 1612 i++) {
jmasa@3732 1613 curFc->set_size(size);
ysr@1580 1614 // Don't record this as a return in order to try and
ysr@1580 1615 // determine the "returns" from a GC.
ysr@1580 1616 _bt.verify_not_unallocated((HeapWord*) curFc, size);
jmasa@3732 1617 _indexedFreeList[size].return_chunk_at_tail(curFc, false);
ysr@1580 1618 _bt.mark_block((HeapWord*)curFc, size);
jmasa@3732 1619 split_birth(size);
ysr@1580 1620 // Don't record the initial population of the indexed list
ysr@1580 1621 // as a split birth.
ysr@1580 1622 }
ysr@1580 1623
ysr@1580 1624 // check that the arithmetic was OK above
ysr@1580 1625 assert((HeapWord*)nextFc == (HeapWord*)newFc + num_blk*size,
ysr@1580 1626 "inconsistency in carving newFc");
jmasa@3732 1627 curFc->set_size(size);
duke@435 1628 _bt.mark_block((HeapWord*)curFc, size);
jmasa@3732 1629 split_birth(size);
ysr@1580 1630 fc = curFc;
ysr@1580 1631 } else {
ysr@1580 1632 // Return entire block to caller
ysr@1580 1633 fc = newFc;
duke@435 1634 }
duke@435 1635 }
duke@435 1636 }
duke@435 1637 } else {
duke@435 1638 // Get a free chunk from the free chunk dictionary to be returned to
duke@435 1639 // replenish the indexed free list.
duke@435 1640 fc = getChunkFromDictionaryExact(size);
duke@435 1641 }
jmasa@3732 1642 // assert(fc == NULL || fc->is_free(), "Should be returning a free chunk");
duke@435 1643 return fc;
duke@435 1644 }
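
// The replenishing branch above obtains a block of num_blk * size words,
// pushes pieces 0 .. num_blk-2 onto the size-indexed list and returns the
// last piece to the caller. A simplified, self-contained sketch of that
// carving, with an ordinary std::vector standing in for the indexed free list
// (names are illustrative, not CMS APIs):
#include <stddef.h>
#include <vector>
static char* example_carve_replenished_block(char* block, size_t size_in_words,
                                             size_t num_blk, size_t bytes_per_word,
                                             std::vector<char*>* indexed_list) {
  char* cur = block;
  for (size_t i = 0; i + 1 < num_blk; i++) {   // pieces 0, ..., num_blk - 2
    indexed_list->push_back(cur);              // replenish the indexed list
    cur += size_in_words * bytes_per_word;
  }
  return cur;                                  // the last piece goes to the caller
}
// The real loop also sizes each piece, marks it in the block offset table and
// records the split births, none of which is modelled here.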
duke@435 1645
duke@435 1646 FreeChunk*
duke@435 1647 CompactibleFreeListSpace::getChunkFromDictionary(size_t size) {
duke@435 1648 assert_locked();
jmasa@4488 1649 FreeChunk* fc = _dictionary->get_chunk(size,
jmasa@4488 1650 FreeBlockDictionary<FreeChunk>::atLeast);
duke@435 1651 if (fc == NULL) {
duke@435 1652 return NULL;
duke@435 1653 }
duke@435 1654 _bt.allocated((HeapWord*)fc, fc->size());
duke@435 1655 if (fc->size() >= size + MinChunkSize) {
duke@435 1656 fc = splitChunkAndReturnRemainder(fc, size);
duke@435 1657 }
duke@435 1658 assert(fc->size() >= size, "chunk too small");
duke@435 1659 assert(fc->size() < size + MinChunkSize, "chunk too big");
duke@435 1660 _bt.verify_single_block((HeapWord*)fc, fc->size());
duke@435 1661 return fc;
duke@435 1662 }
duke@435 1663
duke@435 1664 FreeChunk*
duke@435 1665 CompactibleFreeListSpace::getChunkFromDictionaryExact(size_t size) {
duke@435 1666 assert_locked();
jmasa@4488 1667 FreeChunk* fc = _dictionary->get_chunk(size,
jmasa@4488 1668 FreeBlockDictionary<FreeChunk>::atLeast);
duke@435 1669 if (fc == NULL) {
duke@435 1670 return fc;
duke@435 1671 }
duke@435 1672 _bt.allocated((HeapWord*)fc, fc->size());
duke@435 1673 if (fc->size() == size) {
duke@435 1674 _bt.verify_single_block((HeapWord*)fc, size);
duke@435 1675 return fc;
duke@435 1676 }
jmasa@3732 1677 assert(fc->size() > size, "get_chunk() guarantee");
duke@435 1678 if (fc->size() < size + MinChunkSize) {
duke@435 1679 // Return the chunk to the dictionary and go get a bigger one.
duke@435 1680 returnChunkToDictionary(fc);
jmasa@4488 1681 fc = _dictionary->get_chunk(size + MinChunkSize,
jmasa@4488 1682 FreeBlockDictionary<FreeChunk>::atLeast);
duke@435 1683 if (fc == NULL) {
duke@435 1684 return NULL;
duke@435 1685 }
duke@435 1686 _bt.allocated((HeapWord*)fc, fc->size());
duke@435 1687 }
duke@435 1688 assert(fc->size() >= size + MinChunkSize, "tautology");
duke@435 1689 fc = splitChunkAndReturnRemainder(fc, size);
duke@435 1690 assert(fc->size() == size, "chunk is wrong size");
duke@435 1691 _bt.verify_single_block((HeapWord*)fc, size);
duke@435 1692 return fc;
duke@435 1693 }
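
// The retry above relies on a simple invariant: an exact-size request can only
// be cut from a chunk that is either exactly the requested size or at least
// MinChunkSize words larger, so that the trimmed-off remainder is itself a
// legal free chunk. A small illustrative check (names are hypothetical):
#include <stddef.h>
static bool example_exact_fit_possible(size_t chunk_words, size_t request_words,
                                       size_t min_chunk) {
  return chunk_words == request_words ||
         chunk_words >= request_words + min_chunk;
}
// For example, with min_chunk = 4 a 10-word chunk cannot serve an 8-word exact
// request (the 2-word remainder would be too small), which is why the code
// above returns it and asks the dictionary for at least 8 + 4 = 12 words.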
duke@435 1694
duke@435 1695 void
duke@435 1696 CompactibleFreeListSpace::returnChunkToDictionary(FreeChunk* chunk) {
duke@435 1697 assert_locked();
duke@435 1698
duke@435 1699 size_t size = chunk->size();
duke@435 1700 _bt.verify_single_block((HeapWord*)chunk, size);
duke@435 1701 // adjust _unallocated_block downward, as necessary
duke@435 1702 _bt.freed((HeapWord*)chunk, size);
jmasa@3732 1703 _dictionary->return_chunk(chunk);
ysr@1580 1704 #ifndef PRODUCT
ysr@1580 1705 if (CMSCollector::abstract_state() != CMSCollector::Sweeping) {
goetz@6337 1706 TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >* tc = TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >::as_TreeChunk(chunk);
goetz@6337 1707 TreeList<FreeChunk, AdaptiveFreeList<FreeChunk> >* tl = tc->list();
jmasa@4196 1708 tl->verify_stats();
ysr@1580 1709 }
ysr@1580 1710 #endif // PRODUCT
duke@435 1711 }
duke@435 1712
duke@435 1713 void
duke@435 1714 CompactibleFreeListSpace::returnChunkToFreeList(FreeChunk* fc) {
duke@435 1715 assert_locked();
duke@435 1716 size_t size = fc->size();
duke@435 1717 _bt.verify_single_block((HeapWord*) fc, size);
duke@435 1718 _bt.verify_not_unallocated((HeapWord*) fc, size);
duke@435 1719 if (_adaptive_freelists) {
jmasa@3732 1720 _indexedFreeList[size].return_chunk_at_tail(fc);
duke@435 1721 } else {
jmasa@3732 1722 _indexedFreeList[size].return_chunk_at_head(fc);
duke@435 1723 }
ysr@1580 1724 #ifndef PRODUCT
ysr@1580 1725 if (CMSCollector::abstract_state() != CMSCollector::Sweeping) {
ysr@1580 1726 _indexedFreeList[size].verify_stats();
ysr@1580 1727 }
ysr@1580 1728 #endif // PRODUCT
duke@435 1729 }
duke@435 1730
duke@435 1731 // Add chunk to end of last block -- if it's the largest
duke@435 1732 // block -- and update BOT and census data. We would
duke@435 1733 // of course have preferred to coalesce it with the
duke@435 1734 // last block, but it's currently less expensive to find the
duke@435 1735 // largest block than it is to find the last.
duke@435 1736 void
duke@435 1737 CompactibleFreeListSpace::addChunkToFreeListsAtEndRecordingStats(
duke@435 1738 HeapWord* chunk, size_t size) {
duke@435 1739 // check that the chunk does lie in this space!
duke@435 1740 assert(chunk != NULL && is_in_reserved(chunk), "Not in this space!");
duke@435 1741 // One of the parallel gc task threads may be here
duke@435 1742 // whilst others are allocating.
duke@435 1743 Mutex* lock = NULL;
duke@435 1744 if (ParallelGCThreads != 0) {
duke@435 1745 lock = &_parDictionaryAllocLock;
duke@435 1746 }
duke@435 1747 FreeChunk* ec;
duke@435 1748 {
duke@435 1749 MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
jmasa@3732 1750 ec = dictionary()->find_largest_dict(); // get largest block
jmasa@4196 1751 if (ec != NULL && ec->end() == (uintptr_t*) chunk) {
duke@435 1752 // It's a coterminal block - we can coalesce.
duke@435 1753 size_t old_size = ec->size();
duke@435 1754 coalDeath(old_size);
duke@435 1755 removeChunkFromDictionary(ec);
duke@435 1756 size += old_size;
duke@435 1757 } else {
duke@435 1758 ec = (FreeChunk*)chunk;
duke@435 1759 }
duke@435 1760 }
jmasa@3732 1761 ec->set_size(size);
duke@435 1762 debug_only(ec->mangleFreed(size));
brutisso@5166 1763 if (size < SmallForDictionary && ParallelGCThreads != 0) {
duke@435 1764 lock = _indexedFreeListParLocks[size];
duke@435 1765 }
duke@435 1766 MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
duke@435 1767 addChunkAndRepairOffsetTable((HeapWord*)ec, size, true);
duke@435 1768 // record the birth under the lock since the recording involves
duke@435 1769 // manipulation of the list on which the chunk lives and
duke@435 1770 // if the chunk is allocated and is the last on the list,
duke@435 1771 // the list can go away.
duke@435 1772 coalBirth(size);
duke@435 1773 }
duke@435 1774
duke@435 1775 void
duke@435 1776 CompactibleFreeListSpace::addChunkToFreeLists(HeapWord* chunk,
duke@435 1777 size_t size) {
duke@435 1778 // check that the chunk does lie in this space!
duke@435 1779 assert(chunk != NULL && is_in_reserved(chunk), "Not in this space!");
duke@435 1780 assert_locked();
duke@435 1781 _bt.verify_single_block(chunk, size);
duke@435 1782
duke@435 1783 FreeChunk* fc = (FreeChunk*) chunk;
jmasa@3732 1784 fc->set_size(size);
duke@435 1785 debug_only(fc->mangleFreed(size));
duke@435 1786 if (size < SmallForDictionary) {
duke@435 1787 returnChunkToFreeList(fc);
duke@435 1788 } else {
duke@435 1789 returnChunkToDictionary(fc);
duke@435 1790 }
duke@435 1791 }
duke@435 1792
duke@435 1793 void
duke@435 1794 CompactibleFreeListSpace::addChunkAndRepairOffsetTable(HeapWord* chunk,
duke@435 1795 size_t size, bool coalesced) {
duke@435 1796 assert_locked();
duke@435 1797 assert(chunk != NULL, "null chunk");
duke@435 1798 if (coalesced) {
duke@435 1799 // repair BOT
duke@435 1800 _bt.single_block(chunk, size);
duke@435 1801 }
duke@435 1802 addChunkToFreeLists(chunk, size);
duke@435 1803 }
duke@435 1804
duke@435 1805 // We _must_ find the purported chunk on our free lists;
duke@435 1806 // we assert if we don't.
duke@435 1807 void
duke@435 1808 CompactibleFreeListSpace::removeFreeChunkFromFreeLists(FreeChunk* fc) {
duke@435 1809 size_t size = fc->size();
duke@435 1810 assert_locked();
duke@435 1811 debug_only(verifyFreeLists());
duke@435 1812 if (size < SmallForDictionary) {
duke@435 1813 removeChunkFromIndexedFreeList(fc);
duke@435 1814 } else {
duke@435 1815 removeChunkFromDictionary(fc);
duke@435 1816 }
duke@435 1817 _bt.verify_single_block((HeapWord*)fc, size);
duke@435 1818 debug_only(verifyFreeLists());
duke@435 1819 }
duke@435 1820
duke@435 1821 void
duke@435 1822 CompactibleFreeListSpace::removeChunkFromDictionary(FreeChunk* fc) {
duke@435 1823 size_t size = fc->size();
duke@435 1824 assert_locked();
duke@435 1825 assert(fc != NULL, "null chunk");
duke@435 1826 _bt.verify_single_block((HeapWord*)fc, size);
jmasa@3732 1827 _dictionary->remove_chunk(fc);
duke@435 1828 // adjust _unallocated_block upward, as necessary
duke@435 1829 _bt.allocated((HeapWord*)fc, size);
duke@435 1830 }
duke@435 1831
duke@435 1832 void
duke@435 1833 CompactibleFreeListSpace::removeChunkFromIndexedFreeList(FreeChunk* fc) {
duke@435 1834 assert_locked();
duke@435 1835 size_t size = fc->size();
duke@435 1836 _bt.verify_single_block((HeapWord*)fc, size);
duke@435 1837 NOT_PRODUCT(
duke@435 1838 if (FLSVerifyIndexTable) {
duke@435 1839 verifyIndexedFreeList(size);
duke@435 1840 }
duke@435 1841 )
jmasa@3732 1842 _indexedFreeList[size].remove_chunk(fc);
duke@435 1843 NOT_PRODUCT(
duke@435 1844 if (FLSVerifyIndexTable) {
duke@435 1845 verifyIndexedFreeList(size);
duke@435 1846 }
duke@435 1847 )
duke@435 1848 }
duke@435 1849
duke@435 1850 FreeChunk* CompactibleFreeListSpace::bestFitSmall(size_t numWords) {
duke@435 1851 /* A hint is the next larger size that has a surplus.
duke@435 1852 Start search at a size large enough to guarantee that
duke@435 1853 the excess is >= MIN_CHUNK. */
duke@435 1854 size_t start = align_object_size(numWords + MinChunkSize);
duke@435 1855 if (start < IndexSetSize) {
jmasa@4196 1856 AdaptiveFreeList<FreeChunk>* it = _indexedFreeList;
duke@435 1857 size_t hint = _indexedFreeList[start].hint();
duke@435 1858 while (hint < IndexSetSize) {
duke@435 1859 assert(hint % MinObjAlignment == 0, "hint should be aligned");
jmasa@4196 1860 AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[hint];
duke@435 1861 if (fl->surplus() > 0 && fl->head() != NULL) {
duke@435 1862 // Found a list with surplus, reset original hint
duke@435 1863 // and split out a free chunk which is returned.
duke@435 1864 _indexedFreeList[start].set_hint(hint);
duke@435 1865 FreeChunk* res = getFromListGreater(fl, numWords);
jmasa@3732 1866 assert(res == NULL || res->is_free(),
duke@435 1867 "Should be returning a free chunk");
duke@435 1868 return res;
duke@435 1869 }
duke@435 1870 hint = fl->hint(); /* keep looking */
duke@435 1871 }
duke@435 1872 /* None found. */
duke@435 1873 it[start].set_hint(IndexSetSize);
duke@435 1874 }
duke@435 1875 return NULL;
duke@435 1876 }
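
// The hint mechanism above lets the search jump straight to the next larger
// size believed to have a surplus instead of scanning every bin. A compact,
// self-contained sketch of that walk over a hypothetical bin table (the types
// and names are illustrative, not the CMS AdaptiveFreeList API):
#include <stddef.h>
struct ExampleBin {
  long   surplus;   // chunks beyond the demand estimate for this size
  size_t count;     // chunks currently on this bin's list
  size_t hint;      // next larger size thought to have a surplus
};
static size_t example_follow_hints(const ExampleBin* bins, size_t start,
                                   size_t table_size) {
  size_t hint = bins[start].hint;
  while (hint < table_size) {
    if (bins[hint].surplus > 0 && bins[hint].count > 0) {
      return hint;             // found a donor bin; the caller splits a chunk off it
    }
    hint = bins[hint].hint;    // keep looking further up the table
  }
  return table_size;           // none found
}
// As in the code above, a failed walk resets the starting bin's hint to the
// table size so that later searches give up immediately.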
duke@435 1877
duke@435 1878 /* Requires fl->size >= numWords + MinChunkSize */
jmasa@4196 1879 FreeChunk* CompactibleFreeListSpace::getFromListGreater(AdaptiveFreeList<FreeChunk>* fl,
duke@435 1880 size_t numWords) {
duke@435 1881 FreeChunk *curr = fl->head();
duke@435 1882 assert(curr != NULL, "List is empty");
duke@435 1883 size_t oldNumWords = curr->size();
duke@435 1884 assert(numWords >= MinChunkSize, "Word size is too small");
duke@435 1885 assert(oldNumWords >= numWords + MinChunkSize,
duke@435 1886 "Size of chunks in the list is too small");
duke@435 1887
jmasa@3732 1888 fl->remove_chunk(curr);
duke@435 1889 // recorded indirectly by splitChunkAndReturnRemainder -
duke@435 1890 // smallSplit(oldNumWords, numWords);
duke@435 1891 FreeChunk* new_chunk = splitChunkAndReturnRemainder(curr, numWords);
duke@435 1892 // Does anything have to be done for the remainder in terms of
duke@435 1893 // fixing the card table?
jmasa@3732 1894 assert(new_chunk == NULL || new_chunk->is_free(),
duke@435 1895 "Should be returning a free chunk");
duke@435 1896 return new_chunk;
duke@435 1897 }
duke@435 1898
duke@435 1899 FreeChunk*
duke@435 1900 CompactibleFreeListSpace::splitChunkAndReturnRemainder(FreeChunk* chunk,
duke@435 1901 size_t new_size) {
duke@435 1902 assert_locked();
duke@435 1903 size_t size = chunk->size();
duke@435 1904 assert(size > new_size, "Split from a smaller block?");
duke@435 1905 assert(is_aligned(chunk), "alignment problem");
duke@435 1906 assert(size == adjustObjectSize(size), "alignment problem");
duke@435 1907 size_t rem_size = size - new_size;
duke@435 1908 assert(rem_size == adjustObjectSize(rem_size), "alignment problem");
duke@435 1909 assert(rem_size >= MinChunkSize, "Free chunk smaller than minimum");
duke@435 1910 FreeChunk* ffc = (FreeChunk*)((HeapWord*)chunk + new_size);
duke@435 1911 assert(is_aligned(ffc), "alignment problem");
jmasa@3732 1912 ffc->set_size(rem_size);
jmasa@3732 1913 ffc->link_next(NULL);
jmasa@3732 1914 ffc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
duke@435 1915 // Above must occur before BOT is updated below.
duke@435 1916 // adjust block offset table
ysr@2071 1917 OrderAccess::storestore();
jmasa@3732 1918 assert(chunk->is_free() && ffc->is_free(), "Error");
duke@435 1919 _bt.split_block((HeapWord*)chunk, chunk->size(), new_size);
duke@435 1920 if (rem_size < SmallForDictionary) {
duke@435 1921 bool is_par = (SharedHeap::heap()->n_par_threads() > 0);
duke@435 1922 if (is_par) _indexedFreeListParLocks[rem_size]->lock();
jmasa@3294 1923 assert(!is_par ||
jmasa@3294 1924 (SharedHeap::heap()->n_par_threads() ==
jmasa@3294 1925 SharedHeap::heap()->workers()->active_workers()), "Mismatch");
duke@435 1926 returnChunkToFreeList(ffc);
duke@435 1927 split(size, rem_size);
duke@435 1928 if (is_par) _indexedFreeListParLocks[rem_size]->unlock();
duke@435 1929 } else {
duke@435 1930 returnChunkToDictionary(ffc);
duke@435 1931 split(size, rem_size);
duke@435 1932 }
jmasa@3732 1933 chunk->set_size(new_size);
duke@435 1934 return chunk;
duke@435 1935 }
duke@435 1936
duke@435 1937 void
duke@435 1938 CompactibleFreeListSpace::sweep_completed() {
duke@435 1939 // Now that space is probably plentiful, refill linear
duke@435 1940 // allocation blocks as needed.
duke@435 1941 refillLinearAllocBlocksIfNeeded();
duke@435 1942 }
duke@435 1943
duke@435 1944 void
duke@435 1945 CompactibleFreeListSpace::gc_prologue() {
duke@435 1946 assert_locked();
duke@435 1947 if (PrintFLSStatistics != 0) {
duke@435 1948 gclog_or_tty->print("Before GC:\n");
duke@435 1949 reportFreeListStatistics();
duke@435 1950 }
duke@435 1951 refillLinearAllocBlocksIfNeeded();
duke@435 1952 }
duke@435 1953
duke@435 1954 void
duke@435 1955 CompactibleFreeListSpace::gc_epilogue() {
duke@435 1956 assert_locked();
duke@435 1957 if (PrintGCDetails && Verbose && !_adaptive_freelists) {
duke@435 1958 if (_smallLinearAllocBlock._word_size == 0)
duke@435 1959 warning("CompactibleFreeListSpace(epilogue):: Linear allocation failure");
duke@435 1960 }
duke@435 1961 assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");
duke@435 1962 _promoInfo.stopTrackingPromotions();
duke@435 1963 repairLinearAllocationBlocks();
duke@435 1964 // Print Space's stats
duke@435 1965 if (PrintFLSStatistics != 0) {
duke@435 1966 gclog_or_tty->print("After GC:\n");
duke@435 1967 reportFreeListStatistics();
duke@435 1968 }
duke@435 1969 }
duke@435 1970
duke@435 1971 // Iteration support, mostly delegated from a CMS generation
duke@435 1972
duke@435 1973 void CompactibleFreeListSpace::save_marks() {
ysr@2825 1974 assert(Thread::current()->is_VM_thread(),
ysr@2825 1975 "Global variable should only be set when single-threaded");
ysr@2825 1976 // Mark the "end" of the used space at the time of this call;
duke@435 1977 // note, however, that promoted objects from this point
duke@435 1978 // on are tracked in the _promoInfo below.
ysr@2071 1979 set_saved_mark_word(unallocated_block());
ysr@2825 1980 #ifdef ASSERT
ysr@2825 1981 // Check the sanity of save_marks() etc.
ysr@2825 1982 MemRegion ur = used_region();
ysr@2825 1983 MemRegion urasm = used_region_at_save_marks();
ysr@2825 1984 assert(ur.contains(urasm),
ysr@2825 1985 err_msg(" Error at save_marks(): [" PTR_FORMAT "," PTR_FORMAT ")"
ysr@2825 1986 " should contain [" PTR_FORMAT "," PTR_FORMAT ")",
drchase@6680 1987 p2i(ur.start()), p2i(ur.end()), p2i(urasm.start()), p2i(urasm.end())));
ysr@2825 1988 #endif
duke@435 1989 // inform allocator that promotions should be tracked.
duke@435 1990 assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");
duke@435 1991 _promoInfo.startTrackingPromotions();
duke@435 1992 }
duke@435 1993
duke@435 1994 bool CompactibleFreeListSpace::no_allocs_since_save_marks() {
duke@435 1995 assert(_promoInfo.tracking(), "No preceding save_marks?");
ysr@2132 1996 assert(SharedHeap::heap()->n_par_threads() == 0,
ysr@2132 1997 "Shouldn't be called if using parallel gc.");
duke@435 1998 return _promoInfo.noPromotions();
duke@435 1999 }
duke@435 2000
duke@435 2001 #define CFLS_OOP_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix) \
duke@435 2002 \
duke@435 2003 void CompactibleFreeListSpace:: \
duke@435 2004 oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk) { \
duke@435 2005 assert(SharedHeap::heap()->n_par_threads() == 0, \
duke@435 2006 "Shouldn't be called (yet) during parallel part of gc."); \
duke@435 2007 _promoInfo.promoted_oops_iterate##nv_suffix(blk); \
duke@435 2008 /* \
duke@435 2009 * This also restores any displaced headers and removes the elements from \
duke@435 2010 * the iteration set as they are processed, so that we have a clean slate \
duke@435 2011 * at the end of the iteration. Note, thus, that if new objects are \
duke@435 2012 * promoted as a result of the iteration they are iterated over as well. \
duke@435 2013 */ \
duke@435 2014 assert(_promoInfo.noPromotions(), "_promoInfo inconsistency"); \
duke@435 2015 }
duke@435 2016
duke@435 2017 ALL_SINCE_SAVE_MARKS_CLOSURES(CFLS_OOP_SINCE_SAVE_MARKS_DEFN)
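
// The macro above stamps out one oop_since_save_marks_iterate variant per
// (closure type, nv_suffix) pair supplied by ALL_SINCE_SAVE_MARKS_CLOSURES.
// A self-contained analogue of the same token-pasting pattern, with purely
// hypothetical closure types and suffixes, shows how such an expansion works:
#include <stdio.h>
#define EXAMPLE_ITERATE_DEFN(ClosureType, nv_suffix)      \
  void example_iterate##nv_suffix(ClosureType* blk) {     \
    printf("iterating with %s\n", #ClosureType);          \
    (void)blk;                                            \
  }
struct ExampleClosureA {};
struct ExampleClosureB {};
#define EXAMPLE_ALL_CLOSURES(macro) \
  macro(ExampleClosureA, _a)        \
  macro(ExampleClosureB, _b)
EXAMPLE_ALL_CLOSURES(EXAMPLE_ITERATE_DEFN)  // defines example_iterate_a and example_iterate_b
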
duke@435 2018
ysr@447 2019 bool CompactibleFreeListSpace::linearAllocationWouldFail() const {
duke@435 2020 return _smallLinearAllocBlock._word_size == 0;
duke@435 2021 }
duke@435 2022
duke@435 2023 void CompactibleFreeListSpace::repairLinearAllocationBlocks() {
duke@435 2024 // Fix up linear allocation blocks to look like free blocks
duke@435 2025 repairLinearAllocBlock(&_smallLinearAllocBlock);
duke@435 2026 }
duke@435 2027
duke@435 2028 void CompactibleFreeListSpace::repairLinearAllocBlock(LinearAllocBlock* blk) {
duke@435 2029 assert_locked();
duke@435 2030 if (blk->_ptr != NULL) {
duke@435 2031 assert(blk->_word_size != 0 && blk->_word_size >= MinChunkSize,
duke@435 2032 "Minimum block size requirement");
duke@435 2033 FreeChunk* fc = (FreeChunk*)(blk->_ptr);
jmasa@3732 2034 fc->set_size(blk->_word_size);
jmasa@3732 2035 fc->link_prev(NULL); // mark as free
duke@435 2036 fc->dontCoalesce();
jmasa@3732 2037 assert(fc->is_free(), "just marked it free");
duke@435 2038 assert(fc->cantCoalesce(), "just marked it uncoalescable");
duke@435 2039 }
duke@435 2040 }
duke@435 2041
duke@435 2042 void CompactibleFreeListSpace::refillLinearAllocBlocksIfNeeded() {
duke@435 2043 assert_locked();
duke@435 2044 if (_smallLinearAllocBlock._ptr == NULL) {
duke@435 2045 assert(_smallLinearAllocBlock._word_size == 0,
duke@435 2046 "Size of linAB should be zero if the ptr is NULL");
duke@435 2047 // Reset the linAB refill and allocation size limit.
duke@435 2048 _smallLinearAllocBlock.set(0, 0, 1024*SmallForLinearAlloc, SmallForLinearAlloc);
duke@435 2049 }
duke@435 2050 refillLinearAllocBlockIfNeeded(&_smallLinearAllocBlock);
duke@435 2051 }
duke@435 2052
duke@435 2053 void
duke@435 2054 CompactibleFreeListSpace::refillLinearAllocBlockIfNeeded(LinearAllocBlock* blk) {
duke@435 2055 assert_locked();
duke@435 2056 assert((blk->_ptr == NULL && blk->_word_size == 0) ||
duke@435 2057 (blk->_ptr != NULL && blk->_word_size >= MinChunkSize),
duke@435 2058 "blk invariant");
duke@435 2059 if (blk->_ptr == NULL) {
duke@435 2060 refillLinearAllocBlock(blk);
duke@435 2061 }
duke@435 2062 if (PrintMiscellaneous && Verbose) {
duke@435 2063 if (blk->_word_size == 0) {
duke@435 2064 warning("CompactibleFreeListSpace(prologue):: Linear allocation failure");
duke@435 2065 }
duke@435 2066 }
duke@435 2067 }
duke@435 2068
duke@435 2069 void
duke@435 2070 CompactibleFreeListSpace::refillLinearAllocBlock(LinearAllocBlock* blk) {
duke@435 2071 assert_locked();
duke@435 2072 assert(blk->_word_size == 0 && blk->_ptr == NULL,
duke@435 2073 "linear allocation block should be empty");
duke@435 2074 FreeChunk* fc;
duke@435 2075 if (blk->_refillSize < SmallForDictionary &&
duke@435 2076 (fc = getChunkFromIndexedFreeList(blk->_refillSize)) != NULL) {
duke@435 2077 // A linAB's strategy might be to use small sizes to reduce
duke@435 2078 // fragmentation but still get the benefits of allocation from a
duke@435 2079 // linAB.
duke@435 2080 } else {
duke@435 2081 fc = getChunkFromDictionary(blk->_refillSize);
duke@435 2082 }
duke@435 2083 if (fc != NULL) {
duke@435 2084 blk->_ptr = (HeapWord*)fc;
duke@435 2085 blk->_word_size = fc->size();
duke@435 2086 fc->dontCoalesce(); // to prevent sweeper from sweeping us up
duke@435 2087 }
duke@435 2088 }
duke@435 2089
ysr@447 2090 // Support for concurrent collection policy decisions.
ysr@447 2091 bool CompactibleFreeListSpace::should_concurrent_collect() const {
ysr@447 2092 // In the future we might want to add in fragmentation stats --
ysr@447 2093 // including erosion of the "mountain" into this decision as well.
ysr@447 2094 return !adaptive_freelists() && linearAllocationWouldFail();
ysr@447 2095 }
ysr@447 2096
duke@435 2097 // Support for compaction
duke@435 2098
duke@435 2099 void CompactibleFreeListSpace::prepare_for_compaction(CompactPoint* cp) {
duke@435 2100 SCAN_AND_FORWARD(cp,end,block_is_obj,block_size);
duke@435 2101 // prepare_for_compaction() uses the space between live objects
duke@435 2102 // so that later phases can skip dead space quickly. As a result,
duke@435 2103 // verification of the free lists doesn't work after this point.
duke@435 2104 }
duke@435 2105
duke@435 2106 #define obj_size(q) adjustObjectSize(oop(q)->size())
duke@435 2107 #define adjust_obj_size(s) adjustObjectSize(s)
duke@435 2108
duke@435 2109 void CompactibleFreeListSpace::adjust_pointers() {
duke@435 2110 // In other versions of adjust_pointers(), a bail out
duke@435 2111 // based on the amount of live data in the generation
duke@435 2112 // (i.e., if 0, bail out) may be used.
duke@435 2113 // Cannot test used() == 0 here because the free lists have already
duke@435 2114 // been mangled by the compaction.
duke@435 2115
duke@435 2116 SCAN_AND_ADJUST_POINTERS(adjust_obj_size);
duke@435 2117 // See note about verification in prepare_for_compaction().
duke@435 2118 }
duke@435 2119
duke@435 2120 void CompactibleFreeListSpace::compact() {
duke@435 2121 SCAN_AND_COMPACT(obj_size);
duke@435 2122 }
duke@435 2123
duke@435 2124 // fragmentation_metric = 1 - [sum of (fbs**2) / (sum of fbs)**2]
duke@435 2125 // where fbs are the free block sizes
duke@435 2126 double CompactibleFreeListSpace::flsFrag() const {
duke@435 2127 size_t itabFree = totalSizeInIndexedFreeLists();
duke@435 2128 double frag = 0.0;
duke@435 2129 size_t i;
duke@435 2130
duke@435 2131 for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
duke@435 2132 double sz = i;
duke@435 2133 frag += _indexedFreeList[i].count() * (sz * sz);
duke@435 2134 }
duke@435 2135
duke@435 2136 double totFree = itabFree +
jmasa@3732 2137 _dictionary->total_chunk_size(DEBUG_ONLY(freelistLock()));
duke@435 2138 if (totFree > 0) {
duke@435 2139 frag = ((frag + _dictionary->sum_of_squared_block_sizes()) /
duke@435 2140 (totFree * totFree));
duke@435 2141 frag = (double)1.0 - frag;
duke@435 2142 } else {
duke@435 2143 assert(frag == 0.0, "Follows from totFree == 0");
duke@435 2144 }
duke@435 2145 return frag;
duke@435 2146 }
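
// A standalone illustration of the metric above, with hypothetical inputs: it
// is 0 when all free space sits in a single block and approaches 1 as the same
// total is shattered into many small blocks. The name and signature are
// illustrative only.
static double example_fls_frag(const double* block_sizes, int n) {
  double sum = 0.0, sum_sq = 0.0;
  for (int i = 0; i < n; i++) {
    sum    += block_sizes[i];
    sum_sq += block_sizes[i] * block_sizes[i];
  }
  if (sum <= 0.0) return 0.0;               // no free blocks: treat as unfragmented
  return 1.0 - sum_sq / (sum * sum);
}
// e.g. one 1000-word free block              -> 1 - 1000000/1000000 = 0.0
//      ten 100-word free blocks (same total) -> 1 - 100000/1000000  = 0.9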
duke@435 2147
duke@435 2148 void CompactibleFreeListSpace::beginSweepFLCensus(
duke@435 2149 float inter_sweep_current,
ysr@1580 2150 float inter_sweep_estimate,
ysr@1580 2151 float intra_sweep_estimate) {
duke@435 2152 assert_locked();
duke@435 2153 size_t i;
duke@435 2154 for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
jmasa@4196 2155 AdaptiveFreeList<FreeChunk>* fl = &_indexedFreeList[i];
ysr@1580 2156 if (PrintFLSStatistics > 1) {
drchase@6680 2157 gclog_or_tty->print("size[" SIZE_FORMAT "] : ", i);
ysr@1580 2158 }
ysr@1580 2159 fl->compute_desired(inter_sweep_current, inter_sweep_estimate, intra_sweep_estimate);
jmasa@3732 2160 fl->set_coal_desired((ssize_t)((double)fl->desired() * CMSSmallCoalSurplusPercent));
jmasa@3732 2161 fl->set_before_sweep(fl->count());
jmasa@3732 2162 fl->set_bfr_surp(fl->surplus());
duke@435 2163 }
jmasa@3732 2164 _dictionary->begin_sweep_dict_census(CMSLargeCoalSurplusPercent,
duke@435 2165 inter_sweep_current,
ysr@1580 2166 inter_sweep_estimate,
ysr@1580 2167 intra_sweep_estimate);
duke@435 2168 }
duke@435 2169
duke@435 2170 void CompactibleFreeListSpace::setFLSurplus() {
duke@435 2171 assert_locked();
duke@435 2172 size_t i;
duke@435 2173 for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
jmasa@4196 2174 AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
duke@435 2175 fl->set_surplus(fl->count() -
ysr@1580 2176 (ssize_t)((double)fl->desired() * CMSSmallSplitSurplusPercent));
duke@435 2177 }
duke@435 2178 }
duke@435 2179
duke@435 2180 void CompactibleFreeListSpace::setFLHints() {
duke@435 2181 assert_locked();
duke@435 2182 size_t i;
duke@435 2183 size_t h = IndexSetSize;
duke@435 2184 for (i = IndexSetSize - 1; i != 0; i -= IndexSetStride) {
jmasa@4196 2185 AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
duke@435 2186 fl->set_hint(h);
duke@435 2187 if (fl->surplus() > 0) {
duke@435 2188 h = i;
duke@435 2189 }
duke@435 2190 }
duke@435 2191 }
duke@435 2192
duke@435 2193 void CompactibleFreeListSpace::clearFLCensus() {
duke@435 2194 assert_locked();
ysr@3264 2195 size_t i;
duke@435 2196 for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
jmasa@4196 2197 AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
jmasa@3732 2198 fl->set_prev_sweep(fl->count());
jmasa@3732 2199 fl->set_coal_births(0);
jmasa@3732 2200 fl->set_coal_deaths(0);
jmasa@3732 2201 fl->set_split_births(0);
jmasa@3732 2202 fl->set_split_deaths(0);
duke@435 2203 }
duke@435 2204 }
duke@435 2205
ysr@447 2206 void CompactibleFreeListSpace::endSweepFLCensus(size_t sweep_count) {
ysr@1580 2207 if (PrintFLSStatistics > 0) {
jmasa@3732 2208 HeapWord* largestAddr = (HeapWord*) dictionary()->find_largest_dict();
ysr@1580 2209 gclog_or_tty->print_cr("CMS: Large block " PTR_FORMAT,
drchase@6680 2210 p2i(largestAddr));
ysr@1580 2211 }
duke@435 2212 setFLSurplus();
duke@435 2213 setFLHints();
duke@435 2214 if (PrintGC && PrintFLSCensus > 0) {
ysr@447 2215 printFLCensus(sweep_count);
duke@435 2216 }
duke@435 2217 clearFLCensus();
duke@435 2218 assert_locked();
jmasa@3732 2219 _dictionary->end_sweep_dict_census(CMSLargeSplitSurplusPercent);
duke@435 2220 }
duke@435 2221
duke@435 2222 bool CompactibleFreeListSpace::coalOverPopulated(size_t size) {
duke@435 2223 if (size < SmallForDictionary) {
jmasa@4196 2224 AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
jmasa@3732 2225 return (fl->coal_desired() < 0) ||
jmasa@3732 2226 ((int)fl->count() > fl->coal_desired());
duke@435 2227 } else {
jmasa@3732 2228 return dictionary()->coal_dict_over_populated(size);
duke@435 2229 }
duke@435 2230 }
duke@435 2231
duke@435 2232 void CompactibleFreeListSpace::smallCoalBirth(size_t size) {
duke@435 2233 assert(size < SmallForDictionary, "Size too large for indexed list");
jmasa@4196 2234 AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
jmasa@3732 2235 fl->increment_coal_births();
duke@435 2236 fl->increment_surplus();
duke@435 2237 }
duke@435 2238
duke@435 2239 void CompactibleFreeListSpace::smallCoalDeath(size_t size) {
duke@435 2240 assert(size < SmallForDictionary, "Size too large for indexed list");
jmasa@4196 2241 AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
jmasa@3732 2242 fl->increment_coal_deaths();
duke@435 2243 fl->decrement_surplus();
duke@435 2244 }
duke@435 2245
duke@435 2246 void CompactibleFreeListSpace::coalBirth(size_t size) {
duke@435 2247 if (size < SmallForDictionary) {
duke@435 2248 smallCoalBirth(size);
duke@435 2249 } else {
jmasa@4196 2250 dictionary()->dict_census_update(size,
duke@435 2251 false /* split */,
duke@435 2252 true /* birth */);
duke@435 2253 }
duke@435 2254 }
duke@435 2255
duke@435 2256 void CompactibleFreeListSpace::coalDeath(size_t size) {
duke@435 2257 if(size < SmallForDictionary) {
duke@435 2258 smallCoalDeath(size);
duke@435 2259 } else {
jmasa@4196 2260 dictionary()->dict_census_update(size,
duke@435 2261 false /* split */,
duke@435 2262 false /* birth */);
duke@435 2263 }
duke@435 2264 }
duke@435 2265
duke@435 2266 void CompactibleFreeListSpace::smallSplitBirth(size_t size) {
duke@435 2267 assert(size < SmallForDictionary, "Size too large for indexed list");
jmasa@4196 2268 AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
jmasa@3732 2269 fl->increment_split_births();
duke@435 2270 fl->increment_surplus();
duke@435 2271 }
duke@435 2272
duke@435 2273 void CompactibleFreeListSpace::smallSplitDeath(size_t size) {
duke@435 2274 assert(size < SmallForDictionary, "Size too large for indexed list");
jmasa@4196 2275 AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
jmasa@3732 2276 fl->increment_split_deaths();
duke@435 2277 fl->decrement_surplus();
duke@435 2278 }
duke@435 2279
jmasa@3732 2280 void CompactibleFreeListSpace::split_birth(size_t size) {
duke@435 2281 if (size < SmallForDictionary) {
duke@435 2282 smallSplitBirth(size);
duke@435 2283 } else {
jmasa@4196 2284 dictionary()->dict_census_update(size,
duke@435 2285 true /* split */,
duke@435 2286 true /* birth */);
duke@435 2287 }
duke@435 2288 }
duke@435 2289
duke@435 2290 void CompactibleFreeListSpace::splitDeath(size_t size) {
duke@435 2291 if (size < SmallForDictionary) {
duke@435 2292 smallSplitDeath(size);
duke@435 2293 } else {
jmasa@4196 2294 dictionary()->dict_census_update(size,
duke@435 2295 true /* split */,
duke@435 2296 false /* birth */);
duke@435 2297 }
duke@435 2298 }
duke@435 2299
duke@435 2300 void CompactibleFreeListSpace::split(size_t from, size_t to1) {
duke@435 2301 size_t to2 = from - to1;
duke@435 2302 splitDeath(from);
jmasa@3732 2303 split_birth(to1);
jmasa@3732 2304 split_birth(to2);
duke@435 2305 }
duke@435 2306
duke@435 2307 void CompactibleFreeListSpace::print() const {
ysr@2294 2308 print_on(tty);
duke@435 2309 }
duke@435 2310
duke@435 2311 void CompactibleFreeListSpace::prepare_for_verify() {
duke@435 2312 assert_locked();
duke@435 2313 repairLinearAllocationBlocks();
duke@435 2314 // Verify that the SpoolBlocks look like free blocks of
duke@435 2315 // appropriate sizes... To be done ...
duke@435 2316 }
duke@435 2317
duke@435 2318 class VerifyAllBlksClosure: public BlkClosure {
coleenp@548 2319 private:
duke@435 2320 const CompactibleFreeListSpace* _sp;
duke@435 2321 const MemRegion _span;
ysr@2071 2322 HeapWord* _last_addr;
ysr@2071 2323 size_t _last_size;
ysr@2071 2324 bool _last_was_obj;
ysr@2071 2325 bool _last_was_live;
duke@435 2326
duke@435 2327 public:
duke@435 2328 VerifyAllBlksClosure(const CompactibleFreeListSpace* sp,
ysr@2071 2329 MemRegion span) : _sp(sp), _span(span),
ysr@2071 2330 _last_addr(NULL), _last_size(0),
ysr@2071 2331 _last_was_obj(false), _last_was_live(false) { }
duke@435 2332
coleenp@548 2333 virtual size_t do_blk(HeapWord* addr) {
duke@435 2334 size_t res;
ysr@2071 2335 bool was_obj = false;
ysr@2071 2336 bool was_live = false;
duke@435 2337 if (_sp->block_is_obj(addr)) {
ysr@2071 2338 was_obj = true;
duke@435 2339 oop p = oop(addr);
duke@435 2340 guarantee(p->is_oop(), "Should be an oop");
duke@435 2341 res = _sp->adjustObjectSize(p->size());
duke@435 2342 if (_sp->obj_is_alive(addr)) {
ysr@2071 2343 was_live = true;
duke@435 2344 p->verify();
duke@435 2345 }
duke@435 2346 } else {
duke@435 2347 FreeChunk* fc = (FreeChunk*)addr;
duke@435 2348 res = fc->size();
duke@435 2349 if (FLSVerifyLists && !fc->cantCoalesce()) {
jmasa@3732 2350 guarantee(_sp->verify_chunk_in_free_list(fc),
duke@435 2351 "Chunk should be on a free list");
duke@435 2352 }
duke@435 2353 }
ysr@2071 2354 if (res == 0) {
ysr@2071 2355 gclog_or_tty->print_cr("Livelock: no rank reduction!");
ysr@2071 2356 gclog_or_tty->print_cr(
ysr@2071 2357 " Current: addr = " PTR_FORMAT ", size = " SIZE_FORMAT ", obj = %s, live = %s \n"
ysr@2071 2358 " Previous: addr = " PTR_FORMAT ", size = " SIZE_FORMAT ", obj = %s, live = %s \n",
drchase@6680 2359 p2i(addr), res, was_obj ?"true":"false", was_live ?"true":"false",
drchase@6680 2360 p2i(_last_addr), _last_size, _last_was_obj?"true":"false", _last_was_live?"true":"false");
ysr@2071 2361 _sp->print_on(gclog_or_tty);
ysr@2071 2362 guarantee(false, "Seppuku!");
ysr@2071 2363 }
ysr@2071 2364 _last_addr = addr;
ysr@2071 2365 _last_size = res;
ysr@2071 2366 _last_was_obj = was_obj;
ysr@2071 2367 _last_was_live = was_live;
duke@435 2368 return res;
duke@435 2369 }
duke@435 2370 };
duke@435 2371
duke@435 2372 class VerifyAllOopsClosure: public OopClosure {
coleenp@548 2373 private:
duke@435 2374 const CMSCollector* _collector;
duke@435 2375 const CompactibleFreeListSpace* _sp;
duke@435 2376 const MemRegion _span;
duke@435 2377 const bool _past_remark;
duke@435 2378 const CMSBitMap* _bit_map;
duke@435 2379
coleenp@548 2380 protected:
coleenp@548 2381 void do_oop(void* p, oop obj) {
coleenp@548 2382 if (_span.contains(obj)) { // the interior oop points into CMS heap
coleenp@548 2383 if (!_span.contains(p)) { // reference from outside CMS heap
coleenp@548 2384 // Should be a valid object; the first disjunct below allows
coleenp@548 2385 // us to sidestep an assertion in block_is_obj() that insists
coleenp@548 2386 // that p be in _sp. Note that several generations (and spaces)
coleenp@548 2387 // are spanned by _span (CMS heap) above.
coleenp@548 2388 guarantee(!_sp->is_in_reserved(obj) ||
coleenp@548 2389 _sp->block_is_obj((HeapWord*)obj),
coleenp@548 2390 "Should be an object");
coleenp@548 2391 guarantee(obj->is_oop(), "Should be an oop");
coleenp@548 2392 obj->verify();
coleenp@548 2393 if (_past_remark) {
coleenp@548 2394 // Remark has been completed, the object should be marked
coleenp@548 2395 _bit_map->isMarked((HeapWord*)obj);
coleenp@548 2396 }
coleenp@548 2397 } else { // reference within CMS heap
coleenp@548 2398 if (_past_remark) {
coleenp@548 2399 // Remark has been completed -- so the referent should have
coleenp@548 2400 // been marked, if referring object is.
coleenp@548 2401 if (_bit_map->isMarked(_collector->block_start(p))) {
coleenp@548 2402 guarantee(_bit_map->isMarked((HeapWord*)obj), "Marking error?");
coleenp@548 2403 }
coleenp@548 2404 }
coleenp@548 2405 }
coleenp@548 2406 } else if (_sp->is_in_reserved(p)) {
coleenp@548 2407 // the reference is from FLS, and points out of FLS
coleenp@548 2408 guarantee(obj->is_oop(), "Should be an oop");
coleenp@548 2409 obj->verify();
coleenp@548 2410 }
coleenp@548 2411 }
coleenp@548 2412
coleenp@548 2413 template <class T> void do_oop_work(T* p) {
coleenp@548 2414 T heap_oop = oopDesc::load_heap_oop(p);
coleenp@548 2415 if (!oopDesc::is_null(heap_oop)) {
coleenp@548 2416 oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
coleenp@548 2417 do_oop(p, obj);
coleenp@548 2418 }
coleenp@548 2419 }
coleenp@548 2420
duke@435 2421 public:
duke@435 2422 VerifyAllOopsClosure(const CMSCollector* collector,
duke@435 2423 const CompactibleFreeListSpace* sp, MemRegion span,
duke@435 2424 bool past_remark, CMSBitMap* bit_map) :
coleenp@4037 2425 _collector(collector), _sp(sp), _span(span),
duke@435 2426 _past_remark(past_remark), _bit_map(bit_map) { }
duke@435 2427
coleenp@548 2428 virtual void do_oop(oop* p) { VerifyAllOopsClosure::do_oop_work(p); }
coleenp@548 2429 virtual void do_oop(narrowOop* p) { VerifyAllOopsClosure::do_oop_work(p); }
duke@435 2430 };
duke@435 2431
brutisso@3711 2432 void CompactibleFreeListSpace::verify() const {
duke@435 2433 assert_lock_strong(&_freelistLock);
duke@435 2434 verify_objects_initialized();
duke@435 2435 MemRegion span = _collector->_span;
duke@435 2436 bool past_remark = (_collector->abstract_state() ==
duke@435 2437 CMSCollector::Sweeping);
duke@435 2438
duke@435 2439 ResourceMark rm;
duke@435 2440 HandleMark hm;
duke@435 2441
duke@435 2442 // Check integrity of CFL data structures
duke@435 2443 _promoInfo.verify();
duke@435 2444 _dictionary->verify();
duke@435 2445 if (FLSVerifyIndexTable) {
duke@435 2446 verifyIndexedFreeLists();
duke@435 2447 }
duke@435 2448 // Check integrity of all objects and free blocks in space
duke@435 2449 {
duke@435 2450 VerifyAllBlksClosure cl(this, span);
duke@435 2451 ((CompactibleFreeListSpace*)this)->blk_iterate(&cl); // cast off const
duke@435 2452 }
duke@435 2453 // Check that all references in the heap to FLS
duke@435 2454 // are to valid objects in FLS or that references in
duke@435 2455 // FLS are to valid objects elsewhere in the heap
duke@435 2456 if (FLSVerifyAllHeapReferences)
duke@435 2457 {
duke@435 2458 VerifyAllOopsClosure cl(_collector, this, span, past_remark,
duke@435 2459 _collector->markBitMap());
duke@435 2460 CollectedHeap* ch = Universe::heap();
coleenp@4037 2461
coleenp@4037 2462 // Iterate over all oops in the heap. Uses the _no_header version
coleenp@4037 2463 // since we are not interested in following the klass pointers.
coleenp@4037 2464 ch->oop_iterate_no_header(&cl);
duke@435 2465 }
duke@435 2466
duke@435 2467 if (VerifyObjectStartArray) {
duke@435 2468 // Verify the block offset table
duke@435 2469 _bt.verify();
duke@435 2470 }
duke@435 2471 }
duke@435 2472
duke@435 2473 #ifndef PRODUCT
duke@435 2474 void CompactibleFreeListSpace::verifyFreeLists() const {
duke@435 2475 if (FLSVerifyLists) {
duke@435 2476 _dictionary->verify();
duke@435 2477 verifyIndexedFreeLists();
duke@435 2478 } else {
duke@435 2479 if (FLSVerifyDictionary) {
duke@435 2480 _dictionary->verify();
duke@435 2481 }
duke@435 2482 if (FLSVerifyIndexTable) {
duke@435 2483 verifyIndexedFreeLists();
duke@435 2484 }
duke@435 2485 }
duke@435 2486 }
duke@435 2487 #endif
duke@435 2488
duke@435 2489 void CompactibleFreeListSpace::verifyIndexedFreeLists() const {
duke@435 2490 size_t i = 0;
ysr@3264 2491 for (; i < IndexSetStart; i++) {
duke@435 2492 guarantee(_indexedFreeList[i].head() == NULL, "should be NULL");
duke@435 2493 }
duke@435 2494 for (; i < IndexSetSize; i++) {
duke@435 2495 verifyIndexedFreeList(i);
duke@435 2496 }
duke@435 2497 }
duke@435 2498
duke@435 2499 void CompactibleFreeListSpace::verifyIndexedFreeList(size_t size) const {
ysr@1580 2500 FreeChunk* fc = _indexedFreeList[size].head();
ysr@1580 2501 FreeChunk* tail = _indexedFreeList[size].tail();
ysr@1580 2502 size_t num = _indexedFreeList[size].count();
ysr@1580 2503 size_t n = 0;
ysr@3264 2504 guarantee(((size >= IndexSetStart) && (size % IndexSetStride == 0)) || fc == NULL,
ysr@3220 2505 "Slot should have been empty");
ysr@1580 2506 for (; fc != NULL; fc = fc->next(), n++) {
duke@435 2507 guarantee(fc->size() == size, "Size inconsistency");
jmasa@3732 2508 guarantee(fc->is_free(), "!free?");
duke@435 2509 guarantee(fc->next() == NULL || fc->next()->prev() == fc, "Broken list");
ysr@1580 2510 guarantee((fc->next() == NULL) == (fc == tail), "Incorrect tail");
duke@435 2511 }
ysr@1580 2512 guarantee(n == num, "Incorrect count");
duke@435 2513 }
duke@435 2514
duke@435 2515 #ifndef PRODUCT
ysr@3220 2516 void CompactibleFreeListSpace::check_free_list_consistency() const {
goetz@6337 2517 assert((TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >::min_size() <= IndexSetSize),
duke@435 2518 "Some sizes can't be allocated without recourse to"
duke@435 2519 " linear allocation buffers");
goetz@6337 2520 assert((TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >::min_size()*HeapWordSize == sizeof(TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >)),
duke@435 2521 "else MIN_TREE_CHUNK_SIZE is wrong");
brutisso@3807 2522 assert(IndexSetStart != 0, "IndexSetStart not initialized");
brutisso@3807 2523 assert(IndexSetStride != 0, "IndexSetStride not initialized");
duke@435 2524 }
duke@435 2525 #endif
duke@435 2526
ysr@447 2527 void CompactibleFreeListSpace::printFLCensus(size_t sweep_count) const {
duke@435 2528 assert_lock_strong(&_freelistLock);
jmasa@4196 2529 AdaptiveFreeList<FreeChunk> total;
ysr@447 2530 gclog_or_tty->print("end sweep# " SIZE_FORMAT "\n", sweep_count);
jmasa@4196 2531 AdaptiveFreeList<FreeChunk>::print_labels_on(gclog_or_tty, "size");
jmasa@3732 2532 size_t total_free = 0;
duke@435 2533 for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
jmasa@4196 2534 const AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
jmasa@3732 2535 total_free += fl->count() * fl->size();
ysr@447 2536 if (i % (40*IndexSetStride) == 0) {
jmasa@4196 2537 AdaptiveFreeList<FreeChunk>::print_labels_on(gclog_or_tty, "size");
ysr@447 2538 }
ysr@447 2539 fl->print_on(gclog_or_tty);
jmasa@3732 2540 total.set_bfr_surp( total.bfr_surp() + fl->bfr_surp() );
ysr@447 2541 total.set_surplus( total.surplus() + fl->surplus() );
ysr@447 2542 total.set_desired( total.desired() + fl->desired() );
jmasa@3732 2543 total.set_prev_sweep( total.prev_sweep() + fl->prev_sweep() );
jmasa@3732 2544 total.set_before_sweep(total.before_sweep() + fl->before_sweep());
ysr@447 2545 total.set_count( total.count() + fl->count() );
jmasa@3732 2546 total.set_coal_births( total.coal_births() + fl->coal_births() );
jmasa@3732 2547 total.set_coal_deaths( total.coal_deaths() + fl->coal_deaths() );
jmasa@3732 2548 total.set_split_births(total.split_births() + fl->split_births());
jmasa@3732 2549 total.set_split_deaths(total.split_deaths() + fl->split_deaths());
duke@435 2550 }
ysr@447 2551 total.print_on(gclog_or_tty, "TOTAL");
ysr@447 2552 gclog_or_tty->print_cr("Total free in indexed lists "
jmasa@3732 2553 SIZE_FORMAT " words", total_free);
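// "growth" is the net number of chunks created since the previous sweep,
// (split_births + coal_births) - (split_deaths + coal_deaths), relative to
// the count recorded at the previous sweep; "deficit" is the fraction of the
// desired count currently missing, (desired - count) / desired.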
duke@435 2554 gclog_or_tty->print("growth: %8.5f deficit: %8.5f\n",
jmasa@3732 2555 (double)(total.split_births()+total.coal_births()-total.split_deaths()-total.coal_deaths())/
jmasa@3732 2556 (total.prev_sweep() != 0 ? (double)total.prev_sweep() : 1.0),
ysr@447 2557 (double)(total.desired() - total.count())/(total.desired() != 0 ? (double)total.desired() : 1.0));
jmasa@3732 2558 _dictionary->print_dict_census();
duke@435 2559 }
duke@435 2560
ysr@1580 2561 ///////////////////////////////////////////////////////////////////////////
ysr@1580 2562 // CFLS_LAB
ysr@1580 2563 ///////////////////////////////////////////////////////////////////////////
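// A CFLS_LAB is a per-GC-worker local allocation buffer over the
// CompactibleFreeListSpace: each worker promotes into its own
// _indexedFreeList[] of small chunks, refilled in bulk from the shared space,
// so that most promotions avoid the shared free-list locks. The static tables
// below aggregate per-size usage across all workers and drive the adaptive
// estimate of how many blocks to claim per refill.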
ysr@1580 2564
ysr@1580 2565 #define VECTOR_257(x) \
ysr@1580 2566 /* 1 2 3 4 5 6 7 8 9 1x 11 12 13 14 15 16 17 18 19 2x 21 22 23 24 25 26 27 28 29 3x 31 32 */ \
ysr@1580 2567 { x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, \
ysr@1580 2568 x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, \
ysr@1580 2569 x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, \
ysr@1580 2570 x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, \
ysr@1580 2571 x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, \
ysr@1580 2572 x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, \
ysr@1580 2573 x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, \
ysr@1580 2574 x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, \
ysr@1580 2575 x }
ysr@1580 2576
ysr@1580 2577 // Initialize with default setting of CMSParPromoteBlocksToClaim, _not_
ysr@1580 2578 // OldPLABSize, whose static default is different; if overridden at the
ysr@1580 2579 // command-line, this will get reinitialized via a call to
ysr@1580 2580 // modify_initialization() below.
ysr@1580 2581 AdaptiveWeightedAverage CFLS_LAB::_blocks_to_claim[] =
ysr@1580 2582 VECTOR_257(AdaptiveWeightedAverage(OldPLABWeight, (float)CMSParPromoteBlocksToClaim));
ysr@1580 2583 size_t CFLS_LAB::_global_num_blocks[] = VECTOR_257(0);
jmasa@3357 2584 uint CFLS_LAB::_global_num_workers[] = VECTOR_257(0);
duke@435 2585
duke@435 2586 CFLS_LAB::CFLS_LAB(CompactibleFreeListSpace* cfls) :
duke@435 2587 _cfls(cfls)
duke@435 2588 {
ysr@1580 2589 assert(CompactibleFreeListSpace::IndexSetSize == 257, "Modify VECTOR_257() macro above");
duke@435 2590 for (size_t i = CompactibleFreeListSpace::IndexSetStart;
duke@435 2591 i < CompactibleFreeListSpace::IndexSetSize;
duke@435 2592 i += CompactibleFreeListSpace::IndexSetStride) {
duke@435 2593 _indexedFreeList[i].set_size(i);
ysr@1580 2594 _num_blocks[i] = 0;
ysr@1580 2595 }
ysr@1580 2596 }
ysr@1580 2597
ysr@1580 2598 static bool _CFLS_LAB_modified = false;
ysr@1580 2599
ysr@1580 2600 void CFLS_LAB::modify_initialization(size_t n, unsigned wt) {
ysr@1580 2601 assert(!_CFLS_LAB_modified, "Call only once");
ysr@1580 2602 _CFLS_LAB_modified = true;
ysr@1580 2603 for (size_t i = CompactibleFreeListSpace::IndexSetStart;
ysr@1580 2604 i < CompactibleFreeListSpace::IndexSetSize;
ysr@1580 2605 i += CompactibleFreeListSpace::IndexSetStride) {
ysr@1580 2606 _blocks_to_claim[i].modify(n, wt, true /* force */);
duke@435 2607 }
duke@435 2608 }
duke@435 2609
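// Two allocation paths: requests of IndexSetSize words or more go straight to
// the shared dictionary under parDictionaryAllocLock(), while smaller requests
// are served from this worker's private indexed free list, which is refilled
// in bulk (get_from_global_pool) only when it runs dry. The returned chunk is
// marked not-free and, in debug builds, mangled with a distinct pattern.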
duke@435 2610 HeapWord* CFLS_LAB::alloc(size_t word_sz) {
duke@435 2611 FreeChunk* res;
ysr@2132 2612 assert(word_sz == _cfls->adjustObjectSize(word_sz), "Error");
duke@435 2613 if (word_sz >= CompactibleFreeListSpace::IndexSetSize) {
duke@435 2614 // This locking manages sync with other large object allocations.
duke@435 2615 MutexLockerEx x(_cfls->parDictionaryAllocLock(),
duke@435 2616 Mutex::_no_safepoint_check_flag);
duke@435 2617 res = _cfls->getChunkFromDictionaryExact(word_sz);
duke@435 2618 if (res == NULL) return NULL;
duke@435 2619 } else {
jmasa@4196 2620 AdaptiveFreeList<FreeChunk>* fl = &_indexedFreeList[word_sz];
duke@435 2621 if (fl->count() == 0) {
duke@435 2622 // Attempt to refill this local free list.
ysr@1580 2623 get_from_global_pool(word_sz, fl);
duke@435 2624 // If it didn't work, give up.
duke@435 2625 if (fl->count() == 0) return NULL;
duke@435 2626 }
jmasa@3732 2627 res = fl->get_chunk_at_head();
duke@435 2628 assert(res != NULL, "Why was count non-zero?");
duke@435 2629 }
duke@435 2630 res->markNotFree();
jmasa@3732 2631 assert(!res->is_free(), "shouldn't be marked free");
coleenp@622 2632 assert(oop(res)->klass_or_null() == NULL, "should look uninitialized");
duke@435 2633 // mangle a just allocated object with a distinct pattern.
duke@435 2634 debug_only(res->mangleAllocated(word_sz));
duke@435 2635 return (HeapWord*)res;
duke@435 2636 }
duke@435 2637
ysr@1580 2638 // Get a chunk of blocks of the right size and update related
ysr@1580 2639 // book-keeping stats
jmasa@4196 2640 void CFLS_LAB::get_from_global_pool(size_t word_sz, AdaptiveFreeList<FreeChunk>* fl) {
ysr@1580 2641 // Get the #blocks we want to claim
ysr@1580 2642 size_t n_blks = (size_t)_blocks_to_claim[word_sz].average();
ysr@1580 2643 assert(n_blks > 0, "Error");
ysr@1580 2644 assert(ResizePLAB || n_blks == OldPLABSize, "Error");
ysr@1580 2645 // In some cases, when the application has a phase change,
ysr@1580 2646 // there may be a sudden and sharp shift in the object survival
ysr@1580 2647 // profile, and updating the counts at the end of a scavenge
ysr@1580 2648 // may not be quick enough, giving rise to large scavenge pauses
ysr@1580 2649 // during these phase changes. It is beneficial to detect such
ysr@1580 2650 // changes on-the-fly during a scavenge and avoid such a phase-change
ysr@1580 2651 // pothole. The following code is a heuristic attempt to do that.
ysr@1580 2652 // It is protected by a product flag until we have gained
ysr@1580 2653 // enough experience with this heuristic and fine-tuned its behaviour.
ysr@1580 2654 // WARNING: This might increase fragmentation if we overreact to
ysr@1580 2655 // small spikes, so some kind of historical smoothing based on
ysr@1580 2656 // previous experience with the greater reactivity might be useful.
ysr@1580 2657 // Lacking sufficient experience, CMSOldPLABResizeQuicker is disabled by
ysr@1580 2658 // default.
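// For illustration only (the flag values here are assumed, not authoritative):
// with n_blks == 50, CMSOldPLABToleranceFactor == 4, CMSOldPLABNumRefills == 4
// and CMSOldPLABReactivityFactor == 2, a thread that has already taken
// _num_blocks[word_sz] == 4000 blocks this scavenge computes
//   multiple = 4000 / (4 * 4 * 50) = 5
//   n_blks  += 2 * 5 * 50 = 500, giving 550, then clamped to CMSOldPLABMax.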
ysr@1580 2659 if (ResizeOldPLAB && CMSOldPLABResizeQuicker) {
ysr@1580 2660 size_t multiple = _num_blocks[word_sz]/(CMSOldPLABToleranceFactor*CMSOldPLABNumRefills*n_blks);
ysr@1580 2661 n_blks += CMSOldPLABReactivityFactor*multiple*n_blks;
ysr@1580 2662 n_blks = MIN2(n_blks, CMSOldPLABMax);
ysr@1580 2663 }
ysr@1580 2664 assert(n_blks > 0, "Error");
ysr@1580 2665 _cfls->par_get_chunk_of_blocks(word_sz, n_blks, fl);
ysr@1580 2666 // Update stats table entry for this block size
ysr@1580 2667 _num_blocks[word_sz] += fl->count();
ysr@1580 2668 }
ysr@1580 2669
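// Folds the per-size usage recorded by retire() into the adaptive estimates:
// for each block size with at least one participating worker, the sample is
//   _global_num_blocks[i] / (_global_num_workers[i] * CMSOldPLABNumRefills)
// clamped to [CMSOldPLABMin, CMSOldPLABMax], i.e. roughly the number of blocks
// a worker consumed per refill; it is smoothed into _blocks_to_claim[i] when
// ResizeOldPLAB is enabled, and the global counters are reset for the next round.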
ysr@1580 2670 void CFLS_LAB::compute_desired_plab_size() {
ysr@1580 2671 for (size_t i = CompactibleFreeListSpace::IndexSetStart;
duke@435 2672 i < CompactibleFreeListSpace::IndexSetSize;
duke@435 2673 i += CompactibleFreeListSpace::IndexSetStride) {
ysr@1580 2674 assert((_global_num_workers[i] == 0) == (_global_num_blocks[i] == 0),
ysr@1580 2675 "Counter inconsistency");
ysr@1580 2676 if (_global_num_workers[i] > 0) {
ysr@1580 2677 // Need to smooth wrt historical average
ysr@1580 2678 if (ResizeOldPLAB) {
ysr@1580 2679 _blocks_to_claim[i].sample(
ysr@1580 2680 MAX2((size_t)CMSOldPLABMin,
ysr@1580 2681 MIN2((size_t)CMSOldPLABMax,
ysr@1580 2682 _global_num_blocks[i]/(_global_num_workers[i]*CMSOldPLABNumRefills))));
ysr@1580 2683 }
ysr@1580 2684 // Reset counters for next round
ysr@1580 2685 _global_num_workers[i] = 0;
ysr@1580 2686 _global_num_blocks[i] = 0;
ysr@1580 2687 if (PrintOldPLAB) {
drchase@6680 2688 gclog_or_tty->print_cr("[" SIZE_FORMAT "]: " SIZE_FORMAT, i, (size_t)_blocks_to_claim[i].average());
ysr@1580 2689 }
duke@435 2690 }
duke@435 2691 }
duke@435 2692 }
duke@435 2693
ysr@3220 2694 // If this is changed in the future to allow parallel
ysr@3220 2695 // access, one would need to take the FL locks and,
ysr@3220 2696 // depending on how it is used, stagger access from
ysr@3220 2697 // parallel threads to reduce contention.
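// retire() hands back whatever this worker did not use: num_retire chunks are
// prepended to the shared indexed lists, while the difference
// _num_blocks[i] - num_retire (the blocks actually consumed) is accumulated
// into _global_num_blocks[i] for the PLAB resizing done above.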
ysr@1580 2698 void CFLS_LAB::retire(int tid) {
ysr@1580 2699 // We run this single threaded with the world stopped;
ysr@1580 2700 // so no need for locks and such.
ysr@1580 2701 NOT_PRODUCT(Thread* t = Thread::current();)
ysr@1580 2702 assert(Thread::current()->is_VM_thread(), "Error");
ysr@1580 2703 for (size_t i = CompactibleFreeListSpace::IndexSetStart;
ysr@1580 2704 i < CompactibleFreeListSpace::IndexSetSize;
ysr@1580 2705 i += CompactibleFreeListSpace::IndexSetStride) {
ysr@1580 2706 assert(_num_blocks[i] >= (size_t)_indexedFreeList[i].count(),
ysr@1580 2707 "Can't retire more than what we obtained");
ysr@1580 2708 if (_num_blocks[i] > 0) {
ysr@1580 2709 size_t num_retire = _indexedFreeList[i].count();
ysr@1580 2710 assert(_num_blocks[i] > num_retire, "Should have used at least one");
ysr@1580 2711 {
ysr@3220 2712 // MutexLockerEx x(_cfls->_indexedFreeListParLocks[i],
ysr@3220 2713 // Mutex::_no_safepoint_check_flag);
ysr@3220 2714
ysr@1580 2715 // Update globals stats for num_blocks used
ysr@1580 2716 _global_num_blocks[i] += (_num_blocks[i] - num_retire);
ysr@1580 2717 _global_num_workers[i]++;
jmasa@3357 2718 assert(_global_num_workers[i] <= ParallelGCThreads, "Too big");
ysr@1580 2719 if (num_retire > 0) {
ysr@1580 2720 _cfls->_indexedFreeList[i].prepend(&_indexedFreeList[i]);
ysr@1580 2721 // Reset this list.
jmasa@4196 2722 _indexedFreeList[i] = AdaptiveFreeList<FreeChunk>();
ysr@1580 2723 _indexedFreeList[i].set_size(i);
ysr@1580 2724 }
ysr@1580 2725 }
ysr@1580 2726 if (PrintOldPLAB) {
drchase@6680 2727 gclog_or_tty->print_cr("%d[" SIZE_FORMAT "]: " SIZE_FORMAT "/" SIZE_FORMAT "/" SIZE_FORMAT,
ysr@1580 2728 tid, i, num_retire, _num_blocks[i], (size_t)_blocks_to_claim[i].average());
ysr@1580 2729 }
ysr@1580 2730 // Reset stats for next round
ysr@1580 2731 _num_blocks[i] = 0;
ysr@1580 2732 }
ysr@1580 2733 }
ysr@1580 2734 }
ysr@1580 2735
jmasa@7474 2736 // Used by par_get_chunk_of_blocks() for chunks taken from the
jmasa@7474 2737 // indexed free lists. Looks for a chunk whose size is a multiple
jmasa@7474 2738 // of "word_sz" and, if one is found, splits it into chunks of size
jmasa@7474 2739 // "word_sz", adding them to the free list "fl". "n" is the maximum
jmasa@7474 2740 // number of chunks to be added to "fl".
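// For example (sizes chosen only for illustration): with word_sz == 8 and
// n == 100, the 8-word list is tried first (k == 1); if that list is empty and
// CMSSplitIndexedFreeListBlocks is enabled, k == 2 consults the 16-word list,
// from which up to MAX2(100 / 2, 1) == 50 chunks would be taken and each split
// in place into two 8-word chunks before being pushed onto "fl".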
jmasa@7474 2741 bool CompactibleFreeListSpace::par_get_chunk_of_blocks_IFL(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl) {
duke@435 2742
ysr@1580 2743 // We'll try all multiples of word_sz in the indexed set, starting with
ysr@1580 2744 // word_sz itself and, if CMSSplitIndexedFreeListBlocks, try larger multiples,
ysr@1580 2745 // then try getting a big chunk and splitting it.
ysr@1580 2746 {
ysr@1580 2747 bool found;
ysr@1580 2748 int k;
ysr@1580 2749 size_t cur_sz;
ysr@1580 2750 for (k = 1, cur_sz = k * word_sz, found = false;
ysr@1580 2751 (cur_sz < CompactibleFreeListSpace::IndexSetSize) &&
ysr@1580 2752 (CMSSplitIndexedFreeListBlocks || k <= 1);
ysr@1580 2753 k++, cur_sz = k * word_sz) {
jmasa@4196 2754 AdaptiveFreeList<FreeChunk> fl_for_cur_sz; // Empty.
ysr@1580 2755 fl_for_cur_sz.set_size(cur_sz);
ysr@1580 2756 {
ysr@1580 2757 MutexLockerEx x(_indexedFreeListParLocks[cur_sz],
ysr@1580 2758 Mutex::_no_safepoint_check_flag);
jmasa@4196 2759 AdaptiveFreeList<FreeChunk>* gfl = &_indexedFreeList[cur_sz];
ysr@1580 2760 if (gfl->count() != 0) {
ysr@1580 2761 // nn is the number of chunks of size cur_sz that
ysr@1580 2762 // we'd need to split k-ways each, in order to create
ysr@1580 2763 // "n" chunks of size word_sz each.
ysr@1580 2764 const size_t nn = MAX2(n/k, (size_t)1);
ysr@1580 2765 gfl->getFirstNChunksFromList(nn, &fl_for_cur_sz);
ysr@1580 2766 found = true;
ysr@1580 2767 if (k > 1) {
ysr@1580 2768 // Update split death stats for the cur_sz-size blocks list:
ysr@1580 2769 // we increment the split death count by the number of blocks
ysr@1580 2770 // we just took from the cur_sz-size blocks list and which
ysr@1580 2771 // we will be splitting below.
jmasa@3732 2772 ssize_t deaths = gfl->split_deaths() +
ysr@1580 2773 fl_for_cur_sz.count();
jmasa@3732 2774 gfl->set_split_deaths(deaths);
ysr@1580 2775 }
ysr@1580 2776 }
ysr@1580 2777 }
ysr@1580 2778 // Now transfer fl_for_cur_sz to fl. Common case, we hope, is k = 1.
ysr@1580 2779 if (found) {
ysr@1580 2780 if (k == 1) {
ysr@1580 2781 fl->prepend(&fl_for_cur_sz);
ysr@1580 2782 } else {
ysr@1580 2783 // Divide each block on fl_for_cur_sz up k ways.
ysr@1580 2784 FreeChunk* fc;
jmasa@3732 2785 while ((fc = fl_for_cur_sz.get_chunk_at_head()) != NULL) {
ysr@1580 2786 // Must do this in reverse order, so that anybody attempting to
ysr@1580 2787 // access the main chunk sees it as a single free block until we
ysr@1580 2788 // change it.
ysr@1580 2789 size_t fc_size = fc->size();
jmasa@3732 2790 assert(fc->is_free(), "Error");
ysr@1580 2791 for (int i = k-1; i >= 0; i--) {
ysr@1580 2792 FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz);
ysr@2071 2793 assert((i != 0) ||
jmasa@3732 2794 ((fc == ffc) && ffc->is_free() &&
ysr@2071 2795 (ffc->size() == k*word_sz) && (fc_size == word_sz)),
ysr@2071 2796 "Counting error");
jmasa@3732 2797 ffc->set_size(word_sz);
jmasa@3732 2798 ffc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
jmasa@3732 2799 ffc->link_next(NULL);
ysr@1580 2800 // Above must occur before BOT is updated below.
ysr@2071 2801 OrderAccess::storestore();
ysr@2071 2802 // splitting from the right, fc_size == i * word_sz
ysr@2071 2803 _bt.mark_block((HeapWord*)ffc, word_sz, true /* reducing */);
ysr@1580 2804 fc_size -= word_sz;
ysr@2071 2805 assert(fc_size == i*word_sz, "Error");
ysr@2071 2806 _bt.verify_not_unallocated((HeapWord*)ffc, word_sz);
ysr@1580 2807 _bt.verify_single_block((HeapWord*)fc, fc_size);
ysr@2071 2808 _bt.verify_single_block((HeapWord*)ffc, word_sz);
ysr@1580 2809 // Push this on "fl".
jmasa@3732 2810 fl->return_chunk_at_head(ffc);
ysr@1580 2811 }
ysr@1580 2812 // TRAP
ysr@1580 2813 assert(fl->tail()->next() == NULL, "List invariant.");
ysr@1580 2814 }
ysr@1580 2815 }
ysr@1580 2816 // Update birth stats for this block size.
ysr@1580 2817 size_t num = fl->count();
ysr@1580 2818 MutexLockerEx x(_indexedFreeListParLocks[word_sz],
ysr@1580 2819 Mutex::_no_safepoint_check_flag);
jmasa@3732 2820 ssize_t births = _indexedFreeList[word_sz].split_births() + num;
jmasa@3732 2821 _indexedFreeList[word_sz].set_split_births(births);
jmasa@7474 2822 return true;
duke@435 2823 }
duke@435 2824 }
jmasa@7474 2825 return found;
duke@435 2826 }
jmasa@7474 2827 }
jmasa@7474 2828
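// Finds a chunk in the dictionary large enough to be carved into "n" blocks of
// "word_sz" words each, reducing "n" if only a smaller chunk is available and
// dropping one more block if the leftover would be smaller than MinChunkSize.
// Any leftover is split off and returned to the dictionary (when it is
// IndexSetSize words or larger) or to the matching indexed free list. The
// caller receives a free chunk of exactly n * word_sz words, or NULL if no
// suitable chunk could be found.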
jmasa@7474 2829 FreeChunk* CompactibleFreeListSpace::get_n_way_chunk_to_split(size_t word_sz, size_t n) {
jmasa@7474 2830
duke@435 2831 FreeChunk* fc = NULL;
duke@435 2832 FreeChunk* rem_fc = NULL;
duke@435 2833 size_t rem;
duke@435 2834 {
duke@435 2835 MutexLockerEx x(parDictionaryAllocLock(),
duke@435 2836 Mutex::_no_safepoint_check_flag);
duke@435 2837 while (n > 0) {
jmasa@4196 2838 fc = dictionary()->get_chunk(MAX2(n * word_sz, _dictionary->min_size()),
jmasa@3730 2839 FreeBlockDictionary<FreeChunk>::atLeast);
duke@435 2840 if (fc != NULL) {
duke@435 2841 break;
duke@435 2842 } else {
duke@435 2843 n--;
duke@435 2844 }
duke@435 2845 }
jmasa@7474 2846 if (fc == NULL) return NULL;
ysr@2071 2847 // Otherwise, split up that block.
ysr@1580 2848 assert((ssize_t)n >= 1, "Control point invariant");
jmasa@3732 2849 assert(fc->is_free(), "Error: should be a free block");
ysr@2071 2850 _bt.verify_single_block((HeapWord*)fc, fc->size());
ysr@1580 2851 const size_t nn = fc->size() / word_sz;
duke@435 2852 n = MIN2(nn, n);
ysr@1580 2853 assert((ssize_t)n >= 1, "Control point invariant");
duke@435 2854 rem = fc->size() - n * word_sz;
duke@435 2855 // If there is a remainder, and it's too small, allocate one fewer.
duke@435 2856 if (rem > 0 && rem < MinChunkSize) {
duke@435 2857 n--; rem += word_sz;
duke@435 2858 }
jmasa@1583 2859 // Note that at this point we may have n == 0.
jmasa@1583 2860 assert((ssize_t)n >= 0, "Control point invariant");
jmasa@1583 2861
jmasa@1583 2862 // If n is 0, the chunk fc that was found is not large
jmasa@1583 2863 // enough to leave a viable remainder. We are unable to
jmasa@1583 2864 // allocate even one block. Return fc to the
jmasa@1583 2865 // dictionary and return, leaving "fl" empty.
jmasa@1583 2866 if (n == 0) {
jmasa@1583 2867 returnChunkToDictionary(fc);
jmasa@7474 2868 return NULL;
jmasa@1583 2869 }
jmasa@1583 2870
jmasa@7474 2871 _bt.allocated((HeapWord*)fc, fc->size(), true /* reducing */); // update _unallocated_blk
jmasa@7474 2872 dictionary()->dict_census_update(fc->size(),
jmasa@7474 2873 true /*split*/,
jmasa@7474 2874 false /*birth*/);
jmasa@7474 2875
duke@435 2876 // First return the remainder, if any.
duke@435 2877 // Note that we hold the lock until we decide if we're going to give
ysr@1580 2878 // back the remainder to the dictionary, since a concurrent allocation
duke@435 2879 // may otherwise see the heap as empty. (We're willing to take that
duke@435 2880 // hit if the block is a small block.)
duke@435 2881 if (rem > 0) {
duke@435 2882 size_t prefix_size = n * word_sz;
duke@435 2883 rem_fc = (FreeChunk*)((HeapWord*)fc + prefix_size);
jmasa@3732 2884 rem_fc->set_size(rem);
jmasa@3732 2885 rem_fc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
jmasa@3732 2886 rem_fc->link_next(NULL);
duke@435 2887 // Above must occur before BOT is updated below.
ysr@1580 2888 assert((ssize_t)n > 0 && prefix_size > 0 && rem_fc > fc, "Error");
ysr@2071 2889 OrderAccess::storestore();
duke@435 2890 _bt.split_block((HeapWord*)fc, fc->size(), prefix_size);
jmasa@3732 2891 assert(fc->is_free(), "Error");
jmasa@3732 2892 fc->set_size(prefix_size);
duke@435 2893 if (rem >= IndexSetSize) {
duke@435 2894 returnChunkToDictionary(rem_fc);
jmasa@4196 2895 dictionary()->dict_census_update(rem, true /*split*/, true /*birth*/);
duke@435 2896 rem_fc = NULL;
duke@435 2897 }
duke@435 2898 // Otherwise, return it to the small list below.
duke@435 2899 }
duke@435 2900 }
duke@435 2901 if (rem_fc != NULL) {
duke@435 2902 MutexLockerEx x(_indexedFreeListParLocks[rem],
duke@435 2903 Mutex::_no_safepoint_check_flag);
duke@435 2904 _bt.verify_not_unallocated((HeapWord*)rem_fc, rem_fc->size());
jmasa@3732 2905 _indexedFreeList[rem].return_chunk_at_head(rem_fc);
duke@435 2906 smallSplitBirth(rem);
duke@435 2907 }
jmasa@7474 2908 assert(n * word_sz == fc->size(),
jmasa@7474 2909 err_msg("Chunk size " SIZE_FORMAT " is not exactly splittable into "
jmasa@7474 2910 SIZE_FORMAT " chunks of size " SIZE_FORMAT,
jmasa@7474 2911 fc->size(), n, word_sz));
jmasa@7474 2912 return fc;
jmasa@7474 2913 }
jmasa@7474 2914
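// Takes the single large chunk obtained from the dictionary and splits it,
// right to left, into blocks of "word_sz" words that are pushed onto "fl".
// Carving from the right, with a storestore barrier before each BOT update,
// keeps the original chunk looking like one free block to concurrent observers
// until its own header is resized last.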
jmasa@7474 2915 void CompactibleFreeListSpace::par_get_chunk_of_blocks_dictionary(size_t word_sz, size_t targeted_number_of_chunks, AdaptiveFreeList<FreeChunk>* fl) {
jmasa@7474 2916
jmasa@7474 2917 FreeChunk* fc = get_n_way_chunk_to_split(word_sz, targeted_number_of_chunks);
jmasa@7474 2918
jmasa@7474 2919 if (fc == NULL) {
jmasa@7474 2920 return;
jmasa@7474 2921 }
jmasa@7474 2922
jmasa@7474 2923 size_t n = fc->size() / word_sz;
jmasa@7474 2924
jmasa@7474 2925 assert((ssize_t)n > 0, "Consistency");
duke@435 2926 // Now do the splitting up.
duke@435 2927 // Must do this in reverse order, so that anybody attempting to
duke@435 2928 // access the main chunk sees it as a single free block until we
duke@435 2929 // change it.
duke@435 2930 size_t fc_size = n * word_sz;
duke@435 2931 // All but first chunk in this loop
duke@435 2932 for (ssize_t i = n-1; i > 0; i--) {
duke@435 2933 FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz);
jmasa@3732 2934 ffc->set_size(word_sz);
jmasa@3732 2935 ffc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
jmasa@3732 2936 ffc->link_next(NULL);
duke@435 2937 // Above must occur before BOT is updated below.
ysr@2071 2938 OrderAccess::storestore();
duke@435 2939 // splitting from the right; at this point fc_size == (i + 1) * word_sz
ysr@2071 2940 _bt.mark_block((HeapWord*)ffc, word_sz, true /* reducing */);
duke@435 2941 fc_size -= word_sz;
duke@435 2942 _bt.verify_not_unallocated((HeapWord*)ffc, ffc->size());
duke@435 2943 _bt.verify_single_block((HeapWord*)ffc, ffc->size());
duke@435 2944 _bt.verify_single_block((HeapWord*)fc, fc_size);
duke@435 2945 // Push this on "fl".
jmasa@3732 2946 fl->return_chunk_at_head(ffc);
duke@435 2947 }
duke@435 2948 // First chunk
jmasa@3732 2949 assert(fc->is_free() && fc->size() == n*word_sz, "Error: should still be a free block");
ysr@2071 2950 // The blocks above should show their new sizes before the first block below
jmasa@3732 2951 fc->set_size(word_sz);
jmasa@3732 2952 fc->link_prev(NULL); // idempotent wrt free-ness, see assert above
jmasa@3732 2953 fc->link_next(NULL);
duke@435 2954 _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
duke@435 2955 _bt.verify_single_block((HeapWord*)fc, fc->size());
jmasa@3732 2956 fl->return_chunk_at_head(fc);
duke@435 2957
ysr@1580 2958 assert((ssize_t)n > 0 && (ssize_t)n == fl->count(), "Incorrect number of blocks");
duke@435 2959 {
ysr@1580 2960 // Update the stats for this block size.
duke@435 2961 MutexLockerEx x(_indexedFreeListParLocks[word_sz],
duke@435 2962 Mutex::_no_safepoint_check_flag);
jmasa@3732 2963 const ssize_t births = _indexedFreeList[word_sz].split_births() + n;
jmasa@3732 2964 _indexedFreeList[word_sz].set_split_births(births);
ysr@1580 2965 // ssize_t new_surplus = _indexedFreeList[word_sz].surplus() + n;
ysr@1580 2966 // _indexedFreeList[word_sz].set_surplus(new_surplus);
duke@435 2967 }
duke@435 2968
duke@435 2969 // TRAP
duke@435 2970 assert(fl->tail()->next() == NULL, "List invariant.");
duke@435 2971 }
duke@435 2972
jmasa@7474 2973 void CompactibleFreeListSpace::par_get_chunk_of_blocks(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl) {
jmasa@7474 2974 assert(fl->count() == 0, "Precondition.");
jmasa@7474 2975 assert(word_sz < CompactibleFreeListSpace::IndexSetSize,
jmasa@7474 2976 "Precondition");
jmasa@7474 2977
jmasa@7474 2978 if (par_get_chunk_of_blocks_IFL(word_sz, n, fl)) {
jmasa@7474 2979 // Got it
jmasa@7474 2980 return;
jmasa@7474 2981 }
jmasa@7474 2982
jmasa@7474 2983 // Otherwise, we'll split a block from the dictionary.
jmasa@7474 2984 par_get_chunk_of_blocks_dictionary(word_sz, n, fl);
jmasa@7474 2985 }
jmasa@7474 2986
duke@435 2987 // Set up the space's par_seq_tasks structure for work claiming
duke@435 2988 // for parallel rescan. See CMSParRemarkTask where this is currently used.
duke@435 2989 // XXX Need to suitably abstract and generalize this and the next
duke@435 2990 // method into one.
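// The number of tasks is the size of the used region divided by
// rescan_task_size(), rounded up: for instance, a used region of 10,000 words
// and a task size of 4,096 words give (10000 + 4095) / 4096 = 3 tasks. The
// asserts below check that the last task still starts inside the used region
// and that the tasks together cover it.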
duke@435 2991 void
duke@435 2992 CompactibleFreeListSpace::
duke@435 2993 initialize_sequential_subtasks_for_rescan(int n_threads) {
duke@435 2994 // The "size" of each task is fixed according to rescan_task_size.
duke@435 2995 assert(n_threads > 0, "Unexpected n_threads argument");
duke@435 2996 const size_t task_size = rescan_task_size();
duke@435 2997 size_t n_tasks = (used_region().word_size() + task_size - 1)/task_size;
ysr@775 2998 assert((n_tasks == 0) == used_region().is_empty(), "n_tasks incorrect");
ysr@775 2999 assert(n_tasks == 0 ||
ysr@775 3000 ((used_region().start() + (n_tasks - 1)*task_size < used_region().end()) &&
ysr@775 3001 (used_region().start() + n_tasks*task_size >= used_region().end())),
ysr@775 3002 "n_tasks calculation incorrect");
duke@435 3003 SequentialSubTasksDone* pst = conc_par_seq_tasks();
duke@435 3004 assert(!pst->valid(), "Clobbering existing data?");
jmasa@2188 3005 // Sets the condition for completion of the subtask (how many threads
jmasa@2188 3006 // need to finish in order to be done).
jmasa@2188 3007 pst->set_n_threads(n_threads);
duke@435 3008 pst->set_n_tasks((int)n_tasks);
duke@435 3009 }
duke@435 3010
duke@435 3011 // Set up the space's par_seq_tasks structure for work claiming
duke@435 3012 // for parallel concurrent marking. See CMSConcMarkTask where this is currently used.
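// The span to be marked may first be clipped at "low"; "low" is aligned down
// to a card boundary (cards are 512 bytes, i.e. 64 words on a 64-bit VM) so
// that block_offset_careful() can be applied safely at task boundaries.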
duke@435 3013 void
duke@435 3014 CompactibleFreeListSpace::
duke@435 3015 initialize_sequential_subtasks_for_marking(int n_threads,
duke@435 3016 HeapWord* low) {
duke@435 3017 // The "size" of each task is fixed according to marking_task_size.
duke@435 3018 assert(n_threads > 0, "Unexpected n_threads argument");
duke@435 3019 const size_t task_size = marking_task_size();
duke@435 3020 assert(task_size > CardTableModRefBS::card_size_in_words &&
duke@435 3021 (task_size % CardTableModRefBS::card_size_in_words == 0),
duke@435 3022 "Otherwise arithmetic below would be incorrect");
duke@435 3023 MemRegion span = _gen->reserved();
duke@435 3024 if (low != NULL) {
duke@435 3025 if (span.contains(low)) {
duke@435 3026 // Align low down to a card boundary so that
duke@435 3027 // we can use block_offset_careful() on span boundaries.
duke@435 3028 HeapWord* aligned_low = (HeapWord*)align_size_down((uintptr_t)low,
duke@435 3029 CardTableModRefBS::card_size);
duke@435 3030 // Clip span prefix at aligned_low
duke@435 3031 span = span.intersection(MemRegion(aligned_low, span.end()));
duke@435 3032 } else if (low > span.end()) {
duke@435 3033 span = MemRegion(low, low); // Null region
duke@435 3034 } // else use entire span
duke@435 3035 }
duke@435 3036 assert(span.is_empty() ||
duke@435 3037 ((uintptr_t)span.start() % CardTableModRefBS::card_size == 0),
duke@435 3038 "span should start at a card boundary");
duke@435 3039 size_t n_tasks = (span.word_size() + task_size - 1)/task_size;
duke@435 3040 assert((n_tasks == 0) == span.is_empty(), "Inconsistency");
duke@435 3041 assert(n_tasks == 0 ||
duke@435 3042 ((span.start() + (n_tasks - 1)*task_size < span.end()) &&
duke@435 3043 (span.start() + n_tasks*task_size >= span.end())),
ysr@775 3044 "n_tasks calculation incorrect");
duke@435 3045 SequentialSubTasksDone* pst = conc_par_seq_tasks();
duke@435 3046 assert(!pst->valid(), "Clobbering existing data?");
jmasa@2188 3047 // Sets the condition for completion of the subtask (how many threads
jmasa@2188 3048 // need to finish in order to be done).
jmasa@2188 3049 pst->set_n_threads(n_threads);
duke@435 3050 pst->set_n_tasks((int)n_tasks);
duke@435 3051 }
