src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp

author:      aoqi
date:        Sat, 24 Oct 2020 16:43:47 +0800
changeset:   10015:eb7ce841ccec
parent:      9806:758c07667682
parent:      9975:184f430ac1a2
permissions: -rw-r--r--
description: Merge

duke@435 1 /*
fyang@9975 2 * Copyright (c) 2001, 2020, Oracle and/or its affiliates. All rights reserved.
duke@435 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
duke@435 4 *
duke@435 5 * This code is free software; you can redistribute it and/or modify it
duke@435 6 * under the terms of the GNU General Public License version 2 only, as
duke@435 7 * published by the Free Software Foundation.
duke@435 8 *
duke@435 9 * This code is distributed in the hope that it will be useful, but WITHOUT
duke@435 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
duke@435 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
duke@435 12 * version 2 for more details (a copy is included in the LICENSE file that
duke@435 13 * accompanied this code).
duke@435 14 *
duke@435 15 * You should have received a copy of the GNU General Public License version
duke@435 16 * 2 along with this work; if not, write to the Free Software Foundation,
duke@435 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
duke@435 18 *
trims@1907 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
trims@1907 20 * or visit www.oracle.com if you need additional information or have any
trims@1907 21 * questions.
duke@435 22 *
duke@435 23 */
duke@435 24
stefank@2314 25 #include "precompiled.hpp"
stefank@2314 26 #include "gc_implementation/concurrentMarkSweep/cmsLockVerifier.hpp"
stefank@2314 27 #include "gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp"
stefank@2314 28 #include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.inline.hpp"
stefank@2314 29 #include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.hpp"
stefank@2314 30 #include "gc_implementation/shared/liveRange.hpp"
stefank@2314 31 #include "gc_implementation/shared/spaceDecorator.hpp"
coleenp@4037 32 #include "gc_interface/collectedHeap.inline.hpp"
stefank@2314 33 #include "memory/allocation.inline.hpp"
stefank@2314 34 #include "memory/blockOffsetTable.inline.hpp"
stefank@2314 35 #include "memory/resourceArea.hpp"
goetz@6912 36 #include "memory/space.inline.hpp"
stefank@2314 37 #include "memory/universe.inline.hpp"
stefank@2314 38 #include "oops/oop.inline.hpp"
stefank@2314 39 #include "runtime/globals.hpp"
stefank@2314 40 #include "runtime/handles.inline.hpp"
stefank@2314 41 #include "runtime/init.hpp"
stefank@2314 42 #include "runtime/java.hpp"
goetz@6911 43 #include "runtime/orderAccess.inline.hpp"
stefank@2314 44 #include "runtime/vmThread.hpp"
stefank@2314 45 #include "utilities/copy.hpp"
duke@435 46
duke@435 47 /////////////////////////////////////////////////////////////////////////
duke@435 48 //// CompactibleFreeListSpace
duke@435 49 /////////////////////////////////////////////////////////////////////////
duke@435 50
duke@435 51 // highest ranked free list lock rank
duke@435 52 int CompactibleFreeListSpace::_lockRank = Mutex::leaf + 3;
duke@435 53
kvn@1926 54 // Defaults are 0 so things will break badly if incorrectly initialized.
ysr@3264 55 size_t CompactibleFreeListSpace::IndexSetStart = 0;
ysr@3264 56 size_t CompactibleFreeListSpace::IndexSetStride = 0;
kvn@1926 57
kvn@1926 58 size_t MinChunkSize = 0;
kvn@1926 59
kvn@1926 60 void CompactibleFreeListSpace::set_cms_values() {
kvn@1926 61 // Set CMS global values
kvn@1926 62 assert(MinChunkSize == 0, "already set");
brutisso@3807 63
brutisso@3807 64 // MinChunkSize should be a multiple of MinObjAlignment and be large enough
brutisso@3807 65 // for chunks to contain a FreeChunk.
brutisso@3807 66 size_t min_chunk_size_in_bytes = align_size_up(sizeof(FreeChunk), MinObjAlignmentInBytes);
brutisso@3807 67 MinChunkSize = min_chunk_size_in_bytes / BytesPerWord;
kvn@1926 68
kvn@1926 69 assert(IndexSetStart == 0 && IndexSetStride == 0, "already set");
ysr@3264 70 IndexSetStart = MinChunkSize;
kvn@1926 71 IndexSetStride = MinObjAlignment;
kvn@1926 72 }
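// Illustrative note (not in the original source): a worked example of the
// computation above, under assumed values. On a typical 64-bit VM with
// MinObjAlignmentInBytes == 8, sizeof(FreeChunk) is 24 bytes (its size,
// prev and next fields), so align_size_up(24, 8) == 24 bytes, giving
// MinChunkSize == 3 heap words and hence IndexSetStart == 3. The actual
// values depend on the platform word size and alignment settings.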
kvn@1926 73
duke@435 74 // Constructor
duke@435 75 CompactibleFreeListSpace::CompactibleFreeListSpace(BlockOffsetSharedArray* bs,
duke@435 76 MemRegion mr, bool use_adaptive_freelists,
jmasa@3730 77 FreeBlockDictionary<FreeChunk>::DictionaryChoice dictionaryChoice) :
duke@435 78 _dictionaryChoice(dictionaryChoice),
duke@435 79 _adaptive_freelists(use_adaptive_freelists),
duke@435 80 _bt(bs, mr),
duke@435 81 // free list locks are in the range of values taken by _lockRank
duke@435 82 // This range currently is [_leaf+2, _leaf+3]
duke@435 83 // Note: this requires that CFLspace c'tors
duke@435 84 // are called serially in the order in which the locks
duke@435 85 // are acquired in the program text. This is true today.
duke@435 86 _freelistLock(_lockRank--, "CompactibleFreeListSpace._lock", true),
duke@435 87 _parDictionaryAllocLock(Mutex::leaf - 1, // == rank(ExpandHeap_lock) - 1
duke@435 88 "CompactibleFreeListSpace._dict_par_lock", true),
duke@435 89 _rescan_task_size(CardTableModRefBS::card_size_in_words * BitsPerWord *
duke@435 90 CMSRescanMultiple),
duke@435 91 _marking_task_size(CardTableModRefBS::card_size_in_words * BitsPerWord *
duke@435 92 CMSConcMarkMultiple),
duke@435 93 _collector(NULL)
duke@435 94 {
jmasa@3730 95 assert(sizeof(FreeChunk) / BytesPerWord <= MinChunkSize,
jmasa@4196 96 "FreeChunk is larger than expected");
duke@435 97 _bt.set_space(this);
jmasa@698 98 initialize(mr, SpaceDecorator::Clear, SpaceDecorator::Mangle);
duke@435 99 // We have all of "mr", all of which we place in the dictionary
duke@435 100 // as one big chunk. We'll need to decide here which of several
duke@435 101 // possible alternative dictionary implementations to use. For
duke@435 102 // now the choice is easy, since we have only one working
duke@435 103 // implementation, namely, the simple binary tree (splaying
duke@435 104 // temporarily disabled).
duke@435 105 switch (dictionaryChoice) {
jmasa@4196 106 case FreeBlockDictionary<FreeChunk>::dictionaryBinaryTree:
jmasa@4488 107 _dictionary = new AFLBinaryTreeDictionary(mr);
jmasa@4196 108 break;
jmasa@3730 109 case FreeBlockDictionary<FreeChunk>::dictionarySplayTree:
jmasa@3730 110 case FreeBlockDictionary<FreeChunk>::dictionarySkipList:
duke@435 111 default:
duke@435 112 warning("dictionaryChoice: selected option not understood; using"
duke@435 113 " default BinaryTreeDictionary implementation instead.");
duke@435 114 }
duke@435 115 assert(_dictionary != NULL, "CMS dictionary initialization");
duke@435 116 // The indexed free lists are initially all empty and are lazily
duke@435 117 // filled in on demand. Initialize the array elements to NULL.
duke@435 118 initializeIndexedFreeListArray();
duke@435 119
duke@435 120 // Not using adaptive free lists assumes that allocation is first
duke@435 121 // from the linAB's. Also a cms perm gen which can be compacted
duke@435 122 // has to have the klass's klassKlass allocated at a lower
duke@435 123 // address in the heap than the klass so that the klassKlass is
duke@435 124 // moved to its new location before the klass is moved.
duke@435 125 // Set the _refillSize for the linear allocation blocks
duke@435 126 if (!use_adaptive_freelists) {
jmasa@4488 127 FreeChunk* fc = _dictionary->get_chunk(mr.word_size(),
jmasa@4488 128 FreeBlockDictionary<FreeChunk>::atLeast);
duke@435 129 // The small linAB initially has all the space and will allocate
duke@435 130 // a chunk of any size.
duke@435 131 HeapWord* addr = (HeapWord*) fc;
duke@435 132 _smallLinearAllocBlock.set(addr, fc->size() ,
duke@435 133 1024*SmallForLinearAlloc, fc->size());
duke@435 134 // Note that _unallocated_block is not updated here.
duke@435 135 // Allocations from the linear allocation block should
duke@435 136 // update it.
duke@435 137 } else {
duke@435 138 _smallLinearAllocBlock.set(0, 0, 1024*SmallForLinearAlloc,
duke@435 139 SmallForLinearAlloc);
duke@435 140 }
duke@435 141 // CMSIndexedFreeListReplenish should be at least 1
duke@435 142 CMSIndexedFreeListReplenish = MAX2((uintx)1, CMSIndexedFreeListReplenish);
duke@435 143 _promoInfo.setSpace(this);
duke@435 144 if (UseCMSBestFit) {
duke@435 145 _fitStrategy = FreeBlockBestFitFirst;
duke@435 146 } else {
duke@435 147 _fitStrategy = FreeBlockStrategyNone;
duke@435 148 }
ysr@3220 149 check_free_list_consistency();
duke@435 150
duke@435 151 // Initialize locks for parallel case.
jmasa@2188 152
jmasa@2188 153 if (CollectedHeap::use_parallel_gc_threads()) {
duke@435 154 for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
duke@435 155 _indexedFreeListParLocks[i] = new Mutex(Mutex::leaf - 1, // == ExpandHeap_lock - 1
duke@435 156 "a freelist par lock",
duke@435 157 true);
duke@435 158 DEBUG_ONLY(
duke@435 159 _indexedFreeList[i].set_protecting_lock(_indexedFreeListParLocks[i]);
duke@435 160 )
duke@435 161 }
duke@435 162 _dictionary->set_par_lock(&_parDictionaryAllocLock);
duke@435 163 }
zgu@9793 164
zgu@9793 165 _used_stable = 0;
duke@435 166 }
duke@435 167
duke@435 168 // Like CompactibleSpace forward() but always calls cross_threshold() to
duke@435 169 // update the block offset table. Removed initialize_threshold call because
duke@435 170 // CFLS does not use a block offset array for contiguous spaces.
duke@435 171 HeapWord* CompactibleFreeListSpace::forward(oop q, size_t size,
duke@435 172 CompactPoint* cp, HeapWord* compact_top) {
duke@435 173 // q is alive
duke@435 174 // First check if we should switch compaction space
duke@435 175 assert(this == cp->space, "'this' should be current compaction space.");
duke@435 176 size_t compaction_max_size = pointer_delta(end(), compact_top);
duke@435 177 assert(adjustObjectSize(size) == cp->space->adjust_object_size_v(size),
duke@435 178 "virtual adjustObjectSize_v() method is not correct");
duke@435 179 size_t adjusted_size = adjustObjectSize(size);
duke@435 180 assert(compaction_max_size >= MinChunkSize || compaction_max_size == 0,
duke@435 181 "no small fragments allowed");
duke@435 182 assert(minimum_free_block_size() == MinChunkSize,
duke@435 183 "for de-virtualized reference below");
duke@435 184 // Can't leave a nonzero-size residual fragment smaller than MinChunkSize
duke@435 185 if (adjusted_size + MinChunkSize > compaction_max_size &&
duke@435 186 adjusted_size != compaction_max_size) {
duke@435 187 do {
duke@435 188 // switch to next compaction space
duke@435 189 cp->space->set_compaction_top(compact_top);
duke@435 190 cp->space = cp->space->next_compaction_space();
duke@435 191 if (cp->space == NULL) {
duke@435 192 cp->gen = GenCollectedHeap::heap()->prev_gen(cp->gen);
duke@435 193 assert(cp->gen != NULL, "compaction must succeed");
duke@435 194 cp->space = cp->gen->first_compaction_space();
duke@435 195 assert(cp->space != NULL, "generation must have a first compaction space");
duke@435 196 }
duke@435 197 compact_top = cp->space->bottom();
duke@435 198 cp->space->set_compaction_top(compact_top);
duke@435 199 // The correct adjusted_size may not be the same as that for this method
duke@435 200 // (i.e., cp->space may no longer be "this"), so adjust the size again.
duke@435 201 // Use the virtual method here, which was avoided above to save the
duke@435 202 // virtual dispatch.
duke@435 203 adjusted_size = cp->space->adjust_object_size_v(size);
duke@435 204 compaction_max_size = pointer_delta(cp->space->end(), compact_top);
duke@435 205 assert(cp->space->minimum_free_block_size() == 0, "just checking");
duke@435 206 } while (adjusted_size > compaction_max_size);
duke@435 207 }
duke@435 208
duke@435 209 // store the forwarding pointer into the mark word
duke@435 210 if ((HeapWord*)q != compact_top) {
duke@435 211 q->forward_to(oop(compact_top));
duke@435 212 assert(q->is_gc_marked(), "encoding the pointer should preserve the mark");
duke@435 213 } else {
duke@435 214 // if the object isn't moving we can just set the mark to the default
duke@435 215 // mark and handle it specially later on.
duke@435 216 q->init_mark();
duke@435 217 assert(q->forwardee() == NULL, "should be forwarded to NULL");
duke@435 218 }
duke@435 219
duke@435 220 compact_top += adjusted_size;
duke@435 221
duke@435 222 // we need to update the offset table so that the beginnings of objects can be
duke@435 223 // found during scavenge. Note that we are updating the offset table based on
duke@435 224 // where the object will be once the compaction phase finishes.
duke@435 225
duke@435 226 // Always call cross_threshold(). A contiguous space calls it only when
duke@435 227 // the compaction_top exceeds the current threshold, but that restriction
duke@435 228 // does not apply to a non-contiguous space.
duke@435 229 cp->threshold =
duke@435 230 cp->space->cross_threshold(compact_top - adjusted_size, compact_top);
duke@435 231 return compact_top;
duke@435 232 }
duke@435 233
duke@435 234 // A modified copy of OffsetTableContigSpace::cross_threshold() with _offsets -> _bt
duke@435 235 // and use of single_block instead of alloc_block. The name here is not really
duke@435 236 // appropriate - maybe a more general name could be invented for both the
duke@435 237 // contiguous and noncontiguous spaces.
duke@435 238
duke@435 239 HeapWord* CompactibleFreeListSpace::cross_threshold(HeapWord* start, HeapWord* the_end) {
duke@435 240 _bt.single_block(start, the_end);
duke@435 241 return end();
duke@435 242 }
duke@435 243
duke@435 244 // Initialize the indexed free lists; all lists start out empty (NULL).
duke@435 245 void CompactibleFreeListSpace::initializeIndexedFreeListArray() {
duke@435 246 for (size_t i = 0; i < IndexSetSize; i++) {
duke@435 247 // Note that on platforms where objects are double word aligned,
duke@435 248 // the odd array elements are not used. It is convenient, however,
duke@435 249 // to map directly from the object size to the array element.
duke@435 250 _indexedFreeList[i].reset(IndexSetSize);
duke@435 251 _indexedFreeList[i].set_size(i);
duke@435 252 assert(_indexedFreeList[i].count() == 0, "reset check failed");
duke@435 253 assert(_indexedFreeList[i].head() == NULL, "reset check failed");
duke@435 254 assert(_indexedFreeList[i].tail() == NULL, "reset check failed");
duke@435 255 assert(_indexedFreeList[i].hint() == IndexSetSize, "reset check failed");
duke@435 256 }
duke@435 257 }
duke@435 258
duke@435 259 void CompactibleFreeListSpace::resetIndexedFreeListArray() {
ysr@3264 260 for (size_t i = 1; i < IndexSetSize; i++) {
duke@435 261 assert(_indexedFreeList[i].size() == (size_t) i,
duke@435 262 "Indexed free list sizes are incorrect");
duke@435 263 _indexedFreeList[i].reset(IndexSetSize);
duke@435 264 assert(_indexedFreeList[i].count() == 0, "reset check failed");
duke@435 265 assert(_indexedFreeList[i].head() == NULL, "reset check failed");
duke@435 266 assert(_indexedFreeList[i].tail() == NULL, "reset check failed");
duke@435 267 assert(_indexedFreeList[i].hint() == IndexSetSize, "reset check failed");
duke@435 268 }
duke@435 269 }
duke@435 270
duke@435 271 void CompactibleFreeListSpace::reset(MemRegion mr) {
duke@435 272 resetIndexedFreeListArray();
duke@435 273 dictionary()->reset();
duke@435 274 if (BlockOffsetArrayUseUnallocatedBlock) {
duke@435 275 assert(end() == mr.end(), "We are compacting to the bottom of CMS gen");
duke@435 276 // Everything's allocated until proven otherwise.
duke@435 277 _bt.set_unallocated_block(end());
duke@435 278 }
duke@435 279 if (!mr.is_empty()) {
duke@435 280 assert(mr.word_size() >= MinChunkSize, "Chunk size is too small");
duke@435 281 _bt.single_block(mr.start(), mr.word_size());
duke@435 282 FreeChunk* fc = (FreeChunk*) mr.start();
jmasa@3732 283 fc->set_size(mr.word_size());
duke@435 284 if (mr.word_size() >= IndexSetSize ) {
duke@435 285 returnChunkToDictionary(fc);
duke@435 286 } else {
duke@435 287 _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
jmasa@3732 288 _indexedFreeList[mr.word_size()].return_chunk_at_head(fc);
duke@435 289 }
brutisso@5163 290 coalBirth(mr.word_size());
duke@435 291 }
duke@435 292 _promoInfo.reset();
duke@435 293 _smallLinearAllocBlock._ptr = NULL;
duke@435 294 _smallLinearAllocBlock._word_size = 0;
duke@435 295 }
duke@435 296
duke@435 297 void CompactibleFreeListSpace::reset_after_compaction() {
duke@435 298 // Reset the space to the new reality - one free chunk.
duke@435 299 MemRegion mr(compaction_top(), end());
duke@435 300 reset(mr);
duke@435 301 // Now refill the linear allocation block(s) if possible.
duke@435 302 if (_adaptive_freelists) {
duke@435 303 refillLinearAllocBlocksIfNeeded();
duke@435 304 } else {
duke@435 305 // Place as much of mr in the linAB as we can get,
duke@435 306 // provided it was big enough to go into the dictionary.
jmasa@3732 307 FreeChunk* fc = dictionary()->find_largest_dict();
duke@435 308 if (fc != NULL) {
duke@435 309 assert(fc->size() == mr.word_size(),
duke@435 310 "Why was the chunk broken up?");
duke@435 311 removeChunkFromDictionary(fc);
duke@435 312 HeapWord* addr = (HeapWord*) fc;
duke@435 313 _smallLinearAllocBlock.set(addr, fc->size() ,
duke@435 314 1024*SmallForLinearAlloc, fc->size());
duke@435 315 // Note that _unallocated_block is not updated here.
duke@435 316 }
duke@435 317 }
duke@435 318 }
duke@435 319
duke@435 320 // Walks the entire dictionary, returning a coterminal
duke@435 321 // chunk, if it exists. Use with caution since it involves
duke@435 322 // a potentially complete walk of a potentially large tree.
duke@435 323 FreeChunk* CompactibleFreeListSpace::find_chunk_at_end() {
duke@435 324
duke@435 325 assert_lock_strong(&_freelistLock);
duke@435 326
duke@435 327 return dictionary()->find_chunk_ends_at(end());
duke@435 328 }
duke@435 329
duke@435 330
duke@435 331 #ifndef PRODUCT
duke@435 332 void CompactibleFreeListSpace::initializeIndexedFreeListArrayReturnedBytes() {
duke@435 333 for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
jmasa@3732 334 _indexedFreeList[i].allocation_stats()->set_returned_bytes(0);
duke@435 335 }
duke@435 336 }
duke@435 337
duke@435 338 size_t CompactibleFreeListSpace::sumIndexedFreeListArrayReturnedBytes() {
duke@435 339 size_t sum = 0;
duke@435 340 for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
jmasa@3732 341 sum += _indexedFreeList[i].allocation_stats()->returned_bytes();
duke@435 342 }
duke@435 343 return sum;
duke@435 344 }
duke@435 345
duke@435 346 size_t CompactibleFreeListSpace::totalCountInIndexedFreeLists() const {
duke@435 347 size_t count = 0;
ysr@3264 348 for (size_t i = IndexSetStart; i < IndexSetSize; i++) {
duke@435 349 debug_only(
duke@435 350 ssize_t total_list_count = 0;
duke@435 351 for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
duke@435 352 fc = fc->next()) {
duke@435 353 total_list_count++;
duke@435 354 }
duke@435 355 assert(total_list_count == _indexedFreeList[i].count(),
duke@435 356 "Count in list is incorrect");
duke@435 357 )
duke@435 358 count += _indexedFreeList[i].count();
duke@435 359 }
duke@435 360 return count;
duke@435 361 }
duke@435 362
duke@435 363 size_t CompactibleFreeListSpace::totalCount() {
duke@435 364 size_t num = totalCountInIndexedFreeLists();
jmasa@3732 365 num += dictionary()->total_count();
duke@435 366 if (_smallLinearAllocBlock._word_size != 0) {
duke@435 367 num++;
duke@435 368 }
duke@435 369 return num;
duke@435 370 }
duke@435 371 #endif
duke@435 372
duke@435 373 bool CompactibleFreeListSpace::is_free_block(const HeapWord* p) const {
duke@435 374 FreeChunk* fc = (FreeChunk*) p;
jmasa@3732 375 return fc->is_free();
duke@435 376 }
duke@435 377
duke@435 378 size_t CompactibleFreeListSpace::used() const {
duke@435 379 return capacity() - free();
duke@435 380 }
duke@435 381
zgu@9793 382 size_t CompactibleFreeListSpace::used_stable() const {
zgu@9793 383 return _used_stable;
zgu@9793 384 }
zgu@9793 385
zgu@9793 386 void CompactibleFreeListSpace::recalculate_used_stable() {
zgu@9793 387 _used_stable = used();
zgu@9793 388 }
zgu@9793 389
duke@435 390 size_t CompactibleFreeListSpace::free() const {
duke@435 391 // "MT-safe, but not MT-precise"(TM), if you will: i.e.
duke@435 392 // if you do this while the structures are in flux you
duke@435 393 // may get an approximate answer only; for instance
duke@435 394 // because there is concurrent allocation either
duke@435 395 // directly by mutators or for promotion during a GC.
duke@435 396 // It's "MT-safe", however, in the sense that you are guaranteed
duke@435 397 // not to crash and burn, for instance, because of walking
duke@435 398 // pointers that could disappear as you were walking them.
duke@435 399 // The approximation is because the various components
duke@435 400 // that are read below are not read atomically (and,
duke@435 401 // further, the computation of totalSizeInIndexedFreeLists()
duke@435 402 // is itself a non-atomic computation). The normal use of
duke@435 403 // this is during a resize operation at the end of GC
duke@435 404 // and at that time you are guaranteed to get the
duke@435 405 // correct actual value. However, for instance, this is
duke@435 406 // also read completely asynchronously by the "perf-sampler"
duke@435 407 // that supports jvmstat, and you are apt to see the values
duke@435 408 // flicker in such cases.
duke@435 409 assert(_dictionary != NULL, "No _dictionary?");
jmasa@3732 410 return (_dictionary->total_chunk_size(DEBUG_ONLY(freelistLock())) +
duke@435 411 totalSizeInIndexedFreeLists() +
duke@435 412 _smallLinearAllocBlock._word_size) * HeapWordSize;
duke@435 413 }
duke@435 414
duke@435 415 size_t CompactibleFreeListSpace::max_alloc_in_words() const {
duke@435 416 assert(_dictionary != NULL, "No _dictionary?");
duke@435 417 assert_locked();
jmasa@3732 418 size_t res = _dictionary->max_chunk_size();
duke@435 419 res = MAX2(res, MIN2(_smallLinearAllocBlock._word_size,
duke@435 420 (size_t) SmallForLinearAlloc - 1));
duke@435 421 // XXX the following could potentially be pretty slow;
duke@435 422 // should one, pessimally for the rare cases when res
duke@435 423 // calculated above is less than IndexSetSize,
duke@435 424 // just return res calculated above? My reasoning was that
duke@435 425 // those cases will be so rare that the extra time spent doesn't
duke@435 426 // really matter....
duke@435 427 // Note: do not change the loop test i >= res + IndexSetStride
duke@435 428 // to i > res below, because i is unsigned and res may be zero.
duke@435 429 for (size_t i = IndexSetSize - 1; i >= res + IndexSetStride;
duke@435 430 i -= IndexSetStride) {
duke@435 431 if (_indexedFreeList[i].head() != NULL) {
duke@435 432 assert(_indexedFreeList[i].count() != 0, "Inconsistent FreeList");
duke@435 433 return i;
duke@435 434 }
duke@435 435 }
duke@435 436 return res;
duke@435 437 }
duke@435 438
ysr@2071 439 void LinearAllocBlock::print_on(outputStream* st) const {
ysr@2071 440 st->print_cr(" LinearAllocBlock: ptr = " PTR_FORMAT ", word_size = " SIZE_FORMAT
ysr@2071 441 ", refillsize = " SIZE_FORMAT ", allocation_size_limit = " SIZE_FORMAT,
drchase@6680 442 p2i(_ptr), _word_size, _refillSize, _allocation_size_limit);
ysr@2071 443 }
ysr@2071 444
ysr@2071 445 void CompactibleFreeListSpace::print_on(outputStream* st) const {
ysr@2071 446 st->print_cr("COMPACTIBLE FREELIST SPACE");
ysr@2071 447 st->print_cr(" Space:");
ysr@2071 448 Space::print_on(st);
ysr@2071 449
ysr@2071 450 st->print_cr("promoInfo:");
ysr@2071 451 _promoInfo.print_on(st);
ysr@2071 452
ysr@2071 453 st->print_cr("_smallLinearAllocBlock");
ysr@2071 454 _smallLinearAllocBlock.print_on(st);
ysr@2071 455
ysr@2071 456 // dump_memory_block(_smallLinearAllocBlock->_ptr, 128);
ysr@2071 457
ysr@2071 458 st->print_cr(" _fitStrategy = %s, _adaptive_freelists = %s",
ysr@2071 459 _fitStrategy?"true":"false", _adaptive_freelists?"true":"false");
ysr@2071 460 }
ysr@2071 461
ysr@1580 462 void CompactibleFreeListSpace::print_indexed_free_lists(outputStream* st)
ysr@1580 463 const {
ysr@1580 464 reportIndexedFreeListStatistics();
ysr@1580 465 gclog_or_tty->print_cr("Layout of Indexed Freelists");
ysr@1580 466 gclog_or_tty->print_cr("---------------------------");
jmasa@4196 467 AdaptiveFreeList<FreeChunk>::print_labels_on(st, "size");
ysr@1580 468 for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
ysr@1580 469 _indexedFreeList[i].print_on(gclog_or_tty);
ysr@1580 470 for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
ysr@1580 471 fc = fc->next()) {
ysr@1580 472 gclog_or_tty->print_cr("\t[" PTR_FORMAT "," PTR_FORMAT ") %s",
drchase@6680 473 p2i(fc), p2i((HeapWord*)fc + i),
ysr@1580 474 fc->cantCoalesce() ? "\t CC" : "");
ysr@1580 475 }
ysr@1580 476 }
ysr@1580 477 }
ysr@1580 478
ysr@1580 479 void CompactibleFreeListSpace::print_promo_info_blocks(outputStream* st)
ysr@1580 480 const {
ysr@1580 481 _promoInfo.print_on(st);
ysr@1580 482 }
ysr@1580 483
ysr@1580 484 void CompactibleFreeListSpace::print_dictionary_free_lists(outputStream* st)
ysr@1580 485 const {
jmasa@3732 486 _dictionary->report_statistics();
ysr@1580 487 st->print_cr("Layout of Freelists in Tree");
ysr@1580 488 st->print_cr("---------------------------");
ysr@1580 489 _dictionary->print_free_lists(st);
ysr@1580 490 }
ysr@1580 491
ysr@1580 492 class BlkPrintingClosure: public BlkClosure {
ysr@1580 493 const CMSCollector* _collector;
ysr@1580 494 const CompactibleFreeListSpace* _sp;
ysr@1580 495 const CMSBitMap* _live_bit_map;
ysr@1580 496 const bool _post_remark;
ysr@1580 497 outputStream* _st;
ysr@1580 498 public:
ysr@1580 499 BlkPrintingClosure(const CMSCollector* collector,
ysr@1580 500 const CompactibleFreeListSpace* sp,
ysr@1580 501 const CMSBitMap* live_bit_map,
ysr@1580 502 outputStream* st):
ysr@1580 503 _collector(collector),
ysr@1580 504 _sp(sp),
ysr@1580 505 _live_bit_map(live_bit_map),
ysr@1580 506 _post_remark(collector->abstract_state() > CMSCollector::FinalMarking),
ysr@1580 507 _st(st) { }
ysr@1580 508 size_t do_blk(HeapWord* addr);
ysr@1580 509 };
ysr@1580 510
ysr@1580 511 size_t BlkPrintingClosure::do_blk(HeapWord* addr) {
ysr@1580 512 size_t sz = _sp->block_size_no_stall(addr, _collector);
ysr@1580 513 assert(sz != 0, "Should always be able to compute a size");
ysr@1580 514 if (_sp->block_is_obj(addr)) {
ysr@1580 515 const bool dead = _post_remark && !_live_bit_map->isMarked(addr);
ysr@1580 516 _st->print_cr(PTR_FORMAT ": %s object of size " SIZE_FORMAT "%s",
drchase@6680 517 p2i(addr),
ysr@1580 518 dead ? "dead" : "live",
ysr@1580 519 sz,
ysr@1580 520 (!dead && CMSPrintObjectsInDump) ? ":" : ".");
ysr@1580 521 if (CMSPrintObjectsInDump && !dead) {
ysr@1580 522 oop(addr)->print_on(_st);
ysr@1580 523 _st->print_cr("--------------------------------------");
ysr@1580 524 }
ysr@1580 525 } else { // free block
ysr@1580 526 _st->print_cr(PTR_FORMAT ": free block of size " SIZE_FORMAT "%s",
drchase@6680 527 p2i(addr), sz, CMSPrintChunksInDump ? ":" : ".");
ysr@1580 528 if (CMSPrintChunksInDump) {
ysr@1580 529 ((FreeChunk*)addr)->print_on(_st);
ysr@1580 530 _st->print_cr("--------------------------------------");
ysr@1580 531 }
ysr@1580 532 }
ysr@1580 533 return sz;
ysr@1580 534 }
ysr@1580 535
ysr@1580 536 void CompactibleFreeListSpace::dump_at_safepoint_with_locks(CMSCollector* c,
ysr@1580 537 outputStream* st) {
ysr@1580 538 st->print_cr("\n=========================");
ysr@1580 539 st->print_cr("Block layout in CMS Heap:");
ysr@1580 540 st->print_cr("=========================");
ysr@1580 541 BlkPrintingClosure bpcl(c, this, c->markBitMap(), st);
ysr@1580 542 blk_iterate(&bpcl);
ysr@1580 543
ysr@1580 544 st->print_cr("\n=======================================");
ysr@1580 545 st->print_cr("Order & Layout of Promotion Info Blocks");
ysr@1580 546 st->print_cr("=======================================");
ysr@1580 547 print_promo_info_blocks(st);
ysr@1580 548
ysr@1580 549 st->print_cr("\n===========================");
ysr@1580 550 st->print_cr("Order of Indexed Free Lists");
ysr@1580 551 st->print_cr("===========================");
ysr@1580 552 print_indexed_free_lists(st);
ysr@1580 553
ysr@1580 554 st->print_cr("\n=================================");
ysr@1580 555 st->print_cr("Order of Free Lists in Dictionary");
ysr@1580 556 st->print_cr("=================================");
ysr@1580 557 print_dictionary_free_lists(st);
ysr@1580 558 }
ysr@1580 559
ysr@1580 560
duke@435 561 void CompactibleFreeListSpace::reportFreeListStatistics() const {
duke@435 562 assert_lock_strong(&_freelistLock);
duke@435 563 assert(PrintFLSStatistics != 0, "Reporting error");
jmasa@3732 564 _dictionary->report_statistics();
duke@435 565 if (PrintFLSStatistics > 1) {
duke@435 566 reportIndexedFreeListStatistics();
jmasa@3732 567 size_t total_size = totalSizeInIndexedFreeLists() +
jmasa@3732 568 _dictionary->total_chunk_size(DEBUG_ONLY(freelistLock()));
hseigel@4465 569 gclog_or_tty->print(" free=" SIZE_FORMAT " frag=%1.4f\n", total_size, flsFrag());
duke@435 570 }
duke@435 571 }
duke@435 572
duke@435 573 void CompactibleFreeListSpace::reportIndexedFreeListStatistics() const {
duke@435 574 assert_lock_strong(&_freelistLock);
duke@435 575 gclog_or_tty->print("Statistics for IndexedFreeLists:\n"
duke@435 576 "--------------------------------\n");
jmasa@3732 577 size_t total_size = totalSizeInIndexedFreeLists();
jmasa@3732 578 size_t free_blocks = numFreeBlocksInIndexedFreeLists();
drchase@6680 579 gclog_or_tty->print("Total Free Space: " SIZE_FORMAT "\n", total_size);
drchase@6680 580 gclog_or_tty->print("Max Chunk Size: " SIZE_FORMAT "\n", maxChunkSizeInIndexedFreeLists());
drchase@6680 581 gclog_or_tty->print("Number of Blocks: " SIZE_FORMAT "\n", free_blocks);
jmasa@3732 582 if (free_blocks != 0) {
drchase@6680 583 gclog_or_tty->print("Av. Block Size: " SIZE_FORMAT "\n", total_size/free_blocks);
duke@435 584 }
duke@435 585 }
duke@435 586
duke@435 587 size_t CompactibleFreeListSpace::numFreeBlocksInIndexedFreeLists() const {
duke@435 588 size_t res = 0;
duke@435 589 for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
duke@435 590 debug_only(
duke@435 591 ssize_t recount = 0;
duke@435 592 for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
duke@435 593 fc = fc->next()) {
duke@435 594 recount += 1;
duke@435 595 }
duke@435 596 assert(recount == _indexedFreeList[i].count(),
duke@435 597 "Incorrect count in list");
duke@435 598 )
duke@435 599 res += _indexedFreeList[i].count();
duke@435 600 }
duke@435 601 return res;
duke@435 602 }
duke@435 603
duke@435 604 size_t CompactibleFreeListSpace::maxChunkSizeInIndexedFreeLists() const {
duke@435 605 for (size_t i = IndexSetSize - 1; i != 0; i -= IndexSetStride) {
duke@435 606 if (_indexedFreeList[i].head() != NULL) {
duke@435 607 assert(_indexedFreeList[i].count() != 0, "Inconsistent FreeList");
duke@435 608 return (size_t)i;
duke@435 609 }
duke@435 610 }
duke@435 611 return 0;
duke@435 612 }
duke@435 613
duke@435 614 void CompactibleFreeListSpace::set_end(HeapWord* value) {
duke@435 615 HeapWord* prevEnd = end();
duke@435 616 assert(prevEnd != value, "unnecessary set_end call");
ysr@2071 617 assert(prevEnd == NULL || !BlockOffsetArrayUseUnallocatedBlock || value >= unallocated_block(),
ysr@2071 618 "New end is below unallocated block");
duke@435 619 _end = value;
duke@435 620 if (prevEnd != NULL) {
duke@435 621 // Resize the underlying block offset table.
duke@435 622 _bt.resize(pointer_delta(value, bottom()));
ysr@1580 623 if (value <= prevEnd) {
ysr@2071 624 assert(!BlockOffsetArrayUseUnallocatedBlock || value >= unallocated_block(),
ysr@2071 625 "New end is below unallocated block");
ysr@1580 626 } else {
ysr@1580 627 // Now, take this new chunk and add it to the free blocks.
ysr@1580 628 // Note that the BOT has not yet been updated for this block.
ysr@1580 629 size_t newFcSize = pointer_delta(value, prevEnd);
ysr@1580 630 // XXX This is REALLY UGLY and should be fixed up. XXX
ysr@1580 631 if (!_adaptive_freelists && _smallLinearAllocBlock._ptr == NULL) {
ysr@1580 632 // Mark the boundary of the new block in BOT
ysr@1580 633 _bt.mark_block(prevEnd, value);
ysr@1580 634 // put it all in the linAB
ysr@1580 635 if (ParallelGCThreads == 0) {
ysr@1580 636 _smallLinearAllocBlock._ptr = prevEnd;
ysr@1580 637 _smallLinearAllocBlock._word_size = newFcSize;
ysr@1580 638 repairLinearAllocBlock(&_smallLinearAllocBlock);
ysr@1580 639 } else { // ParallelGCThreads > 0
ysr@1580 640 MutexLockerEx x(parDictionaryAllocLock(),
ysr@1580 641 Mutex::_no_safepoint_check_flag);
ysr@1580 642 _smallLinearAllocBlock._ptr = prevEnd;
ysr@1580 643 _smallLinearAllocBlock._word_size = newFcSize;
ysr@1580 644 repairLinearAllocBlock(&_smallLinearAllocBlock);
ysr@1580 645 }
ysr@1580 646 // Births of chunks put into a LinAB are not recorded. Births
ysr@1580 647 // of chunks as they are allocated out of a LinAB are.
ysr@1580 648 } else {
ysr@1580 649 // Add the block to the free lists, if possible coalescing it
ysr@1580 650 // with the last free block, and update the BOT and census data.
ysr@1580 651 addChunkToFreeListsAtEndRecordingStats(prevEnd, newFcSize);
duke@435 652 }
duke@435 653 }
duke@435 654 }
duke@435 655 }
duke@435 656
duke@435 657 class FreeListSpace_DCTOC : public Filtering_DCTOC {
duke@435 658 CompactibleFreeListSpace* _cfls;
duke@435 659 CMSCollector* _collector;
duke@435 660 protected:
duke@435 661 // Override.
duke@435 662 #define walk_mem_region_with_cl_DECL(ClosureType) \
duke@435 663 virtual void walk_mem_region_with_cl(MemRegion mr, \
duke@435 664 HeapWord* bottom, HeapWord* top, \
duke@435 665 ClosureType* cl); \
duke@435 666 void walk_mem_region_with_cl_par(MemRegion mr, \
duke@435 667 HeapWord* bottom, HeapWord* top, \
duke@435 668 ClosureType* cl); \
duke@435 669 void walk_mem_region_with_cl_nopar(MemRegion mr, \
duke@435 670 HeapWord* bottom, HeapWord* top, \
duke@435 671 ClosureType* cl)
coleenp@4037 672 walk_mem_region_with_cl_DECL(ExtendedOopClosure);
duke@435 673 walk_mem_region_with_cl_DECL(FilteringClosure);
duke@435 674
duke@435 675 public:
duke@435 676 FreeListSpace_DCTOC(CompactibleFreeListSpace* sp,
duke@435 677 CMSCollector* collector,
coleenp@4037 678 ExtendedOopClosure* cl,
duke@435 679 CardTableModRefBS::PrecisionStyle precision,
duke@435 680 HeapWord* boundary) :
duke@435 681 Filtering_DCTOC(sp, cl, precision, boundary),
duke@435 682 _cfls(sp), _collector(collector) {}
duke@435 683 };
duke@435 684
duke@435 685 // We de-virtualize the block-related calls below, since we know that our
duke@435 686 // space is a CompactibleFreeListSpace.
jmasa@3294 687
duke@435 688 #define FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(ClosureType) \
duke@435 689 void FreeListSpace_DCTOC::walk_mem_region_with_cl(MemRegion mr, \
duke@435 690 HeapWord* bottom, \
duke@435 691 HeapWord* top, \
duke@435 692 ClosureType* cl) { \
jmasa@3294 693 bool is_par = SharedHeap::heap()->n_par_threads() > 0; \
jmasa@3294 694 if (is_par) { \
jmasa@3294 695 assert(SharedHeap::heap()->n_par_threads() == \
jmasa@3294 696 SharedHeap::heap()->workers()->active_workers(), "Mismatch"); \
duke@435 697 walk_mem_region_with_cl_par(mr, bottom, top, cl); \
duke@435 698 } else { \
duke@435 699 walk_mem_region_with_cl_nopar(mr, bottom, top, cl); \
duke@435 700 } \
duke@435 701 } \
duke@435 702 void FreeListSpace_DCTOC::walk_mem_region_with_cl_par(MemRegion mr, \
duke@435 703 HeapWord* bottom, \
duke@435 704 HeapWord* top, \
duke@435 705 ClosureType* cl) { \
duke@435 706 /* Skip parts that are before "mr", in case "block_start" sent us \
duke@435 707 back too far. */ \
duke@435 708 HeapWord* mr_start = mr.start(); \
duke@435 709 size_t bot_size = _cfls->CompactibleFreeListSpace::block_size(bottom); \
duke@435 710 HeapWord* next = bottom + bot_size; \
duke@435 711 while (next < mr_start) { \
duke@435 712 bottom = next; \
duke@435 713 bot_size = _cfls->CompactibleFreeListSpace::block_size(bottom); \
duke@435 714 next = bottom + bot_size; \
duke@435 715 } \
duke@435 716 \
duke@435 717 while (bottom < top) { \
duke@435 718 if (_cfls->CompactibleFreeListSpace::block_is_obj(bottom) && \
duke@435 719 !_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks( \
duke@435 720 oop(bottom)) && \
duke@435 721 !_collector->CMSCollector::is_dead_obj(oop(bottom))) { \
duke@435 722 size_t word_sz = oop(bottom)->oop_iterate(cl, mr); \
duke@435 723 bottom += _cfls->adjustObjectSize(word_sz); \
duke@435 724 } else { \
duke@435 725 bottom += _cfls->CompactibleFreeListSpace::block_size(bottom); \
duke@435 726 } \
duke@435 727 } \
duke@435 728 } \
duke@435 729 void FreeListSpace_DCTOC::walk_mem_region_with_cl_nopar(MemRegion mr, \
duke@435 730 HeapWord* bottom, \
duke@435 731 HeapWord* top, \
duke@435 732 ClosureType* cl) { \
duke@435 733 /* Skip parts that are before "mr", in case "block_start" sent us \
duke@435 734 back too far. */ \
duke@435 735 HeapWord* mr_start = mr.start(); \
duke@435 736 size_t bot_size = _cfls->CompactibleFreeListSpace::block_size_nopar(bottom); \
duke@435 737 HeapWord* next = bottom + bot_size; \
duke@435 738 while (next < mr_start) { \
duke@435 739 bottom = next; \
duke@435 740 bot_size = _cfls->CompactibleFreeListSpace::block_size_nopar(bottom); \
duke@435 741 next = bottom + bot_size; \
duke@435 742 } \
duke@435 743 \
duke@435 744 while (bottom < top) { \
duke@435 745 if (_cfls->CompactibleFreeListSpace::block_is_obj_nopar(bottom) && \
duke@435 746 !_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks( \
duke@435 747 oop(bottom)) && \
duke@435 748 !_collector->CMSCollector::is_dead_obj(oop(bottom))) { \
duke@435 749 size_t word_sz = oop(bottom)->oop_iterate(cl, mr); \
duke@435 750 bottom += _cfls->adjustObjectSize(word_sz); \
duke@435 751 } else { \
duke@435 752 bottom += _cfls->CompactibleFreeListSpace::block_size_nopar(bottom); \
duke@435 753 } \
duke@435 754 } \
duke@435 755 }
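// Illustrative note (not in the original source): "de-virtualizing" here means
// using an explicitly qualified call such as
//   _cfls->CompactibleFreeListSpace::block_size(bottom)
// which C++ binds statically, bypassing the vtable. This is valid only because
// the space is known to be a CompactibleFreeListSpace, and it saves a virtual
// dispatch on every block visited by the closure.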
duke@435 756
duke@435 757 // (There are only two of these, rather than N, because the split is due
duke@435 758 // only to the introduction of the FilteringClosure, a local part of the
duke@435 759 // impl of this abstraction.)
coleenp@4037 760 FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(ExtendedOopClosure)
duke@435 761 FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(FilteringClosure)
duke@435 762
duke@435 763 DirtyCardToOopClosure*
coleenp@4037 764 CompactibleFreeListSpace::new_dcto_cl(ExtendedOopClosure* cl,
duke@435 765 CardTableModRefBS::PrecisionStyle precision,
duke@435 766 HeapWord* boundary) {
duke@435 767 return new FreeListSpace_DCTOC(this, _collector, cl, precision, boundary);
duke@435 768 }
duke@435 769
duke@435 770
duke@435 771 // Note on locking for the space iteration functions:
duke@435 772 // since the collector's iteration activities are concurrent with
duke@435 773 // allocation activities by mutators, absent a suitable mutual exclusion
duke@435 774 // mechanism the iterators may go awry. For instance, a block being iterated
duke@435 775 // may suddenly be allocated or divided up and part of it allocated and
duke@435 776 // so on.
duke@435 777
duke@435 778 // Apply the given closure to each block in the space.
duke@435 779 void CompactibleFreeListSpace::blk_iterate_careful(BlkClosureCareful* cl) {
duke@435 780 assert_lock_strong(freelistLock());
duke@435 781 HeapWord *cur, *limit;
duke@435 782 for (cur = bottom(), limit = end(); cur < limit;
duke@435 783 cur += cl->do_blk_careful(cur));
duke@435 784 }
duke@435 785
duke@435 786 // Apply the given closure to each block in the space.
duke@435 787 void CompactibleFreeListSpace::blk_iterate(BlkClosure* cl) {
duke@435 788 assert_lock_strong(freelistLock());
duke@435 789 HeapWord *cur, *limit;
duke@435 790 for (cur = bottom(), limit = end(); cur < limit;
duke@435 791 cur += cl->do_blk(cur));
duke@435 792 }
duke@435 793
duke@435 794 // Apply the given closure to each oop in the space.
coleenp@4037 795 void CompactibleFreeListSpace::oop_iterate(ExtendedOopClosure* cl) {
duke@435 796 assert_lock_strong(freelistLock());
duke@435 797 HeapWord *cur, *limit;
duke@435 798 size_t curSize;
duke@435 799 for (cur = bottom(), limit = end(); cur < limit;
duke@435 800 cur += curSize) {
duke@435 801 curSize = block_size(cur);
duke@435 802 if (block_is_obj(cur)) {
duke@435 803 oop(cur)->oop_iterate(cl);
duke@435 804 }
duke@435 805 }
duke@435 806 }
duke@435 807
duke@435 808 // NOTE: In the following methods, in order to safely be able to
duke@435 809 // apply the closure to an object, we need to be sure that the
duke@435 810 // object has been initialized. We are guaranteed that an object
duke@435 811 // is initialized if we are holding the Heap_lock with the
duke@435 812 // world stopped.
duke@435 813 void CompactibleFreeListSpace::verify_objects_initialized() const {
duke@435 814 if (is_init_completed()) {
duke@435 815 assert_locked_or_safepoint(Heap_lock);
duke@435 816 if (Universe::is_fully_initialized()) {
duke@435 817 guarantee(SafepointSynchronize::is_at_safepoint(),
duke@435 818 "Required for objects to be initialized");
duke@435 819 }
duke@435 820 } // else make a concession at vm start-up
duke@435 821 }
duke@435 822
duke@435 823 // Apply the given closure to each object in the space
duke@435 824 void CompactibleFreeListSpace::object_iterate(ObjectClosure* blk) {
duke@435 825 assert_lock_strong(freelistLock());
duke@435 826 NOT_PRODUCT(verify_objects_initialized());
duke@435 827 HeapWord *cur, *limit;
duke@435 828 size_t curSize;
duke@435 829 for (cur = bottom(), limit = end(); cur < limit;
duke@435 830 cur += curSize) {
duke@435 831 curSize = block_size(cur);
duke@435 832 if (block_is_obj(cur)) {
duke@435 833 blk->do_object(oop(cur));
duke@435 834 }
duke@435 835 }
duke@435 836 }
duke@435 837
jmasa@952 838 // Apply the given closure to each live object in the space.
jmasa@952 839 // The usage of CompactibleFreeListSpace
jmasa@952 840 // by the ConcurrentMarkSweepGeneration for concurrent GC's allows
jmasa@952 841 // objects in the space to hold references to objects that are no longer
jmasa@952 842 // valid. For example, an object may reference another object
jmasa@952 843 // that has already been swept up (collected). This method uses
jmasa@952 844 // obj_is_alive() to determine whether it is safe to apply the closure to
jmasa@952 845 // an object. See obj_is_alive() for details on how liveness of an
jmasa@952 846 // object is decided.
jmasa@952 847
jmasa@952 848 void CompactibleFreeListSpace::safe_object_iterate(ObjectClosure* blk) {
jmasa@952 849 assert_lock_strong(freelistLock());
jmasa@952 850 NOT_PRODUCT(verify_objects_initialized());
jmasa@952 851 HeapWord *cur, *limit;
jmasa@952 852 size_t curSize;
jmasa@952 853 for (cur = bottom(), limit = end(); cur < limit;
jmasa@952 854 cur += curSize) {
jmasa@952 855 curSize = block_size(cur);
jmasa@952 856 if (block_is_obj(cur) && obj_is_alive(cur)) {
jmasa@952 857 blk->do_object(oop(cur));
jmasa@952 858 }
jmasa@952 859 }
jmasa@952 860 }
jmasa@952 861
duke@435 862 void CompactibleFreeListSpace::object_iterate_mem(MemRegion mr,
duke@435 863 UpwardsObjectClosure* cl) {
ysr@1580 864 assert_locked(freelistLock());
duke@435 865 NOT_PRODUCT(verify_objects_initialized());
mgerdin@6979 866 assert(!mr.is_empty(), "Should be non-empty");
mgerdin@6979 867 // We use MemRegion(bottom(), end()) rather than used_region() below
mgerdin@6979 868 // because the two are not necessarily equal for some kinds of
mgerdin@6979 869 // spaces, in particular, certain kinds of free list spaces.
mgerdin@6979 870 // We could use the more complicated but more precise:
mgerdin@6979 871 // MemRegion(used_region().start(), round_to(used_region().end(), CardSize))
mgerdin@6979 872 // but the slight imprecision seems acceptable in the assertion check.
mgerdin@6979 873 assert(MemRegion(bottom(), end()).contains(mr),
mgerdin@6979 874 "Should be within used space");
mgerdin@6979 875 HeapWord* prev = cl->previous(); // max address from last time
mgerdin@6979 876 if (prev >= mr.end()) { // nothing to do
mgerdin@6979 877 return;
mgerdin@6979 878 }
mgerdin@6979 879 // This assert will not work when we go from cms space to perm
mgerdin@6979 880 // space, and use the same closure. Easy fix deferred for later. XXX YSR
mgerdin@6979 881 // assert(prev == NULL || contains(prev), "Should be within space");
mgerdin@6979 882
mgerdin@6979 883 bool last_was_obj_array = false;
mgerdin@6979 884 HeapWord *blk_start_addr, *region_start_addr;
mgerdin@6979 885 if (prev > mr.start()) {
mgerdin@6979 886 region_start_addr = prev;
mgerdin@6979 887 blk_start_addr = prev;
mgerdin@6979 888 // The previous invocation may have pushed "prev" beyond the
mgerdin@6979 889 // last allocated block, yet there may still be blocks
mgerdin@6979 890 // in this region due to a particular coalescing policy.
mgerdin@6979 891 // Relax the assertion so that the case where the unallocated
mgerdin@6979 892 // block is maintained and "prev" is beyond the unallocated
mgerdin@6979 893 // block does not cause the assertion to fire.
mgerdin@6979 894 assert((BlockOffsetArrayUseUnallocatedBlock &&
mgerdin@6979 895 (!is_in(prev))) ||
mgerdin@6979 896 (blk_start_addr == block_start(region_start_addr)), "invariant");
mgerdin@6979 897 } else {
mgerdin@6979 898 region_start_addr = mr.start();
mgerdin@6979 899 blk_start_addr = block_start(region_start_addr);
mgerdin@6979 900 }
mgerdin@6979 901 HeapWord* region_end_addr = mr.end();
mgerdin@6979 902 MemRegion derived_mr(region_start_addr, region_end_addr);
mgerdin@6979 903 while (blk_start_addr < region_end_addr) {
mgerdin@6979 904 const size_t size = block_size(blk_start_addr);
mgerdin@6979 905 if (block_is_obj(blk_start_addr)) {
mgerdin@6979 906 last_was_obj_array = cl->do_object_bm(oop(blk_start_addr), derived_mr);
mgerdin@6979 907 } else {
mgerdin@6979 908 last_was_obj_array = false;
mgerdin@6979 909 }
mgerdin@6979 910 blk_start_addr += size;
mgerdin@6979 911 }
mgerdin@6979 912 if (!last_was_obj_array) {
mgerdin@6979 913 assert((bottom() <= blk_start_addr) && (blk_start_addr <= end()),
mgerdin@6979 914 "Should be within (closed) used space");
mgerdin@6979 915 assert(blk_start_addr > prev, "Invariant");
mgerdin@6979 916 cl->set_previous(blk_start_addr); // min address for next time
mgerdin@6979 917 }
duke@435 918 }
duke@435 919
duke@435 920
duke@435 921 // Callers of this iterator beware: The closure application should
duke@435 922 // be robust in the face of uninitialized objects and should (always)
duke@435 923 // return a correct size so that the next addr + size below gives us a
duke@435 924 // valid block boundary. [See for instance,
duke@435 925 // ScanMarkedObjectsAgainCarefullyClosure::do_object_careful()
duke@435 926 // in ConcurrentMarkSweepGeneration.cpp.]
duke@435 927 HeapWord*
duke@435 928 CompactibleFreeListSpace::object_iterate_careful_m(MemRegion mr,
duke@435 929 ObjectClosureCareful* cl) {
duke@435 930 assert_lock_strong(freelistLock());
duke@435 931 // Can't use used_region() below because it may not necessarily
duke@435 932 // be the same as [bottom(),end()); although we could
duke@435 933 // use [used_region().start(),round_to(used_region().end(),CardSize)),
duke@435 934 // that appears too cumbersome, so we just do the simpler check
duke@435 935 // in the assertion below.
duke@435 936 assert(!mr.is_empty() && MemRegion(bottom(),end()).contains(mr),
duke@435 937 "mr should be non-empty and within used space");
duke@435 938 HeapWord *addr, *end;
duke@435 939 size_t size;
duke@435 940 for (addr = block_start_careful(mr.start()), end = mr.end();
duke@435 941 addr < end; addr += size) {
duke@435 942 FreeChunk* fc = (FreeChunk*)addr;
jmasa@3732 943 if (fc->is_free()) {
duke@435 944 // Since we hold the free list lock, which protects direct
duke@435 945 // allocation in this generation by mutators, a free object
duke@435 946 // will remain free throughout this iteration code.
duke@435 947 size = fc->size();
duke@435 948 } else {
duke@435 949 // Note that the object need not necessarily be initialized,
duke@435 950 // because (for instance) the free list lock does NOT protect
duke@435 951 // object initialization. The closure application below must
duke@435 952 // therefore be correct in the face of uninitialized objects.
duke@435 953 size = cl->do_object_careful_m(oop(addr), mr);
duke@435 954 if (size == 0) {
duke@435 955 // An unparsable object found. Signal early termination.
duke@435 956 return addr;
duke@435 957 }
duke@435 958 }
duke@435 959 }
duke@435 960 return NULL;
duke@435 961 }
duke@435 962
duke@435 963
ysr@777 964 HeapWord* CompactibleFreeListSpace::block_start_const(const void* p) const {
duke@435 965 NOT_PRODUCT(verify_objects_initialized());
duke@435 966 return _bt.block_start(p);
duke@435 967 }
duke@435 968
duke@435 969 HeapWord* CompactibleFreeListSpace::block_start_careful(const void* p) const {
duke@435 970 return _bt.block_start_careful(p);
duke@435 971 }
duke@435 972
duke@435 973 size_t CompactibleFreeListSpace::block_size(const HeapWord* p) const {
duke@435 974 NOT_PRODUCT(verify_objects_initialized());
duke@435 975 // This must be volatile, or else there is a danger that the compiler
duke@435 976 // will compile the code below into a sometimes-infinite loop, by keeping
duke@435 977 // the value read the first time in a register.
duke@435 978 while (true) {
duke@435 979 // We must do this until we get a consistent view of the object.
coleenp@622 980 if (FreeChunk::indicatesFreeChunk(p)) {
coleenp@622 981 volatile FreeChunk* fc = (volatile FreeChunk*)p;
coleenp@622 982 size_t res = fc->size();
goetz@6493 983
goetz@6493 984 // Bugfix for systems with weak memory model (PPC64/IA64). The
goetz@6493 985 // block's free bit was set and we have read the size of the
goetz@6493 986 // block. Acquire and check the free bit again. If the block is
goetz@6493 987 // still free, the read size is correct.
goetz@6493 988 OrderAccess::acquire();
goetz@6493 989
coleenp@622 990 // If the object is still a free chunk, return the size, else it
coleenp@622 991 // has been allocated so try again.
coleenp@622 992 if (FreeChunk::indicatesFreeChunk(p)) {
duke@435 993 assert(res != 0, "Block size should not be 0");
duke@435 994 return res;
duke@435 995 }
coleenp@622 996 } else {
fyang@9975 997 // The barrier is required to prevent reordering of the free chunk check
fyang@9975 998 // and the klass read.
fyang@9975 999 OrderAccess::loadload();
fyang@9975 1000
coleenp@622 1001 // must read from what 'p' points to in each loop.
coleenp@4037 1002 Klass* k = ((volatile oopDesc*)p)->klass_or_null();
coleenp@622 1003 if (k != NULL) {
coleenp@4037 1004 assert(k->is_klass(), "Should really be klass oop.");
coleenp@622 1005 oop o = (oop)p;
coleenp@622 1006 assert(o->is_oop(true /* ignore mark word */), "Should be an oop.");
goetz@6493 1007
goetz@6493 1008 // Bugfix for systems with weak memory model (PPC64/IA64).
goetz@6493 1009 // The object o may be an array. Acquire to make sure that the array
goetz@6493 1010 // size (third word) is consistent.
goetz@6493 1011 OrderAccess::acquire();
goetz@6493 1012
coleenp@4037 1013 size_t res = o->size_given_klass(k);
coleenp@622 1014 res = adjustObjectSize(res);
coleenp@622 1015 assert(res != 0, "Block size should not be 0");
coleenp@622 1016 return res;
coleenp@622 1017 }
duke@435 1018 }
duke@435 1019 }
duke@435 1020 }
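// Illustrative note (not in the original source): the loop above implements a
// lock-free double-check protocol for reading a block's size concurrently with
// allocation. Schematically:
//   1. observe that the free bit is set (FreeChunk::indicatesFreeChunk(p));
//   2. speculatively read the chunk size;
//   3. OrderAccess::acquire() orders the re-check after that read;
//   4. re-check the free bit; if it is still set, the size read in step 2 was
//      taken while the chunk was free and is therefore valid.
// If the re-check fails, or the block was never free, the size is instead
// derived from the object's klass, guarded by an OrderAccess::loadload()
// barrier between the free-bit check and the klass read.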
duke@435 1021
coleenp@4037 1022 // TODO: Now that is_parsable is gone, we should combine these two functions.
duke@435 1023 // A variant of the above that uses the Printezis bits for
duke@435 1024 // unparsable but allocated objects. This avoids any possible
duke@435 1025 // stalls waiting for mutators to initialize objects, and is
duke@435 1026 // thus potentially faster than the variant above. However,
duke@435 1027 // this variant may return a zero size for a block that is
duke@435 1028 // under mutation and for which a consistent size cannot be
duke@435 1029 // inferred without stalling; see CMSCollector::block_size_if_printezis_bits().
duke@435 1030 size_t CompactibleFreeListSpace::block_size_no_stall(HeapWord* p,
duke@435 1031 const CMSCollector* c)
duke@435 1032 const {
duke@435 1033 assert(MemRegion(bottom(), end()).contains(p), "p not in space");
duke@435 1034 // This must be volatile, or else there is a danger that the compiler
duke@435 1035 // will compile the code below into a sometimes-infinite loop, by keeping
duke@435 1036 // the value read the first time in a register.
duke@435 1037 DEBUG_ONLY(uint loops = 0;)
duke@435 1038 while (true) {
duke@435 1039 // We must do this until we get a consistent view of the object.
coleenp@622 1040 if (FreeChunk::indicatesFreeChunk(p)) {
coleenp@622 1041 volatile FreeChunk* fc = (volatile FreeChunk*)p;
coleenp@622 1042 size_t res = fc->size();
goetz@6493 1043
goetz@6493 1044 // Bugfix for systems with weak memory model (PPC64/IA64). The
goetz@6493 1045 // free bit of the block was set and we have read the size of
goetz@6493 1046 // the block. Acquire and check the free bit again. If the
goetz@6493 1047 // block is still free, the read size is correct.
goetz@6493 1048 OrderAccess::acquire();
goetz@6493 1049
coleenp@622 1050 if (FreeChunk::indicatesFreeChunk(p)) {
duke@435 1051 assert(res != 0, "Block size should not be 0");
duke@435 1052 assert(loops == 0, "Should be 0");
duke@435 1053 return res;
duke@435 1054 }
duke@435 1055 } else {
fyang@9975 1056 // The barrier is required to prevent reordering of the free chunk check
fyang@9975 1057 // and the klass read.
fyang@9975 1058 OrderAccess::loadload();
fyang@9975 1059
coleenp@622 1060 // must read from what 'p' points to in each loop.
coleenp@4037 1061 Klass* k = ((volatile oopDesc*)p)->klass_or_null();
ysr@2533 1062 // We trust the size of any object that has a non-NULL
ysr@2533 1063 // klass and (for those in the perm gen) is parsable
ysr@2533 1064 // -- irrespective of its conc_safe-ty.
coleenp@4037 1065 if (k != NULL) {
coleenp@4037 1066 assert(k->is_klass(), "Should really be klass oop.");
coleenp@622 1067 oop o = (oop)p;
coleenp@622 1068 assert(o->is_oop(), "Should be an oop");
goetz@6493 1069
goetz@6493 1070 // Bugfix for systems with weak memory model (PPC64/IA64).
goetz@6493 1071 // The object o may be an array. Acquire to make sure that the array
goetz@6493 1072 // size (third word) is consistent.
goetz@6493 1073 OrderAccess::acquire();
goetz@6493 1074
coleenp@4037 1075 size_t res = o->size_given_klass(k);
coleenp@622 1076 res = adjustObjectSize(res);
coleenp@622 1077 assert(res != 0, "Block size should not be 0");
coleenp@622 1078 return res;
coleenp@622 1079 } else {
ysr@2533 1080 // May return 0 if P-bits not present.
coleenp@622 1081 return c->block_size_if_printezis_bits(p);
coleenp@622 1082 }
duke@435 1083 }
duke@435 1084 assert(loops == 0, "Can loop at most once");
duke@435 1085 DEBUG_ONLY(loops++;)
duke@435 1086 }
duke@435 1087 }
duke@435 1088
duke@435 1089 size_t CompactibleFreeListSpace::block_size_nopar(const HeapWord* p) const {
duke@435 1090 NOT_PRODUCT(verify_objects_initialized());
duke@435 1091 assert(MemRegion(bottom(), end()).contains(p), "p not in space");
duke@435 1092 FreeChunk* fc = (FreeChunk*)p;
jmasa@3732 1093 if (fc->is_free()) {
duke@435 1094 return fc->size();
duke@435 1095 } else {
duke@435 1096 // Ignore mark word because this may be a recently promoted
duke@435 1097 // object whose mark word is used to chain together grey
duke@435 1098 // objects (the last one would have a null value).
duke@435 1099 assert(oop(p)->is_oop(true), "Should be an oop");
duke@435 1100 return adjustObjectSize(oop(p)->size());
duke@435 1101 }
duke@435 1102 }
duke@435 1103
duke@435 1104 // This implementation assumes that the property of "being an object" is
duke@435 1105 // stable. But being a free chunk may not be (because of parallel
duke@435 1106 // promotion.)
duke@435 1107 bool CompactibleFreeListSpace::block_is_obj(const HeapWord* p) const {
duke@435 1108 FreeChunk* fc = (FreeChunk*)p;
duke@435 1109 assert(is_in_reserved(p), "Should be in space");
duke@435 1110 // When doing a mark-sweep-compact of the CMS generation, this
duke@435 1111 // assertion may fail because prepare_for_compaction() uses
duke@435 1112 // space that is garbage to maintain information on ranges of
duke@435 1113 // live objects so that these live ranges can be moved as a whole.
duke@435 1114 // Comment out this assertion until that problem can be solved
duke@435 1115 // (i.e., that the block start calculation may look at objects
duke@435 1116 // at addresses below "p" in finding the object that contains "p",
duke@435 1117 // and those objects (if garbage) may have been modified to hold
duke@435 1118 // live range information).
jmasa@2188 1119 // assert(CollectedHeap::use_parallel_gc_threads() || _bt.block_start(p) == p,
jmasa@2188 1120 // "Should be a block boundary");
coleenp@622 1121 if (FreeChunk::indicatesFreeChunk(p)) return false;
fyang@9975 1122
fyang@9975 1123 // The barrier is required to prevent reordering of the free chunk check
fyang@9975 1124 // and the klass read.
fyang@9975 1125 OrderAccess::loadload();
fyang@9975 1126
coleenp@4037 1127 Klass* k = oop(p)->klass_or_null();
duke@435 1128 if (k != NULL) {
duke@435 1129 // Ignore mark word because it may have been used to
duke@435 1130 // chain together promoted objects (the last one
duke@435 1131 // would have a null value).
duke@435 1132 assert(oop(p)->is_oop(true), "Should be an oop");
duke@435 1133 return true;
duke@435 1134 } else {
duke@435 1135 return false; // Was not an object at the start of collection.
duke@435 1136 }
duke@435 1137 }
duke@435 1138
duke@435 1139 // Check if the object is alive. This fact is checked either by consulting
duke@435 1140 // the main marking bitmap in the sweeping phase or, if it's a permanent
duke@435 1141 // generation and we're not in the sweeping phase, by checking the
duke@435 1142 // perm_gen_verify_bit_map where we store the "deadness" information if
duke@435 1143 // we did not sweep the perm gen in the most recent previous GC cycle.
duke@435 1144 bool CompactibleFreeListSpace::obj_is_alive(const HeapWord* p) const {
ysr@2301 1145 assert(SafepointSynchronize::is_at_safepoint() || !is_init_completed(),
ysr@2301 1146 "Else races are possible");
ysr@2293 1147 assert(block_is_obj(p), "The address should point to an object");
duke@435 1148
duke@435 1149 // If we're sweeping, we use object liveness information from the main bit map
duke@435 1150 // for both perm gen and old gen.
duke@435 1151 // We don't need to lock the bitmap (live_map or dead_map below), because
duke@435 1152 // EITHER we are in the middle of the sweeping phase, and the
duke@435 1153 // main marking bit map (live_map below) is locked,
duke@435 1154 // OR we're in other phases and perm_gen_verify_bit_map (dead_map below)
duke@435 1155 // is stable, because it's mutated only in the sweeping phase.
ysr@2293 1156 // NOTE: This method is also used by jmap where, if class unloading is
ysr@2293 1157 // off, the results can return "false" for legitimate perm objects,
ysr@2293 1158 // when we are not in the midst of a sweeping phase, which can result
ysr@2293 1159 // in jmap not reporting certain perm gen objects. This will be moot
ysr@2293 1160 // if/when the perm gen goes away in the future.
duke@435 1161 if (_collector->abstract_state() == CMSCollector::Sweeping) {
duke@435 1162 CMSBitMap* live_map = _collector->markBitMap();
ysr@2293 1163 return live_map->par_isMarked((HeapWord*) p);
duke@435 1164 }
duke@435 1165 return true;
duke@435 1166 }
duke@435 1167
duke@435 1168 bool CompactibleFreeListSpace::block_is_obj_nopar(const HeapWord* p) const {
duke@435 1169 FreeChunk* fc = (FreeChunk*)p;
duke@435 1170 assert(is_in_reserved(p), "Should be in space");
duke@435 1171 assert(_bt.block_start(p) == p, "Should be a block boundary");
jmasa@3732 1172 if (!fc->is_free()) {
duke@435 1173 // Ignore mark word because it may have been used to
duke@435 1174 // chain together promoted objects (the last one
duke@435 1175 // would have a null value).
duke@435 1176 assert(oop(p)->is_oop(true), "Should be an oop");
duke@435 1177 return true;
duke@435 1178 }
duke@435 1179 return false;
duke@435 1180 }
duke@435 1181
duke@435 1182 // "MT-safe but not guaranteed MT-precise" (TM); you may get an
duke@435 1183 // approximate answer if you don't hold the freelistLock when you call this.
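// (Without the lock, the per-list counts read in the loop below can change
//  concurrently, so the sum returned may be slightly stale.)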
duke@435 1184 size_t CompactibleFreeListSpace::totalSizeInIndexedFreeLists() const {
duke@435 1185 size_t size = 0;
duke@435 1186 for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
duke@435 1187 debug_only(
duke@435 1188 // We may be calling here without the lock in which case we
duke@435 1189 // won't do this modest sanity check.
duke@435 1190 if (freelistLock()->owned_by_self()) {
duke@435 1191 size_t total_list_size = 0;
duke@435 1192 for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
duke@435 1193 fc = fc->next()) {
duke@435 1194 total_list_size += i;
duke@435 1195 }
duke@435 1196 assert(total_list_size == i * _indexedFreeList[i].count(),
duke@435 1197 "Count in list is incorrect");
duke@435 1198 }
duke@435 1199 )
duke@435 1200 size += i * _indexedFreeList[i].count();
duke@435 1201 }
duke@435 1202 return size;
duke@435 1203 }
duke@435 1204
duke@435 1205 HeapWord* CompactibleFreeListSpace::par_allocate(size_t size) {
duke@435 1206 MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
duke@435 1207 return allocate(size);
duke@435 1208 }
duke@435 1209
duke@435 1210 HeapWord*
duke@435 1211 CompactibleFreeListSpace::getChunkFromSmallLinearAllocBlockRemainder(size_t size) {
duke@435 1212 return getChunkFromLinearAllocBlockRemainder(&_smallLinearAllocBlock, size);
duke@435 1213 }
duke@435 1214
duke@435 1215 HeapWord* CompactibleFreeListSpace::allocate(size_t size) {
duke@435 1216 assert_lock_strong(freelistLock());
duke@435 1217 HeapWord* res = NULL;
duke@435 1218 assert(size == adjustObjectSize(size),
duke@435 1219 "use adjustObjectSize() before calling into allocate()");
duke@435 1220
duke@435 1221 if (_adaptive_freelists) {
duke@435 1222 res = allocate_adaptive_freelists(size);
duke@435 1223 } else { // non-adaptive free lists
duke@435 1224 res = allocate_non_adaptive_freelists(size);
duke@435 1225 }
duke@435 1226
duke@435 1227 if (res != NULL) {
duke@435 1228 // check that res does lie in this space!
duke@435 1229 assert(is_in_reserved(res), "Not in this space!");
duke@435 1230 assert(is_aligned((void*)res), "alignment check");
duke@435 1231
duke@435 1232 FreeChunk* fc = (FreeChunk*)res;
duke@435 1233 fc->markNotFree();
jmasa@3732 1234 assert(!fc->is_free(), "shouldn't be marked free");
coleenp@622 1235 assert(oop(fc)->klass_or_null() == NULL, "should look uninitialized");
duke@435 1236 // Verify that the block offset table shows this to
duke@435 1237 // be a single block, but not one which is unallocated.
duke@435 1238 _bt.verify_single_block(res, size);
duke@435 1239 _bt.verify_not_unallocated(res, size);
duke@435 1240 // mangle a just allocated object with a distinct pattern.
duke@435 1241 debug_only(fc->mangleAllocated(size));
duke@435 1242 }
duke@435 1243
zgu@9793 1244 // During GC we do not need to recalculate the stable used value for
zgu@9793 1245 // every allocation in old gen. It is done once at the end of GC instead
zgu@9793 1246 // for performance reasons.
zgu@9793 1247 if (!Universe::heap()->is_gc_active()) {
zgu@9793 1248 recalculate_used_stable();
zgu@9793 1249 }
zgu@9793 1250
duke@435 1251 return res;
duke@435 1252 }
duke@435 1253
duke@435 1254 HeapWord* CompactibleFreeListSpace::allocate_non_adaptive_freelists(size_t size) {
duke@435 1255 HeapWord* res = NULL;
duke@435 1256 // try and use linear allocation for smaller blocks
duke@435 1257 if (size < _smallLinearAllocBlock._allocation_size_limit) {
duke@435 1258 // if successful, the following also adjusts block offset table
duke@435 1259 res = getChunkFromSmallLinearAllocBlock(size);
duke@435 1260 }
duke@435 1261 // Else triage to indexed lists for smaller sizes
duke@435 1262 if (res == NULL) {
duke@435 1263 if (size < SmallForDictionary) {
duke@435 1264 res = (HeapWord*) getChunkFromIndexedFreeList(size);
duke@435 1265 } else {
duke@435 1266 // else get it from the big dictionary; if even this doesn't
duke@435 1267 // work we are out of luck.
duke@435 1268 res = (HeapWord*)getChunkFromDictionaryExact(size);
duke@435 1269 }
duke@435 1270 }
duke@435 1271
duke@435 1272 return res;
duke@435 1273 }
duke@435 1274
duke@435 1275 HeapWord* CompactibleFreeListSpace::allocate_adaptive_freelists(size_t size) {
duke@435 1276 assert_lock_strong(freelistLock());
duke@435 1277 HeapWord* res = NULL;
duke@435 1278 assert(size == adjustObjectSize(size),
duke@435 1279 "use adjustObjectSize() before calling into allocate()");
duke@435 1280
duke@435 1281 // Strategy
duke@435 1282 // if small
duke@435 1283   //     exact size from small object indexed list
duke@435 1284 // small or large linear allocation block (linAB) as appropriate
duke@435 1285 // take from lists of greater sized chunks
duke@435 1286 // else
duke@435 1287 // dictionary
duke@435 1288 // small or large linear allocation block if it has the space
duke@435 1289 // Try allocating exact size from indexTable first
duke@435 1290 if (size < IndexSetSize) {
duke@435 1291 res = (HeapWord*) getChunkFromIndexedFreeList(size);
duke@435 1292     if (res != NULL) {
duke@435 1293 assert(res != (HeapWord*)_indexedFreeList[size].head(),
duke@435 1294 "Not removed from free list");
duke@435 1295 // no block offset table adjustment is necessary on blocks in
duke@435 1296 // the indexed lists.
duke@435 1297
duke@435 1298 // Try allocating from the small LinAB
duke@435 1299 } else if (size < _smallLinearAllocBlock._allocation_size_limit &&
duke@435 1300 (res = getChunkFromSmallLinearAllocBlock(size)) != NULL) {
duke@435 1301 // if successful, the above also adjusts block offset table
duke@435 1302 // Note that this call will refill the LinAB to
duke@435 1303           // satisfy the request.  This is different from
duke@435 1304 // evm.
duke@435 1305 // Don't record chunk off a LinAB? smallSplitBirth(size);
duke@435 1306 } else {
duke@435 1307 // Raid the exact free lists larger than size, even if they are not
duke@435 1308 // overpopulated.
duke@435 1309 res = (HeapWord*) getChunkFromGreater(size);
duke@435 1310 }
duke@435 1311 } else {
duke@435 1312 // Big objects get allocated directly from the dictionary.
duke@435 1313 res = (HeapWord*) getChunkFromDictionaryExact(size);
duke@435 1314 if (res == NULL) {
duke@435 1315 // Try hard not to fail since an allocation failure will likely
duke@435 1316 // trigger a synchronous GC. Try to get the space from the
duke@435 1317 // allocation blocks.
duke@435 1318 res = getChunkFromSmallLinearAllocBlockRemainder(size);
duke@435 1319 }
duke@435 1320 }
duke@435 1321
duke@435 1322 return res;
duke@435 1323 }
duke@435 1324
duke@435 1325 // A worst-case estimate of the space required (in HeapWords) to expand the heap
duke@435 1326 // when promoting obj.
duke@435 1327 size_t CompactibleFreeListSpace::expansionSpaceRequired(size_t obj_size) const {
duke@435 1328 // Depending on the object size, expansion may require refilling either a
duke@435 1329 // bigLAB or a smallLAB plus refilling a PromotionInfo object. MinChunkSize
duke@435 1330 // is added because the dictionary may over-allocate to avoid fragmentation.
duke@435 1331 size_t space = obj_size;
duke@435 1332 if (!_adaptive_freelists) {
duke@435 1333 space = MAX2(space, _smallLinearAllocBlock._refillSize);
duke@435 1334 }
duke@435 1335 space += _promoInfo.refillSize() + 2 * MinChunkSize;
duke@435 1336 return space;
duke@435 1337 }
duke@435 1338
duke@435 1339 FreeChunk* CompactibleFreeListSpace::getChunkFromGreater(size_t numWords) {
duke@435 1340 FreeChunk* ret;
duke@435 1341
duke@435 1342 assert(numWords >= MinChunkSize, "Size is less than minimum");
duke@435 1343 assert(linearAllocationWouldFail() || bestFitFirst(),
duke@435 1344 "Should not be here");
duke@435 1345
duke@435 1346 size_t i;
duke@435 1347 size_t currSize = numWords + MinChunkSize;
duke@435 1348 assert(currSize % MinObjAlignment == 0, "currSize should be aligned");
duke@435 1349 for (i = currSize; i < IndexSetSize; i += IndexSetStride) {
jmasa@4196 1350 AdaptiveFreeList<FreeChunk>* fl = &_indexedFreeList[i];
duke@435 1351 if (fl->head()) {
duke@435 1352 ret = getFromListGreater(fl, numWords);
jmasa@3732 1353 assert(ret == NULL || ret->is_free(), "Should be returning a free chunk");
duke@435 1354 return ret;
duke@435 1355 }
duke@435 1356 }
duke@435 1357
duke@435 1358 currSize = MAX2((size_t)SmallForDictionary,
duke@435 1359 (size_t)(numWords + MinChunkSize));
duke@435 1360
duke@435 1361 /* Try to get a chunk that satisfies request, while avoiding
duke@435 1362 fragmentation that can't be handled. */
duke@435 1363 {
jmasa@3732 1364 ret = dictionary()->get_chunk(currSize);
duke@435 1365 if (ret != NULL) {
duke@435 1366 assert(ret->size() - numWords >= MinChunkSize,
duke@435 1367 "Chunk is too small");
duke@435 1368 _bt.allocated((HeapWord*)ret, ret->size());
duke@435 1369 /* Carve returned chunk. */
duke@435 1370 (void) splitChunkAndReturnRemainder(ret, numWords);
duke@435 1371 /* Label this as no longer a free chunk. */
jmasa@3732 1372 assert(ret->is_free(), "This chunk should be free");
jmasa@3732 1373 ret->link_prev(NULL);
duke@435 1374 }
jmasa@3732 1375 assert(ret == NULL || ret->is_free(), "Should be returning a free chunk");
duke@435 1376 return ret;
duke@435 1377 }
duke@435 1378 ShouldNotReachHere();
duke@435 1379 }
duke@435 1380
ysr@3220 1381 bool CompactibleFreeListSpace::verifyChunkInIndexedFreeLists(FreeChunk* fc) const {
duke@435 1382 assert(fc->size() < IndexSetSize, "Size of chunk is too large");
jmasa@3732 1383 return _indexedFreeList[fc->size()].verify_chunk_in_free_list(fc);
duke@435 1384 }
duke@435 1385
ysr@3220 1386 bool CompactibleFreeListSpace::verify_chunk_is_linear_alloc_block(FreeChunk* fc) const {
ysr@3220 1387 assert((_smallLinearAllocBlock._ptr != (HeapWord*)fc) ||
ysr@3220 1388 (_smallLinearAllocBlock._word_size == fc->size()),
ysr@3220 1389 "Linear allocation block shows incorrect size");
ysr@3220 1390 return ((_smallLinearAllocBlock._ptr == (HeapWord*)fc) &&
ysr@3220 1391 (_smallLinearAllocBlock._word_size == fc->size()));
ysr@3220 1392 }
ysr@3220 1393
ysr@3220 1394 // Check if the purported free chunk is present either as a linear
ysr@3220 1395 // allocation block, the size-indexed table of (smaller) free blocks,
ysr@3220 1396 // or the larger free blocks kept in the binary tree dictionary.
jmasa@3732 1397 bool CompactibleFreeListSpace::verify_chunk_in_free_list(FreeChunk* fc) const {
ysr@3220 1398 if (verify_chunk_is_linear_alloc_block(fc)) {
ysr@3220 1399 return true;
ysr@3220 1400 } else if (fc->size() < IndexSetSize) {
ysr@3220 1401 return verifyChunkInIndexedFreeLists(fc);
ysr@3220 1402 } else {
jmasa@3732 1403 return dictionary()->verify_chunk_in_free_list(fc);
duke@435 1404 }
duke@435 1405 }
duke@435 1406
duke@435 1407 #ifndef PRODUCT
duke@435 1408 void CompactibleFreeListSpace::assert_locked() const {
duke@435 1409 CMSLockVerifier::assert_locked(freelistLock(), parDictionaryAllocLock());
duke@435 1410 }
ysr@1580 1411
ysr@1580 1412 void CompactibleFreeListSpace::assert_locked(const Mutex* lock) const {
ysr@1580 1413 CMSLockVerifier::assert_locked(lock);
ysr@1580 1414 }
duke@435 1415 #endif
duke@435 1416
duke@435 1417 FreeChunk* CompactibleFreeListSpace::allocateScratch(size_t size) {
duke@435 1418 // In the parallel case, the main thread holds the free list lock
duke@435 1419   // on behalf of the parallel threads.
duke@435 1420 FreeChunk* fc;
duke@435 1421 {
duke@435 1422 // If GC is parallel, this might be called by several threads.
duke@435 1423 // This should be rare enough that the locking overhead won't affect
duke@435 1424 // the sequential code.
duke@435 1425 MutexLockerEx x(parDictionaryAllocLock(),
duke@435 1426 Mutex::_no_safepoint_check_flag);
duke@435 1427 fc = getChunkFromDictionary(size);
duke@435 1428 }
duke@435 1429 if (fc != NULL) {
duke@435 1430 fc->dontCoalesce();
jmasa@3732 1431 assert(fc->is_free(), "Should be free, but not coalescable");
duke@435 1432 // Verify that the block offset table shows this to
duke@435 1433 // be a single block, but not one which is unallocated.
duke@435 1434 _bt.verify_single_block((HeapWord*)fc, fc->size());
duke@435 1435 _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
duke@435 1436 }
duke@435 1437 return fc;
duke@435 1438 }
duke@435 1439
coleenp@548 1440 oop CompactibleFreeListSpace::promote(oop obj, size_t obj_size) {
duke@435 1441 assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
duke@435 1442 assert_locked();
duke@435 1443
duke@435 1444 // if we are tracking promotions, then first ensure space for
duke@435 1445 // promotion (including spooling space for saving header if necessary).
duke@435 1446 // then allocate and copy, then track promoted info if needed.
duke@435 1447 // When tracking (see PromotionInfo::track()), the mark word may
duke@435 1448 // be displaced and in this case restoration of the mark word
duke@435 1449 // occurs in the (oop_since_save_marks_)iterate phase.
duke@435 1450 if (_promoInfo.tracking() && !_promoInfo.ensure_spooling_space()) {
duke@435 1451 return NULL;
duke@435 1452 }
duke@435 1453 // Call the allocate(size_t, bool) form directly to avoid the
duke@435 1454 // additional call through the allocate(size_t) form. Having
duke@435 1455   // the compiler inline the call is problematic because allocate(size_t)
duke@435 1456 // is a virtual method.
duke@435 1457 HeapWord* res = allocate(adjustObjectSize(obj_size));
duke@435 1458 if (res != NULL) {
duke@435 1459 Copy::aligned_disjoint_words((HeapWord*)obj, res, obj_size);
duke@435 1460 // if we should be tracking promotions, do so.
duke@435 1461 if (_promoInfo.tracking()) {
duke@435 1462 _promoInfo.track((PromotedObject*)res);
duke@435 1463 }
duke@435 1464 }
duke@435 1465 return oop(res);
duke@435 1466 }
duke@435 1467
duke@435 1468 HeapWord*
duke@435 1469 CompactibleFreeListSpace::getChunkFromSmallLinearAllocBlock(size_t size) {
duke@435 1470 assert_locked();
duke@435 1471 assert(size >= MinChunkSize, "minimum chunk size");
duke@435 1472 assert(size < _smallLinearAllocBlock._allocation_size_limit,
duke@435 1473 "maximum from smallLinearAllocBlock");
duke@435 1474 return getChunkFromLinearAllocBlock(&_smallLinearAllocBlock, size);
duke@435 1475 }
duke@435 1476
duke@435 1477 HeapWord*
duke@435 1478 CompactibleFreeListSpace::getChunkFromLinearAllocBlock(LinearAllocBlock *blk,
duke@435 1479 size_t size) {
duke@435 1480 assert_locked();
duke@435 1481 assert(size >= MinChunkSize, "too small");
duke@435 1482 HeapWord* res = NULL;
duke@435 1483   // Try to do linear allocation from blk, making sure that the block is not empty.
duke@435 1484 if (blk->_word_size == 0) {
duke@435 1485 // We have probably been unable to fill this either in the prologue or
duke@435 1486 // when it was exhausted at the last linear allocation. Bail out until
duke@435 1487 // next time.
duke@435 1488 assert(blk->_ptr == NULL, "consistency check");
duke@435 1489 return NULL;
duke@435 1490 }
duke@435 1491 assert(blk->_word_size != 0 && blk->_ptr != NULL, "consistency check");
duke@435 1492 res = getChunkFromLinearAllocBlockRemainder(blk, size);
duke@435 1493 if (res != NULL) return res;
duke@435 1494
duke@435 1495 // about to exhaust this linear allocation block
duke@435 1496 if (blk->_word_size == size) { // exactly satisfied
duke@435 1497 res = blk->_ptr;
duke@435 1498 _bt.allocated(res, blk->_word_size);
duke@435 1499 } else if (size + MinChunkSize <= blk->_refillSize) {
ysr@1580 1500 size_t sz = blk->_word_size;
duke@435 1501 // Update _unallocated_block if the size is such that chunk would be
duke@435 1502 // returned to the indexed free list. All other chunks in the indexed
duke@435 1503 // free lists are allocated from the dictionary so that _unallocated_block
duke@435 1504     // has already been adjusted for them.  Do it here so that the cost
duke@435 1505     // is covered for all chunks added back to the indexed free lists.
ysr@1580 1506 if (sz < SmallForDictionary) {
ysr@1580 1507 _bt.allocated(blk->_ptr, sz);
duke@435 1508 }
duke@435 1509 // Return the chunk that isn't big enough, and then refill below.
ysr@1580 1510 addChunkToFreeLists(blk->_ptr, sz);
jmasa@3732 1511 split_birth(sz);
duke@435 1512 // Don't keep statistics on adding back chunk from a LinAB.
duke@435 1513 } else {
duke@435 1514 // A refilled block would not satisfy the request.
duke@435 1515 return NULL;
duke@435 1516 }
duke@435 1517
duke@435 1518 blk->_ptr = NULL; blk->_word_size = 0;
duke@435 1519 refillLinearAllocBlock(blk);
duke@435 1520 assert(blk->_ptr == NULL || blk->_word_size >= size + MinChunkSize,
duke@435 1521 "block was replenished");
duke@435 1522 if (res != NULL) {
jmasa@3732 1523 split_birth(size);
duke@435 1524 repairLinearAllocBlock(blk);
duke@435 1525 } else if (blk->_ptr != NULL) {
duke@435 1526 res = blk->_ptr;
duke@435 1527 size_t blk_size = blk->_word_size;
duke@435 1528 blk->_word_size -= size;
duke@435 1529 blk->_ptr += size;
jmasa@3732 1530 split_birth(size);
duke@435 1531 repairLinearAllocBlock(blk);
duke@435 1532 // Update BOT last so that other (parallel) GC threads see a consistent
duke@435 1533 // view of the BOT and free blocks.
duke@435 1534 // Above must occur before BOT is updated below.
ysr@2071 1535 OrderAccess::storestore();
duke@435 1536 _bt.split_block(res, blk_size, size); // adjust block offset table
duke@435 1537 }
duke@435 1538 return res;
duke@435 1539 }
duke@435 1540
duke@435 1541 HeapWord* CompactibleFreeListSpace::getChunkFromLinearAllocBlockRemainder(
duke@435 1542 LinearAllocBlock* blk,
duke@435 1543 size_t size) {
duke@435 1544 assert_locked();
duke@435 1545 assert(size >= MinChunkSize, "too small");
duke@435 1546
duke@435 1547 HeapWord* res = NULL;
duke@435 1548 // This is the common case. Keep it simple.
duke@435 1549 if (blk->_word_size >= size + MinChunkSize) {
duke@435 1550 assert(blk->_ptr != NULL, "consistency check");
duke@435 1551 res = blk->_ptr;
duke@435 1552 // Note that the BOT is up-to-date for the linAB before allocation. It
duke@435 1553 // indicates the start of the linAB. The split_block() updates the
duke@435 1554 // BOT for the linAB after the allocation (indicates the start of the
duke@435 1555 // next chunk to be allocated).
duke@435 1556 size_t blk_size = blk->_word_size;
duke@435 1557 blk->_word_size -= size;
duke@435 1558 blk->_ptr += size;
jmasa@3732 1559 split_birth(size);
duke@435 1560 repairLinearAllocBlock(blk);
duke@435 1561 // Update BOT last so that other (parallel) GC threads see a consistent
duke@435 1562 // view of the BOT and free blocks.
duke@435 1563 // Above must occur before BOT is updated below.
ysr@2071 1564 OrderAccess::storestore();
duke@435 1565 _bt.split_block(res, blk_size, size); // adjust block offset table
duke@435 1566 _bt.allocated(res, size);
duke@435 1567 }
duke@435 1568 return res;
duke@435 1569 }
duke@435 1570
duke@435 1571 FreeChunk*
duke@435 1572 CompactibleFreeListSpace::getChunkFromIndexedFreeList(size_t size) {
duke@435 1573 assert_locked();
duke@435 1574 assert(size < SmallForDictionary, "just checking");
duke@435 1575 FreeChunk* res;
jmasa@3732 1576 res = _indexedFreeList[size].get_chunk_at_head();
duke@435 1577 if (res == NULL) {
duke@435 1578 res = getChunkFromIndexedFreeListHelper(size);
duke@435 1579 }
duke@435 1580 _bt.verify_not_unallocated((HeapWord*) res, size);
ysr@1580 1581 assert(res == NULL || res->size() == size, "Incorrect block size");
duke@435 1582 return res;
duke@435 1583 }
duke@435 1584
duke@435 1585 FreeChunk*
ysr@1580 1586 CompactibleFreeListSpace::getChunkFromIndexedFreeListHelper(size_t size,
ysr@1580 1587 bool replenish) {
duke@435 1588 assert_locked();
duke@435 1589 FreeChunk* fc = NULL;
duke@435 1590 if (size < SmallForDictionary) {
duke@435 1591 assert(_indexedFreeList[size].head() == NULL ||
duke@435 1592 _indexedFreeList[size].surplus() <= 0,
duke@435 1593            "List for this size should be empty or underpopulated");
duke@435 1594 // Try best fit in exact lists before replenishing the list
duke@435 1595 if (!bestFitFirst() || (fc = bestFitSmall(size)) == NULL) {
duke@435 1596 // Replenish list.
duke@435 1597 //
duke@435 1598 // Things tried that failed.
duke@435 1599 // Tried allocating out of the two LinAB's first before
duke@435 1600 // replenishing lists.
duke@435 1601 // Tried small linAB of size 256 (size in indexed list)
duke@435 1602 // and replenishing indexed lists from the small linAB.
duke@435 1603 //
duke@435 1604 FreeChunk* newFc = NULL;
ysr@1580 1605 const size_t replenish_size = CMSIndexedFreeListReplenish * size;
duke@435 1606 if (replenish_size < SmallForDictionary) {
duke@435 1607 // Do not replenish from an underpopulated size.
duke@435 1608 if (_indexedFreeList[replenish_size].surplus() > 0 &&
duke@435 1609 _indexedFreeList[replenish_size].head() != NULL) {
jmasa@3732 1610 newFc = _indexedFreeList[replenish_size].get_chunk_at_head();
ysr@1580 1611 } else if (bestFitFirst()) {
duke@435 1612 newFc = bestFitSmall(replenish_size);
duke@435 1613 }
duke@435 1614 }
ysr@1580 1615 if (newFc == NULL && replenish_size > size) {
ysr@1580 1616 assert(CMSIndexedFreeListReplenish > 1, "ctl pt invariant");
ysr@1580 1617 newFc = getChunkFromIndexedFreeListHelper(replenish_size, false);
ysr@1580 1618 }
ysr@1580 1619 // Note: The stats update re split-death of block obtained above
ysr@1580 1620 // will be recorded below precisely when we know we are going to
ysr@1580 1621     // be actually splitting it into more than one piece.
duke@435 1622 if (newFc != NULL) {
ysr@1580 1623 if (replenish || CMSReplenishIntermediate) {
ysr@1580 1624 // Replenish this list and return one block to caller.
ysr@1580 1625 size_t i;
ysr@1580 1626 FreeChunk *curFc, *nextFc;
ysr@1580 1627 size_t num_blk = newFc->size() / size;
ysr@1580 1628 assert(num_blk >= 1, "Smaller than requested?");
ysr@1580 1629 assert(newFc->size() % size == 0, "Should be integral multiple of request");
ysr@1580 1630 if (num_blk > 1) {
ysr@1580 1631 // we are sure we will be splitting the block just obtained
ysr@1580 1632 // into multiple pieces; record the split-death of the original
ysr@1580 1633 splitDeath(replenish_size);
ysr@1580 1634 }
ysr@1580 1635 // carve up and link blocks 0, ..., num_blk - 2
ysr@1580 1636 // The last chunk is not added to the lists but is returned as the
ysr@1580 1637 // free chunk.
ysr@1580 1638 for (curFc = newFc, nextFc = (FreeChunk*)((HeapWord*)curFc + size),
ysr@1580 1639 i = 0;
ysr@1580 1640 i < (num_blk - 1);
ysr@1580 1641 curFc = nextFc, nextFc = (FreeChunk*)((HeapWord*)nextFc + size),
ysr@1580 1642 i++) {
jmasa@3732 1643 curFc->set_size(size);
ysr@1580 1644 // Don't record this as a return in order to try and
ysr@1580 1645 // determine the "returns" from a GC.
ysr@1580 1646             _bt.verify_not_unallocated((HeapWord*) curFc, size);
jmasa@3732 1647 _indexedFreeList[size].return_chunk_at_tail(curFc, false);
ysr@1580 1648 _bt.mark_block((HeapWord*)curFc, size);
jmasa@3732 1649 split_birth(size);
ysr@1580 1650 // Don't record the initial population of the indexed list
ysr@1580 1651 // as a split birth.
ysr@1580 1652 }
ysr@1580 1653
ysr@1580 1654 // check that the arithmetic was OK above
ysr@1580 1655 assert((HeapWord*)nextFc == (HeapWord*)newFc + num_blk*size,
ysr@1580 1656 "inconsistency in carving newFc");
jmasa@3732 1657 curFc->set_size(size);
duke@435 1658 _bt.mark_block((HeapWord*)curFc, size);
jmasa@3732 1659 split_birth(size);
ysr@1580 1660 fc = curFc;
ysr@1580 1661 } else {
ysr@1580 1662 // Return entire block to caller
ysr@1580 1663 fc = newFc;
duke@435 1664 }
duke@435 1665 }
duke@435 1666 }
duke@435 1667 } else {
duke@435 1668 // Get a free chunk from the free chunk dictionary to be returned to
duke@435 1669 // replenish the indexed free list.
duke@435 1670 fc = getChunkFromDictionaryExact(size);
duke@435 1671 }
jmasa@3732 1672 // assert(fc == NULL || fc->is_free(), "Should be returning a free chunk");
duke@435 1673 return fc;
duke@435 1674 }
duke@435 1675
duke@435 1676 FreeChunk*
duke@435 1677 CompactibleFreeListSpace::getChunkFromDictionary(size_t size) {
duke@435 1678 assert_locked();
jmasa@4488 1679 FreeChunk* fc = _dictionary->get_chunk(size,
jmasa@4488 1680 FreeBlockDictionary<FreeChunk>::atLeast);
duke@435 1681 if (fc == NULL) {
duke@435 1682 return NULL;
duke@435 1683 }
duke@435 1684 _bt.allocated((HeapWord*)fc, fc->size());
duke@435 1685 if (fc->size() >= size + MinChunkSize) {
duke@435 1686 fc = splitChunkAndReturnRemainder(fc, size);
duke@435 1687 }
duke@435 1688 assert(fc->size() >= size, "chunk too small");
duke@435 1689 assert(fc->size() < size + MinChunkSize, "chunk too big");
duke@435 1690 _bt.verify_single_block((HeapWord*)fc, fc->size());
duke@435 1691 return fc;
duke@435 1692 }
duke@435 1693
duke@435 1694 FreeChunk*
duke@435 1695 CompactibleFreeListSpace::getChunkFromDictionaryExact(size_t size) {
duke@435 1696 assert_locked();
jmasa@4488 1697 FreeChunk* fc = _dictionary->get_chunk(size,
jmasa@4488 1698 FreeBlockDictionary<FreeChunk>::atLeast);
duke@435 1699 if (fc == NULL) {
duke@435 1700 return fc;
duke@435 1701 }
duke@435 1702 _bt.allocated((HeapWord*)fc, fc->size());
duke@435 1703 if (fc->size() == size) {
duke@435 1704 _bt.verify_single_block((HeapWord*)fc, size);
duke@435 1705 return fc;
duke@435 1706 }
jmasa@3732 1707 assert(fc->size() > size, "get_chunk() guarantee");
duke@435 1708 if (fc->size() < size + MinChunkSize) {
duke@435 1709 // Return the chunk to the dictionary and go get a bigger one.
duke@435 1710 returnChunkToDictionary(fc);
jmasa@4488 1711 fc = _dictionary->get_chunk(size + MinChunkSize,
jmasa@4488 1712 FreeBlockDictionary<FreeChunk>::atLeast);
duke@435 1713 if (fc == NULL) {
duke@435 1714 return NULL;
duke@435 1715 }
duke@435 1716 _bt.allocated((HeapWord*)fc, fc->size());
duke@435 1717 }
duke@435 1718 assert(fc->size() >= size + MinChunkSize, "tautology");
duke@435 1719 fc = splitChunkAndReturnRemainder(fc, size);
duke@435 1720 assert(fc->size() == size, "chunk is wrong size");
duke@435 1721 _bt.verify_single_block((HeapWord*)fc, size);
duke@435 1722 return fc;
duke@435 1723 }
duke@435 1724
duke@435 1725 void
duke@435 1726 CompactibleFreeListSpace::returnChunkToDictionary(FreeChunk* chunk) {
duke@435 1727 assert_locked();
duke@435 1728
duke@435 1729 size_t size = chunk->size();
duke@435 1730 _bt.verify_single_block((HeapWord*)chunk, size);
duke@435 1731 // adjust _unallocated_block downward, as necessary
duke@435 1732 _bt.freed((HeapWord*)chunk, size);
jmasa@3732 1733 _dictionary->return_chunk(chunk);
ysr@1580 1734 #ifndef PRODUCT
ysr@1580 1735 if (CMSCollector::abstract_state() != CMSCollector::Sweeping) {
goetz@6337 1736 TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >* tc = TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >::as_TreeChunk(chunk);
goetz@6337 1737 TreeList<FreeChunk, AdaptiveFreeList<FreeChunk> >* tl = tc->list();
jmasa@4196 1738 tl->verify_stats();
ysr@1580 1739 }
ysr@1580 1740 #endif // PRODUCT
duke@435 1741 }
duke@435 1742
duke@435 1743 void
duke@435 1744 CompactibleFreeListSpace::returnChunkToFreeList(FreeChunk* fc) {
duke@435 1745 assert_locked();
duke@435 1746 size_t size = fc->size();
duke@435 1747 _bt.verify_single_block((HeapWord*) fc, size);
duke@435 1748 _bt.verify_not_unallocated((HeapWord*) fc, size);
duke@435 1749 if (_adaptive_freelists) {
jmasa@3732 1750 _indexedFreeList[size].return_chunk_at_tail(fc);
duke@435 1751 } else {
jmasa@3732 1752 _indexedFreeList[size].return_chunk_at_head(fc);
duke@435 1753 }
ysr@1580 1754 #ifndef PRODUCT
ysr@1580 1755 if (CMSCollector::abstract_state() != CMSCollector::Sweeping) {
ysr@1580 1756 _indexedFreeList[size].verify_stats();
ysr@1580 1757 }
ysr@1580 1758 #endif // PRODUCT
duke@435 1759 }
duke@435 1760
duke@435 1761 // Add chunk to end of last block -- if it's the largest
duke@435 1762 // block -- and update BOT and census data. We would
duke@435 1763 // of course have preferred to coalesce it with the
duke@435 1764 // last block, but it's currently less expensive to find the
duke@435 1765 // largest block than it is to find the last.
duke@435 1766 void
duke@435 1767 CompactibleFreeListSpace::addChunkToFreeListsAtEndRecordingStats(
duke@435 1768 HeapWord* chunk, size_t size) {
duke@435 1769 // check that the chunk does lie in this space!
duke@435 1770 assert(chunk != NULL && is_in_reserved(chunk), "Not in this space!");
duke@435 1771 // One of the parallel gc task threads may be here
duke@435 1772 // whilst others are allocating.
duke@435 1773 Mutex* lock = NULL;
duke@435 1774 if (ParallelGCThreads != 0) {
duke@435 1775 lock = &_parDictionaryAllocLock;
duke@435 1776 }
duke@435 1777 FreeChunk* ec;
duke@435 1778 {
duke@435 1779 MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
jmasa@3732 1780 ec = dictionary()->find_largest_dict(); // get largest block
jmasa@4196 1781 if (ec != NULL && ec->end() == (uintptr_t*) chunk) {
duke@435 1782 // It's a coterminal block - we can coalesce.
duke@435 1783 size_t old_size = ec->size();
duke@435 1784 coalDeath(old_size);
duke@435 1785 removeChunkFromDictionary(ec);
duke@435 1786 size += old_size;
duke@435 1787 } else {
duke@435 1788 ec = (FreeChunk*)chunk;
duke@435 1789 }
duke@435 1790 }
jmasa@3732 1791 ec->set_size(size);
duke@435 1792 debug_only(ec->mangleFreed(size));
brutisso@5166 1793 if (size < SmallForDictionary && ParallelGCThreads != 0) {
duke@435 1794 lock = _indexedFreeListParLocks[size];
duke@435 1795 }
duke@435 1796 MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
duke@435 1797 addChunkAndRepairOffsetTable((HeapWord*)ec, size, true);
duke@435 1798 // record the birth under the lock since the recording involves
duke@435 1799 // manipulation of the list on which the chunk lives and
duke@435 1800 // if the chunk is allocated and is the last on the list,
duke@435 1801 // the list can go away.
duke@435 1802 coalBirth(size);
duke@435 1803 }
duke@435 1804
duke@435 1805 void
duke@435 1806 CompactibleFreeListSpace::addChunkToFreeLists(HeapWord* chunk,
duke@435 1807 size_t size) {
duke@435 1808 // check that the chunk does lie in this space!
duke@435 1809 assert(chunk != NULL && is_in_reserved(chunk), "Not in this space!");
duke@435 1810 assert_locked();
duke@435 1811 _bt.verify_single_block(chunk, size);
duke@435 1812
duke@435 1813 FreeChunk* fc = (FreeChunk*) chunk;
jmasa@3732 1814 fc->set_size(size);
duke@435 1815 debug_only(fc->mangleFreed(size));
duke@435 1816 if (size < SmallForDictionary) {
duke@435 1817 returnChunkToFreeList(fc);
duke@435 1818 } else {
duke@435 1819 returnChunkToDictionary(fc);
duke@435 1820 }
duke@435 1821 }
duke@435 1822
duke@435 1823 void
duke@435 1824 CompactibleFreeListSpace::addChunkAndRepairOffsetTable(HeapWord* chunk,
duke@435 1825 size_t size, bool coalesced) {
duke@435 1826 assert_locked();
duke@435 1827 assert(chunk != NULL, "null chunk");
duke@435 1828 if (coalesced) {
duke@435 1829 // repair BOT
duke@435 1830 _bt.single_block(chunk, size);
duke@435 1831 }
duke@435 1832 addChunkToFreeLists(chunk, size);
duke@435 1833 }
duke@435 1834
duke@435 1835 // We _must_ find the purported chunk on our free lists;
duke@435 1836 // we assert if we don't.
duke@435 1837 void
duke@435 1838 CompactibleFreeListSpace::removeFreeChunkFromFreeLists(FreeChunk* fc) {
duke@435 1839 size_t size = fc->size();
duke@435 1840 assert_locked();
duke@435 1841 debug_only(verifyFreeLists());
duke@435 1842 if (size < SmallForDictionary) {
duke@435 1843 removeChunkFromIndexedFreeList(fc);
duke@435 1844 } else {
duke@435 1845 removeChunkFromDictionary(fc);
duke@435 1846 }
duke@435 1847 _bt.verify_single_block((HeapWord*)fc, size);
duke@435 1848 debug_only(verifyFreeLists());
duke@435 1849 }
duke@435 1850
duke@435 1851 void
duke@435 1852 CompactibleFreeListSpace::removeChunkFromDictionary(FreeChunk* fc) {
duke@435 1853 size_t size = fc->size();
duke@435 1854 assert_locked();
duke@435 1855 assert(fc != NULL, "null chunk");
duke@435 1856 _bt.verify_single_block((HeapWord*)fc, size);
jmasa@3732 1857 _dictionary->remove_chunk(fc);
duke@435 1858 // adjust _unallocated_block upward, as necessary
duke@435 1859 _bt.allocated((HeapWord*)fc, size);
duke@435 1860 }
duke@435 1861
duke@435 1862 void
duke@435 1863 CompactibleFreeListSpace::removeChunkFromIndexedFreeList(FreeChunk* fc) {
duke@435 1864 assert_locked();
duke@435 1865 size_t size = fc->size();
duke@435 1866 _bt.verify_single_block((HeapWord*)fc, size);
duke@435 1867 NOT_PRODUCT(
duke@435 1868 if (FLSVerifyIndexTable) {
duke@435 1869 verifyIndexedFreeList(size);
duke@435 1870 }
duke@435 1871 )
jmasa@3732 1872 _indexedFreeList[size].remove_chunk(fc);
duke@435 1873 NOT_PRODUCT(
duke@435 1874 if (FLSVerifyIndexTable) {
duke@435 1875 verifyIndexedFreeList(size);
duke@435 1876 }
duke@435 1877 )
duke@435 1878 }
duke@435 1879
duke@435 1880 FreeChunk* CompactibleFreeListSpace::bestFitSmall(size_t numWords) {
duke@435 1881 /* A hint is the next larger size that has a surplus.
duke@435 1882 Start search at a size large enough to guarantee that
duke@435 1883      the excess is >= MinChunkSize. */
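  /* Illustrative example (hypothetical sizes): if start == 20 and list 20's
     hint is 24, and list 24 has both a surplus and a non-empty head, the hint
     at 20 is reset to 24 and a chunk split off list 24 is returned; otherwise
     the walk follows successive hints until it runs past IndexSetSize. */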
duke@435 1884 size_t start = align_object_size(numWords + MinChunkSize);
duke@435 1885 if (start < IndexSetSize) {
jmasa@4196 1886 AdaptiveFreeList<FreeChunk>* it = _indexedFreeList;
duke@435 1887 size_t hint = _indexedFreeList[start].hint();
duke@435 1888 while (hint < IndexSetSize) {
duke@435 1889 assert(hint % MinObjAlignment == 0, "hint should be aligned");
jmasa@4196 1890 AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[hint];
duke@435 1891 if (fl->surplus() > 0 && fl->head() != NULL) {
duke@435 1892 // Found a list with surplus, reset original hint
duke@435 1893 // and split out a free chunk which is returned.
duke@435 1894 _indexedFreeList[start].set_hint(hint);
duke@435 1895 FreeChunk* res = getFromListGreater(fl, numWords);
jmasa@3732 1896 assert(res == NULL || res->is_free(),
duke@435 1897 "Should be returning a free chunk");
duke@435 1898 return res;
duke@435 1899 }
duke@435 1900 hint = fl->hint(); /* keep looking */
duke@435 1901 }
duke@435 1902 /* None found. */
duke@435 1903 it[start].set_hint(IndexSetSize);
duke@435 1904 }
duke@435 1905 return NULL;
duke@435 1906 }
duke@435 1907
duke@435 1908 /* Requires fl->size >= numWords + MinChunkSize */
jmasa@4196 1909 FreeChunk* CompactibleFreeListSpace::getFromListGreater(AdaptiveFreeList<FreeChunk>* fl,
duke@435 1910 size_t numWords) {
duke@435 1911 FreeChunk *curr = fl->head();
duke@435 1912 size_t oldNumWords = curr->size();
duke@435 1913 assert(numWords >= MinChunkSize, "Word size is too small");
duke@435 1914 assert(curr != NULL, "List is empty");
duke@435 1915 assert(oldNumWords >= numWords + MinChunkSize,
duke@435 1916 "Size of chunks in the list is too small");
duke@435 1917
jmasa@3732 1918 fl->remove_chunk(curr);
duke@435 1919 // recorded indirectly by splitChunkAndReturnRemainder -
duke@435 1920 // smallSplit(oldNumWords, numWords);
duke@435 1921 FreeChunk* new_chunk = splitChunkAndReturnRemainder(curr, numWords);
duke@435 1922 // Does anything have to be done for the remainder in terms of
duke@435 1923 // fixing the card table?
jmasa@3732 1924 assert(new_chunk == NULL || new_chunk->is_free(),
duke@435 1925 "Should be returning a free chunk");
duke@435 1926 return new_chunk;
duke@435 1927 }
duke@435 1928
duke@435 1929 FreeChunk*
duke@435 1930 CompactibleFreeListSpace::splitChunkAndReturnRemainder(FreeChunk* chunk,
duke@435 1931 size_t new_size) {
duke@435 1932 assert_locked();
duke@435 1933 size_t size = chunk->size();
duke@435 1934 assert(size > new_size, "Split from a smaller block?");
duke@435 1935 assert(is_aligned(chunk), "alignment problem");
duke@435 1936 assert(size == adjustObjectSize(size), "alignment problem");
duke@435 1937 size_t rem_size = size - new_size;
duke@435 1938 assert(rem_size == adjustObjectSize(rem_size), "alignment problem");
duke@435 1939 assert(rem_size >= MinChunkSize, "Free chunk smaller than minimum");
duke@435 1940 FreeChunk* ffc = (FreeChunk*)((HeapWord*)chunk + new_size);
duke@435 1941 assert(is_aligned(ffc), "alignment problem");
jmasa@3732 1942 ffc->set_size(rem_size);
jmasa@3732 1943 ffc->link_next(NULL);
jmasa@3732 1944 ffc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
duke@435 1945 // Above must occur before BOT is updated below.
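  // (A parallel GC thread that sees the updated BOT entry must also see ffc
  //  already marked free; that ordering is what the storestore barrier below
  //  enforces.)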
duke@435 1946 // adjust block offset table
ysr@2071 1947 OrderAccess::storestore();
jmasa@3732 1948 assert(chunk->is_free() && ffc->is_free(), "Error");
duke@435 1949 _bt.split_block((HeapWord*)chunk, chunk->size(), new_size);
duke@435 1950 if (rem_size < SmallForDictionary) {
duke@435 1951 bool is_par = (SharedHeap::heap()->n_par_threads() > 0);
duke@435 1952 if (is_par) _indexedFreeListParLocks[rem_size]->lock();
jmasa@3294 1953 assert(!is_par ||
jmasa@3294 1954 (SharedHeap::heap()->n_par_threads() ==
jmasa@3294 1955 SharedHeap::heap()->workers()->active_workers()), "Mismatch");
duke@435 1956 returnChunkToFreeList(ffc);
duke@435 1957 split(size, rem_size);
duke@435 1958 if (is_par) _indexedFreeListParLocks[rem_size]->unlock();
duke@435 1959 } else {
duke@435 1960 returnChunkToDictionary(ffc);
duke@435 1961       split(size, rem_size);
duke@435 1962 }
jmasa@3732 1963 chunk->set_size(new_size);
duke@435 1964 return chunk;
duke@435 1965 }
duke@435 1966
duke@435 1967 void
duke@435 1968 CompactibleFreeListSpace::sweep_completed() {
duke@435 1969 // Now that space is probably plentiful, refill linear
duke@435 1970 // allocation blocks as needed.
duke@435 1971 refillLinearAllocBlocksIfNeeded();
duke@435 1972 }
duke@435 1973
duke@435 1974 void
duke@435 1975 CompactibleFreeListSpace::gc_prologue() {
duke@435 1976 assert_locked();
duke@435 1977 if (PrintFLSStatistics != 0) {
duke@435 1978 gclog_or_tty->print("Before GC:\n");
duke@435 1979 reportFreeListStatistics();
duke@435 1980 }
duke@435 1981 refillLinearAllocBlocksIfNeeded();
duke@435 1982 }
duke@435 1983
duke@435 1984 void
duke@435 1985 CompactibleFreeListSpace::gc_epilogue() {
duke@435 1986 assert_locked();
duke@435 1987 if (PrintGCDetails && Verbose && !_adaptive_freelists) {
duke@435 1988 if (_smallLinearAllocBlock._word_size == 0)
duke@435 1989 warning("CompactibleFreeListSpace(epilogue):: Linear allocation failure");
duke@435 1990 }
duke@435 1991 assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");
duke@435 1992 _promoInfo.stopTrackingPromotions();
duke@435 1993 repairLinearAllocationBlocks();
duke@435 1994 // Print Space's stats
duke@435 1995 if (PrintFLSStatistics != 0) {
duke@435 1996 gclog_or_tty->print("After GC:\n");
duke@435 1997 reportFreeListStatistics();
duke@435 1998 }
duke@435 1999 }
duke@435 2000
duke@435 2001 // Iteration support, mostly delegated from a CMS generation
duke@435 2002
duke@435 2003 void CompactibleFreeListSpace::save_marks() {
ysr@2825 2004 assert(Thread::current()->is_VM_thread(),
ysr@2825 2005 "Global variable should only be set when single-threaded");
ysr@2825 2006 // Mark the "end" of the used space at the time of this call;
duke@435 2007 // note, however, that promoted objects from this point
duke@435 2008 // on are tracked in the _promoInfo below.
ysr@2071 2009 set_saved_mark_word(unallocated_block());
ysr@2825 2010 #ifdef ASSERT
ysr@2825 2011 // Check the sanity of save_marks() etc.
ysr@2825 2012 MemRegion ur = used_region();
ysr@2825 2013 MemRegion urasm = used_region_at_save_marks();
ysr@2825 2014 assert(ur.contains(urasm),
ysr@2825 2015 err_msg(" Error at save_marks(): [" PTR_FORMAT "," PTR_FORMAT ")"
ysr@2825 2016 " should contain [" PTR_FORMAT "," PTR_FORMAT ")",
drchase@6680 2017 p2i(ur.start()), p2i(ur.end()), p2i(urasm.start()), p2i(urasm.end())));
ysr@2825 2018 #endif
duke@435 2019 // inform allocator that promotions should be tracked.
duke@435 2020 assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");
duke@435 2021 _promoInfo.startTrackingPromotions();
duke@435 2022 }
duke@435 2023
duke@435 2024 bool CompactibleFreeListSpace::no_allocs_since_save_marks() {
duke@435 2025 assert(_promoInfo.tracking(), "No preceding save_marks?");
ysr@2132 2026 assert(SharedHeap::heap()->n_par_threads() == 0,
ysr@2132 2027 "Shouldn't be called if using parallel gc.");
duke@435 2028 return _promoInfo.noPromotions();
duke@435 2029 }
duke@435 2030
duke@435 2031 #define CFLS_OOP_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix) \
duke@435 2032 \
duke@435 2033 void CompactibleFreeListSpace:: \
duke@435 2034 oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk) { \
duke@435 2035 assert(SharedHeap::heap()->n_par_threads() == 0, \
duke@435 2036 "Shouldn't be called (yet) during parallel part of gc."); \
duke@435 2037 _promoInfo.promoted_oops_iterate##nv_suffix(blk); \
duke@435 2038 /* \
duke@435 2039 * This also restores any displaced headers and removes the elements from \
duke@435 2040 * the iteration set as they are processed, so that we have a clean slate \
duke@435 2041 * at the end of the iteration. Note, thus, that if new objects are \
duke@435 2042 * promoted as a result of the iteration they are iterated over as well. \
duke@435 2043 */ \
duke@435 2044 assert(_promoInfo.noPromotions(), "_promoInfo inconsistency"); \
duke@435 2045 }
duke@435 2046
duke@435 2047 ALL_SINCE_SAVE_MARKS_CLOSURES(CFLS_OOP_SINCE_SAVE_MARKS_DEFN)
duke@435 2048
ysr@447 2049 bool CompactibleFreeListSpace::linearAllocationWouldFail() const {
duke@435 2050 return _smallLinearAllocBlock._word_size == 0;
duke@435 2051 }
duke@435 2052
duke@435 2053 void CompactibleFreeListSpace::repairLinearAllocationBlocks() {
duke@435 2054 // Fix up linear allocation blocks to look like free blocks
duke@435 2055 repairLinearAllocBlock(&_smallLinearAllocBlock);
duke@435 2056 }
duke@435 2057
duke@435 2058 void CompactibleFreeListSpace::repairLinearAllocBlock(LinearAllocBlock* blk) {
duke@435 2059 assert_locked();
duke@435 2060 if (blk->_ptr != NULL) {
duke@435 2061 assert(blk->_word_size != 0 && blk->_word_size >= MinChunkSize,
duke@435 2062 "Minimum block size requirement");
duke@435 2063 FreeChunk* fc = (FreeChunk*)(blk->_ptr);
jmasa@3732 2064 fc->set_size(blk->_word_size);
jmasa@3732 2065 fc->link_prev(NULL); // mark as free
duke@435 2066 fc->dontCoalesce();
jmasa@3732 2067 assert(fc->is_free(), "just marked it free");
duke@435 2068 assert(fc->cantCoalesce(), "just marked it uncoalescable");
duke@435 2069 }
duke@435 2070 }
duke@435 2071
duke@435 2072 void CompactibleFreeListSpace::refillLinearAllocBlocksIfNeeded() {
duke@435 2073 assert_locked();
duke@435 2074 if (_smallLinearAllocBlock._ptr == NULL) {
duke@435 2075 assert(_smallLinearAllocBlock._word_size == 0,
duke@435 2076 "Size of linAB should be zero if the ptr is NULL");
duke@435 2077 // Reset the linAB refill and allocation size limit.
duke@435 2078 _smallLinearAllocBlock.set(0, 0, 1024*SmallForLinearAlloc, SmallForLinearAlloc);
duke@435 2079 }
duke@435 2080 refillLinearAllocBlockIfNeeded(&_smallLinearAllocBlock);
duke@435 2081 }
duke@435 2082
duke@435 2083 void
duke@435 2084 CompactibleFreeListSpace::refillLinearAllocBlockIfNeeded(LinearAllocBlock* blk) {
duke@435 2085 assert_locked();
duke@435 2086 assert((blk->_ptr == NULL && blk->_word_size == 0) ||
duke@435 2087 (blk->_ptr != NULL && blk->_word_size >= MinChunkSize),
duke@435 2088 "blk invariant");
duke@435 2089 if (blk->_ptr == NULL) {
duke@435 2090 refillLinearAllocBlock(blk);
duke@435 2091 }
duke@435 2092 if (PrintMiscellaneous && Verbose) {
duke@435 2093 if (blk->_word_size == 0) {
duke@435 2094 warning("CompactibleFreeListSpace(prologue):: Linear allocation failure");
duke@435 2095 }
duke@435 2096 }
duke@435 2097 }
duke@435 2098
duke@435 2099 void
duke@435 2100 CompactibleFreeListSpace::refillLinearAllocBlock(LinearAllocBlock* blk) {
duke@435 2101 assert_locked();
duke@435 2102 assert(blk->_word_size == 0 && blk->_ptr == NULL,
duke@435 2103 "linear allocation block should be empty");
duke@435 2104 FreeChunk* fc;
duke@435 2105 if (blk->_refillSize < SmallForDictionary &&
duke@435 2106 (fc = getChunkFromIndexedFreeList(blk->_refillSize)) != NULL) {
duke@435 2107 // A linAB's strategy might be to use small sizes to reduce
duke@435 2108 // fragmentation but still get the benefits of allocation from a
duke@435 2109 // linAB.
duke@435 2110 } else {
duke@435 2111 fc = getChunkFromDictionary(blk->_refillSize);
duke@435 2112 }
duke@435 2113 if (fc != NULL) {
duke@435 2114 blk->_ptr = (HeapWord*)fc;
duke@435 2115 blk->_word_size = fc->size();
duke@435 2116 fc->dontCoalesce(); // to prevent sweeper from sweeping us up
duke@435 2117 }
duke@435 2118 }
duke@435 2119
ysr@447 2120 // Support for concurrent collection policy decisions.
ysr@447 2121 bool CompactibleFreeListSpace::should_concurrent_collect() const {
ysr@447 2122   // In the future we might want to add in fragmentation stats --
ysr@447 2123 // including erosion of the "mountain" into this decision as well.
ysr@447 2124 return !adaptive_freelists() && linearAllocationWouldFail();
ysr@447 2125 }
ysr@447 2126
duke@435 2127 // Support for compaction
duke@435 2128
duke@435 2129 void CompactibleFreeListSpace::prepare_for_compaction(CompactPoint* cp) {
duke@435 2130 SCAN_AND_FORWARD(cp,end,block_is_obj,block_size);
duke@435 2131 // prepare_for_compaction() uses the space between live objects
duke@435 2132   // so that the later phases can skip dead space quickly.  So verification
duke@435 2133   // of the free lists doesn't work afterwards.
duke@435 2134 }
duke@435 2135
duke@435 2136 #define obj_size(q) adjustObjectSize(oop(q)->size())
duke@435 2137 #define adjust_obj_size(s) adjustObjectSize(s)
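// (The SCAN_AND_FORWARD, SCAN_AND_ADJUST_POINTERS and SCAN_AND_COMPACT macros
//  used in prepare_for_compaction(), adjust_pointers() and compact() are the
//  generic mark-compact phase templates shared by the space implementations;
//  here they are parameterized with the CFLS-specific block_is_obj/block_size
//  queries and the obj_size/adjust_obj_size macros above.)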
duke@435 2138
duke@435 2139 void CompactibleFreeListSpace::adjust_pointers() {
duke@435 2140 // In other versions of adjust_pointers(), a bail out
duke@435 2141 // based on the amount of live data in the generation
duke@435 2142 // (i.e., if 0, bail out) may be used.
duke@435 2143 // Cannot test used() == 0 here because the free lists have already
duke@435 2144 // been mangled by the compaction.
duke@435 2145
duke@435 2146 SCAN_AND_ADJUST_POINTERS(adjust_obj_size);
duke@435 2147 // See note about verification in prepare_for_compaction().
duke@435 2148 }
duke@435 2149
duke@435 2150 void CompactibleFreeListSpace::compact() {
duke@435 2151 SCAN_AND_COMPACT(obj_size);
duke@435 2152 }
duke@435 2153
duke@435 2154 // fragmentation_metric = 1 - [sum of (fbs**2) / (sum of fbs)**2]
duke@435 2155 // where fbs are the free block sizes
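// Worked example (illustrative numbers): four free blocks of 2 words each give
// 1 - 16/64 = 0.75, while a single free block of 8 words gives 1 - 64/64 = 0.0;
// many small fragments push the metric towards 1.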
duke@435 2156 double CompactibleFreeListSpace::flsFrag() const {
duke@435 2157 size_t itabFree = totalSizeInIndexedFreeLists();
duke@435 2158 double frag = 0.0;
duke@435 2159 size_t i;
duke@435 2160
duke@435 2161 for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
duke@435 2162 double sz = i;
duke@435 2163 frag += _indexedFreeList[i].count() * (sz * sz);
duke@435 2164 }
duke@435 2165
duke@435 2166 double totFree = itabFree +
jmasa@3732 2167 _dictionary->total_chunk_size(DEBUG_ONLY(freelistLock()));
duke@435 2168 if (totFree > 0) {
duke@435 2169 frag = ((frag + _dictionary->sum_of_squared_block_sizes()) /
duke@435 2170 (totFree * totFree));
duke@435 2171 frag = (double)1.0 - frag;
duke@435 2172 } else {
duke@435 2173 assert(frag == 0.0, "Follows from totFree == 0");
duke@435 2174 }
duke@435 2175 return frag;
duke@435 2176 }
duke@435 2177
duke@435 2178 void CompactibleFreeListSpace::beginSweepFLCensus(
duke@435 2179 float inter_sweep_current,
ysr@1580 2180 float inter_sweep_estimate,
ysr@1580 2181 float intra_sweep_estimate) {
duke@435 2182 assert_locked();
duke@435 2183 size_t i;
duke@435 2184 for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
jmasa@4196 2185 AdaptiveFreeList<FreeChunk>* fl = &_indexedFreeList[i];
ysr@1580 2186 if (PrintFLSStatistics > 1) {
drchase@6680 2187 gclog_or_tty->print("size[" SIZE_FORMAT "] : ", i);
ysr@1580 2188 }
ysr@1580 2189 fl->compute_desired(inter_sweep_current, inter_sweep_estimate, intra_sweep_estimate);
jmasa@3732 2190 fl->set_coal_desired((ssize_t)((double)fl->desired() * CMSSmallCoalSurplusPercent));
jmasa@3732 2191 fl->set_before_sweep(fl->count());
jmasa@3732 2192 fl->set_bfr_surp(fl->surplus());
duke@435 2193 }
jmasa@3732 2194 _dictionary->begin_sweep_dict_census(CMSLargeCoalSurplusPercent,
duke@435 2195 inter_sweep_current,
ysr@1580 2196 inter_sweep_estimate,
ysr@1580 2197 intra_sweep_estimate);
duke@435 2198 }
duke@435 2199
duke@435 2200 void CompactibleFreeListSpace::setFLSurplus() {
duke@435 2201 assert_locked();
duke@435 2202 size_t i;
duke@435 2203 for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
jmasa@4196 2204 AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
duke@435 2205 fl->set_surplus(fl->count() -
ysr@1580 2206 (ssize_t)((double)fl->desired() * CMSSmallSplitSurplusPercent));
duke@435 2207 }
duke@435 2208 }
duke@435 2209
duke@435 2210 void CompactibleFreeListSpace::setFLHints() {
duke@435 2211 assert_locked();
duke@435 2212 size_t i;
duke@435 2213 size_t h = IndexSetSize;
duke@435 2214 for (i = IndexSetSize - 1; i != 0; i -= IndexSetStride) {
jmasa@4196 2215 AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
duke@435 2216 fl->set_hint(h);
duke@435 2217 if (fl->surplus() > 0) {
duke@435 2218 h = i;
duke@435 2219 }
duke@435 2220 }
duke@435 2221 }
duke@435 2222
duke@435 2223 void CompactibleFreeListSpace::clearFLCensus() {
duke@435 2224 assert_locked();
ysr@3264 2225 size_t i;
duke@435 2226 for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
jmasa@4196 2227 AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
jmasa@3732 2228 fl->set_prev_sweep(fl->count());
jmasa@3732 2229 fl->set_coal_births(0);
jmasa@3732 2230 fl->set_coal_deaths(0);
jmasa@3732 2231 fl->set_split_births(0);
jmasa@3732 2232 fl->set_split_deaths(0);
duke@435 2233 }
duke@435 2234 }
duke@435 2235
ysr@447 2236 void CompactibleFreeListSpace::endSweepFLCensus(size_t sweep_count) {
ysr@1580 2237 if (PrintFLSStatistics > 0) {
jmasa@3732 2238 HeapWord* largestAddr = (HeapWord*) dictionary()->find_largest_dict();
ysr@1580 2239 gclog_or_tty->print_cr("CMS: Large block " PTR_FORMAT,
drchase@6680 2240 p2i(largestAddr));
ysr@1580 2241 }
duke@435 2242 setFLSurplus();
duke@435 2243 setFLHints();
duke@435 2244 if (PrintGC && PrintFLSCensus > 0) {
ysr@447 2245 printFLCensus(sweep_count);
duke@435 2246 }
duke@435 2247 clearFLCensus();
duke@435 2248 assert_locked();
jmasa@3732 2249 _dictionary->end_sweep_dict_census(CMSLargeSplitSurplusPercent);
duke@435 2250 }
duke@435 2251
duke@435 2252 bool CompactibleFreeListSpace::coalOverPopulated(size_t size) {
duke@435 2253 if (size < SmallForDictionary) {
jmasa@4196 2254 AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
jmasa@3732 2255 return (fl->coal_desired() < 0) ||
jmasa@3732 2256 ((int)fl->count() > fl->coal_desired());
duke@435 2257 } else {
jmasa@3732 2258 return dictionary()->coal_dict_over_populated(size);
duke@435 2259 }
duke@435 2260 }
duke@435 2261
duke@435 2262 void CompactibleFreeListSpace::smallCoalBirth(size_t size) {
duke@435 2263 assert(size < SmallForDictionary, "Size too large for indexed list");
jmasa@4196 2264 AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
jmasa@3732 2265 fl->increment_coal_births();
duke@435 2266 fl->increment_surplus();
duke@435 2267 }
duke@435 2268
duke@435 2269 void CompactibleFreeListSpace::smallCoalDeath(size_t size) {
duke@435 2270 assert(size < SmallForDictionary, "Size too large for indexed list");
jmasa@4196 2271 AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
jmasa@3732 2272 fl->increment_coal_deaths();
duke@435 2273 fl->decrement_surplus();
duke@435 2274 }
duke@435 2275
duke@435 2276 void CompactibleFreeListSpace::coalBirth(size_t size) {
duke@435 2277 if (size < SmallForDictionary) {
duke@435 2278 smallCoalBirth(size);
duke@435 2279 } else {
jmasa@4196 2280 dictionary()->dict_census_update(size,
duke@435 2281 false /* split */,
duke@435 2282 true /* birth */);
duke@435 2283 }
duke@435 2284 }
duke@435 2285
duke@435 2286 void CompactibleFreeListSpace::coalDeath(size_t size) {
duke@435 2287   if (size < SmallForDictionary) {
duke@435 2288 smallCoalDeath(size);
duke@435 2289 } else {
jmasa@4196 2290 dictionary()->dict_census_update(size,
duke@435 2291 false /* split */,
duke@435 2292 false /* birth */);
duke@435 2293 }
duke@435 2294 }
duke@435 2295
duke@435 2296 void CompactibleFreeListSpace::smallSplitBirth(size_t size) {
duke@435 2297 assert(size < SmallForDictionary, "Size too large for indexed list");
jmasa@4196 2298 AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
jmasa@3732 2299 fl->increment_split_births();
duke@435 2300 fl->increment_surplus();
duke@435 2301 }
duke@435 2302
duke@435 2303 void CompactibleFreeListSpace::smallSplitDeath(size_t size) {
duke@435 2304 assert(size < SmallForDictionary, "Size too large for indexed list");
jmasa@4196 2305 AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
jmasa@3732 2306 fl->increment_split_deaths();
duke@435 2307 fl->decrement_surplus();
duke@435 2308 }
duke@435 2309
jmasa@3732 2310 void CompactibleFreeListSpace::split_birth(size_t size) {
duke@435 2311 if (size < SmallForDictionary) {
duke@435 2312 smallSplitBirth(size);
duke@435 2313 } else {
jmasa@4196 2314 dictionary()->dict_census_update(size,
duke@435 2315 true /* split */,
duke@435 2316 true /* birth */);
duke@435 2317 }
duke@435 2318 }
duke@435 2319
duke@435 2320 void CompactibleFreeListSpace::splitDeath(size_t size) {
duke@435 2321 if (size < SmallForDictionary) {
duke@435 2322 smallSplitDeath(size);
duke@435 2323 } else {
jmasa@4196 2324 dictionary()->dict_census_update(size,
duke@435 2325 true /* split */,
duke@435 2326 false /* birth */);
duke@435 2327 }
duke@435 2328 }
duke@435 2329
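// Census accounting for splitting a block of size 'from' into pieces of sizes
// 'to1' and 'from - to1': one split death at the original size and one split
// birth at each resulting size.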
duke@435 2330 void CompactibleFreeListSpace::split(size_t from, size_t to1) {
duke@435 2331 size_t to2 = from - to1;
duke@435 2332 splitDeath(from);
jmasa@3732 2333 split_birth(to1);
jmasa@3732 2334 split_birth(to2);
duke@435 2335 }
duke@435 2336
duke@435 2337 void CompactibleFreeListSpace::print() const {
ysr@2294 2338 print_on(tty);
duke@435 2339 }
duke@435 2340
duke@435 2341 void CompactibleFreeListSpace::prepare_for_verify() {
duke@435 2342 assert_locked();
duke@435 2343 repairLinearAllocationBlocks();
duke@435 2344 // Verify that the SpoolBlocks look like free blocks of
duke@435 2345 // appropriate sizes... To be done ...
duke@435 2346 }
duke@435 2347
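// Block-level verification closure: walks every block in the space, checking
// that each object is a valid oop (and, if live, that it verifies) and that
// each free chunk, unless marked un-coalescable, is on a free list when
// FLSVerifyLists is set. A block of size zero means the walk cannot make
// progress, so it is reported as a livelock and verification aborts.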
duke@435 2348 class VerifyAllBlksClosure: public BlkClosure {
coleenp@548 2349 private:
duke@435 2350 const CompactibleFreeListSpace* _sp;
duke@435 2351 const MemRegion _span;
ysr@2071 2352 HeapWord* _last_addr;
ysr@2071 2353 size_t _last_size;
ysr@2071 2354 bool _last_was_obj;
ysr@2071 2355 bool _last_was_live;
duke@435 2356
duke@435 2357 public:
duke@435 2358 VerifyAllBlksClosure(const CompactibleFreeListSpace* sp,
ysr@2071 2359 MemRegion span) : _sp(sp), _span(span),
ysr@2071 2360 _last_addr(NULL), _last_size(0),
ysr@2071 2361 _last_was_obj(false), _last_was_live(false) { }
duke@435 2362
coleenp@548 2363 virtual size_t do_blk(HeapWord* addr) {
duke@435 2364 size_t res;
ysr@2071 2365 bool was_obj = false;
ysr@2071 2366 bool was_live = false;
duke@435 2367 if (_sp->block_is_obj(addr)) {
ysr@2071 2368 was_obj = true;
duke@435 2369 oop p = oop(addr);
duke@435 2370 guarantee(p->is_oop(), "Should be an oop");
duke@435 2371 res = _sp->adjustObjectSize(p->size());
duke@435 2372 if (_sp->obj_is_alive(addr)) {
ysr@2071 2373 was_live = true;
duke@435 2374 p->verify();
duke@435 2375 }
duke@435 2376 } else {
duke@435 2377 FreeChunk* fc = (FreeChunk*)addr;
duke@435 2378 res = fc->size();
duke@435 2379 if (FLSVerifyLists && !fc->cantCoalesce()) {
jmasa@3732 2380 guarantee(_sp->verify_chunk_in_free_list(fc),
duke@435 2381 "Chunk should be on a free list");
duke@435 2382 }
duke@435 2383 }
ysr@2071 2384 if (res == 0) {
ysr@2071 2385 gclog_or_tty->print_cr("Livelock: no rank reduction!");
ysr@2071 2386 gclog_or_tty->print_cr(
ysr@2071 2387 " Current: addr = " PTR_FORMAT ", size = " SIZE_FORMAT ", obj = %s, live = %s \n"
ysr@2071 2388 " Previous: addr = " PTR_FORMAT ", size = " SIZE_FORMAT ", obj = %s, live = %s \n",
drchase@6680 2389 p2i(addr), res, was_obj ?"true":"false", was_live ?"true":"false",
drchase@6680 2390 p2i(_last_addr), _last_size, _last_was_obj?"true":"false", _last_was_live?"true":"false");
ysr@2071 2391 _sp->print_on(gclog_or_tty);
ysr@2071 2392 guarantee(false, "Seppuku!");
ysr@2071 2393 }
ysr@2071 2394 _last_addr = addr;
ysr@2071 2395 _last_size = res;
ysr@2071 2396 _last_was_obj = was_obj;
ysr@2071 2397 _last_was_live = was_live;
duke@435 2398 return res;
duke@435 2399 }
duke@435 2400 };
duke@435 2401
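// Reference-level verification closure: for every non-NULL oop in the heap,
// checks that references pointing into the CMS span refer to valid objects
// (and, once remark is past, to marked objects when the referring object is
// marked), and that references from this space to elsewhere in the heap are
// also valid oops.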
duke@435 2402 class VerifyAllOopsClosure: public OopClosure {
coleenp@548 2403 private:
duke@435 2404 const CMSCollector* _collector;
duke@435 2405 const CompactibleFreeListSpace* _sp;
duke@435 2406 const MemRegion _span;
duke@435 2407 const bool _past_remark;
duke@435 2408 const CMSBitMap* _bit_map;
duke@435 2409
coleenp@548 2410 protected:
coleenp@548 2411 void do_oop(void* p, oop obj) {
coleenp@548 2412 if (_span.contains(obj)) { // the interior oop points into CMS heap
coleenp@548 2413 if (!_span.contains(p)) { // reference from outside CMS heap
coleenp@548 2414 // Should be a valid object; the first disjunct below allows
coleenp@548 2415 // us to sidestep an assertion in block_is_obj() that insists
coleenp@548 2416 // that p be in _sp. Note that several generations (and spaces)
coleenp@548 2417 // are spanned by _span (CMS heap) above.
coleenp@548 2418 guarantee(!_sp->is_in_reserved(obj) ||
coleenp@548 2419 _sp->block_is_obj((HeapWord*)obj),
coleenp@548 2420 "Should be an object");
coleenp@548 2421 guarantee(obj->is_oop(), "Should be an oop");
coleenp@548 2422 obj->verify();
coleenp@548 2423 if (_past_remark) {
coleenp@548 2424 // Remark has been completed, the object should be marked
coleenp@548 2425 _bit_map->isMarked((HeapWord*)obj);
coleenp@548 2426 }
coleenp@548 2427 } else { // reference within CMS heap
coleenp@548 2428 if (_past_remark) {
coleenp@548 2429 // Remark has been completed -- so the referent should have
coleenp@548 2430 // been marked, if referring object is.
coleenp@548 2431 if (_bit_map->isMarked(_collector->block_start(p))) {
coleenp@548 2432 guarantee(_bit_map->isMarked((HeapWord*)obj), "Marking error?");
coleenp@548 2433 }
coleenp@548 2434 }
coleenp@548 2435 }
coleenp@548 2436 } else if (_sp->is_in_reserved(p)) {
coleenp@548 2437 // the reference is from FLS, and points out of FLS
coleenp@548 2438 guarantee(obj->is_oop(), "Should be an oop");
coleenp@548 2439 obj->verify();
coleenp@548 2440 }
coleenp@548 2441 }
coleenp@548 2442
coleenp@548 2443 template <class T> void do_oop_work(T* p) {
coleenp@548 2444 T heap_oop = oopDesc::load_heap_oop(p);
coleenp@548 2445 if (!oopDesc::is_null(heap_oop)) {
coleenp@548 2446 oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
coleenp@548 2447 do_oop(p, obj);
coleenp@548 2448 }
coleenp@548 2449 }
coleenp@548 2450
duke@435 2451 public:
duke@435 2452 VerifyAllOopsClosure(const CMSCollector* collector,
duke@435 2453 const CompactibleFreeListSpace* sp, MemRegion span,
duke@435 2454 bool past_remark, CMSBitMap* bit_map) :
coleenp@4037 2455 _collector(collector), _sp(sp), _span(span),
duke@435 2456 _past_remark(past_remark), _bit_map(bit_map) { }
duke@435 2457
coleenp@548 2458 virtual void do_oop(oop* p) { VerifyAllOopsClosure::do_oop_work(p); }
coleenp@548 2459 virtual void do_oop(narrowOop* p) { VerifyAllOopsClosure::do_oop_work(p); }
duke@435 2460 };
duke@435 2461
brutisso@3711 2462 void CompactibleFreeListSpace::verify() const {
duke@435 2463 assert_lock_strong(&_freelistLock);
duke@435 2464 verify_objects_initialized();
duke@435 2465 MemRegion span = _collector->_span;
duke@435 2466 bool past_remark = (_collector->abstract_state() ==
duke@435 2467 CMSCollector::Sweeping);
duke@435 2468
duke@435 2469 ResourceMark rm;
duke@435 2470 HandleMark hm;
duke@435 2471
duke@435 2472 // Check integrity of CFL data structures
duke@435 2473 _promoInfo.verify();
duke@435 2474 _dictionary->verify();
duke@435 2475 if (FLSVerifyIndexTable) {
duke@435 2476 verifyIndexedFreeLists();
duke@435 2477 }
duke@435 2478 // Check integrity of all objects and free blocks in space
duke@435 2479 {
duke@435 2480 VerifyAllBlksClosure cl(this, span);
duke@435 2481 ((CompactibleFreeListSpace*)this)->blk_iterate(&cl); // cast off const
duke@435 2482 }
duke@435 2483 // Check that all references in the heap to FLS
duke@435 2484 // are to valid objects in FLS or that references in
duke@435 2485 // FLS are to valid objects elsewhere in the heap
duke@435 2486 if (FLSVerifyAllHeapReferences)
duke@435 2487 {
duke@435 2488 VerifyAllOopsClosure cl(_collector, this, span, past_remark,
duke@435 2489 _collector->markBitMap());
duke@435 2490 CollectedHeap* ch = Universe::heap();
coleenp@4037 2491
coleenp@4037 2492 // Iterate over all oops in the heap. Uses the _no_header version
coleenp@4037 2493 // since we are not interested in following the klass pointers.
coleenp@4037 2494 ch->oop_iterate_no_header(&cl);
duke@435 2495 }
duke@435 2496
duke@435 2497 if (VerifyObjectStartArray) {
duke@435 2498 // Verify the block offset table
duke@435 2499 _bt.verify();
duke@435 2500 }
duke@435 2501 }
duke@435 2502
duke@435 2503 #ifndef PRODUCT
duke@435 2504 void CompactibleFreeListSpace::verifyFreeLists() const {
duke@435 2505 if (FLSVerifyLists) {
duke@435 2506 _dictionary->verify();
duke@435 2507 verifyIndexedFreeLists();
duke@435 2508 } else {
duke@435 2509 if (FLSVerifyDictionary) {
duke@435 2510 _dictionary->verify();
duke@435 2511 }
duke@435 2512 if (FLSVerifyIndexTable) {
duke@435 2513 verifyIndexedFreeLists();
duke@435 2514 }
duke@435 2515 }
duke@435 2516 }
duke@435 2517 #endif
duke@435 2518
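// The indexed free lists are verified in two parts: slots below IndexSetStart
// must be empty, and every remaining slot is checked chunk by chunk for size,
// free-ness, list linkage and count consistency.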
duke@435 2519 void CompactibleFreeListSpace::verifyIndexedFreeLists() const {
duke@435 2520 size_t i = 0;
ysr@3264 2521 for (; i < IndexSetStart; i++) {
duke@435 2522 guarantee(_indexedFreeList[i].head() == NULL, "should be NULL");
duke@435 2523 }
duke@435 2524 for (; i < IndexSetSize; i++) {
duke@435 2525 verifyIndexedFreeList(i);
duke@435 2526 }
duke@435 2527 }
duke@435 2528
duke@435 2529 void CompactibleFreeListSpace::verifyIndexedFreeList(size_t size) const {
ysr@1580 2530 FreeChunk* fc = _indexedFreeList[size].head();
ysr@1580 2531 FreeChunk* tail = _indexedFreeList[size].tail();
ysr@1580 2532 size_t num = _indexedFreeList[size].count();
ysr@1580 2533 size_t n = 0;
ysr@3264 2534 guarantee(((size >= IndexSetStart) && (size % IndexSetStride == 0)) || fc == NULL,
ysr@3220 2535 "Slot should have been empty");
ysr@1580 2536 for (; fc != NULL; fc = fc->next(), n++) {
duke@435 2537 guarantee(fc->size() == size, "Size inconsistency");
jmasa@3732 2538 guarantee(fc->is_free(), "!free?");
duke@435 2539 guarantee(fc->next() == NULL || fc->next()->prev() == fc, "Broken list");
ysr@1580 2540 guarantee((fc->next() == NULL) == (fc == tail), "Incorrect tail");
duke@435 2541 }
ysr@1580 2542 guarantee(n == num, "Incorrect count");
duke@435 2543 }
duke@435 2544
duke@435 2545 #ifndef PRODUCT
ysr@3220 2546 void CompactibleFreeListSpace::check_free_list_consistency() const {
goetz@6337 2547 assert((TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >::min_size() <= IndexSetSize),
duke@435 2548 "Some sizes can't be allocated without recourse to"
duke@435 2549 " linear allocation buffers");
goetz@6337 2550 assert((TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >::min_size()*HeapWordSize == sizeof(TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >)),
duke@435 2551 "else MIN_TREE_CHUNK_SIZE is wrong");
brutisso@3807 2552 assert(IndexSetStart != 0, "IndexSetStart not initialized");
brutisso@3807 2553 assert(IndexSetStride != 0, "IndexSetStride not initialized");
duke@435 2554 }
duke@435 2555 #endif
duke@435 2556
ysr@447 2557 void CompactibleFreeListSpace::printFLCensus(size_t sweep_count) const {
duke@435 2558 assert_lock_strong(&_freelistLock);
jmasa@4196 2559 AdaptiveFreeList<FreeChunk> total;
ysr@447 2560 gclog_or_tty->print("end sweep# " SIZE_FORMAT "\n", sweep_count);
jmasa@4196 2561 AdaptiveFreeList<FreeChunk>::print_labels_on(gclog_or_tty, "size");
jmasa@3732 2562 size_t total_free = 0;
duke@435 2563 for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
jmasa@4196 2564 const AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
jmasa@3732 2565 total_free += fl->count() * fl->size();
ysr@447 2566 if (i % (40*IndexSetStride) == 0) {
jmasa@4196 2567 AdaptiveFreeList<FreeChunk>::print_labels_on(gclog_or_tty, "size");
ysr@447 2568 }
ysr@447 2569 fl->print_on(gclog_or_tty);
jmasa@3732 2570 total.set_bfr_surp( total.bfr_surp() + fl->bfr_surp() );
ysr@447 2571 total.set_surplus( total.surplus() + fl->surplus() );
ysr@447 2572 total.set_desired( total.desired() + fl->desired() );
jmasa@3732 2573 total.set_prev_sweep( total.prev_sweep() + fl->prev_sweep() );
jmasa@3732 2574 total.set_before_sweep(total.before_sweep() + fl->before_sweep());
ysr@447 2575 total.set_count( total.count() + fl->count() );
jmasa@3732 2576 total.set_coal_births( total.coal_births() + fl->coal_births() );
jmasa@3732 2577 total.set_coal_deaths( total.coal_deaths() + fl->coal_deaths() );
jmasa@3732 2578 total.set_split_births(total.split_births() + fl->split_births());
jmasa@3732 2579 total.set_split_deaths(total.split_deaths() + fl->split_deaths());
duke@435 2580 }
ysr@447 2581 total.print_on(gclog_or_tty, "TOTAL");
ysr@447 2582 gclog_or_tty->print_cr("Total free in indexed lists "
jmasa@3732 2583 SIZE_FORMAT " words", total_free);
duke@435 2584 gclog_or_tty->print("growth: %8.5f deficit: %8.5f\n",
jmasa@3732 2585 (double)(total.split_births()+total.coal_births()-total.split_deaths()-total.coal_deaths())/
jmasa@3732 2586 (total.prev_sweep() != 0 ? (double)total.prev_sweep() : 1.0),
ysr@447 2587 (double)(total.desired() - total.count())/(total.desired() != 0 ? (double)total.desired() : 1.0));
jmasa@3732 2588 _dictionary->print_dict_census();
duke@435 2589 }
duke@435 2590
ysr@1580 2591 ///////////////////////////////////////////////////////////////////////////
ysr@1580 2592 // CFLS_LAB
ysr@1580 2593 ///////////////////////////////////////////////////////////////////////////
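// CFLS_LAB is the per-worker local allocation buffer over the
// CompactibleFreeListSpace, used when promoting objects during parallel
// scavenges: each GC thread keeps its own small free lists, indexed by size,
// and refills them in bulk from the shared space. The per-size refill
// targets (_blocks_to_claim) are adapted from usage observed in earlier rounds.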
ysr@1580 2594
ysr@1580 2595 #define VECTOR_257(x) \
ysr@1580 2596 /* 1 2 3 4 5 6 7 8 9 1x 11 12 13 14 15 16 17 18 19 2x 21 22 23 24 25 26 27 28 29 3x 31 32 */ \
ysr@1580 2597 { x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, \
ysr@1580 2598 x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, \
ysr@1580 2599 x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, \
ysr@1580 2600 x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, \
ysr@1580 2601 x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, \
ysr@1580 2602 x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, \
ysr@1580 2603 x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, \
ysr@1580 2604 x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, \
ysr@1580 2605 x }
ysr@1580 2606
ysr@1580 2607 // Initialize with default setting of CMSParPromoteBlocksToClaim, _not_
ysr@1580 2608 // OldPLABSize, whose static default is different; if overridden at the
ysr@1580 2609 // command-line, this will get reinitialized via a call to
ysr@1580 2610 // modify_initialization() below.
ysr@1580 2611 AdaptiveWeightedAverage CFLS_LAB::_blocks_to_claim[] =
ysr@1580 2612 VECTOR_257(AdaptiveWeightedAverage(OldPLABWeight, (float)CMSParPromoteBlocksToClaim));
ysr@1580 2613 size_t CFLS_LAB::_global_num_blocks[] = VECTOR_257(0);
jmasa@3357 2614 uint CFLS_LAB::_global_num_workers[] = VECTOR_257(0);
duke@435 2615
duke@435 2616 CFLS_LAB::CFLS_LAB(CompactibleFreeListSpace* cfls) :
duke@435 2617 _cfls(cfls)
duke@435 2618 {
ysr@1580 2619 assert(CompactibleFreeListSpace::IndexSetSize == 257, "Modify VECTOR_257() macro above");
duke@435 2620 for (size_t i = CompactibleFreeListSpace::IndexSetStart;
duke@435 2621 i < CompactibleFreeListSpace::IndexSetSize;
duke@435 2622 i += CompactibleFreeListSpace::IndexSetStride) {
duke@435 2623 _indexedFreeList[i].set_size(i);
ysr@1580 2624 _num_blocks[i] = 0;
ysr@1580 2625 }
ysr@1580 2626 }
ysr@1580 2627
ysr@1580 2628 static bool _CFLS_LAB_modified = false;
ysr@1580 2629
ysr@1580 2630 void CFLS_LAB::modify_initialization(size_t n, unsigned wt) {
ysr@1580 2631 assert(!_CFLS_LAB_modified, "Call only once");
ysr@1580 2632 _CFLS_LAB_modified = true;
ysr@1580 2633 for (size_t i = CompactibleFreeListSpace::IndexSetStart;
ysr@1580 2634 i < CompactibleFreeListSpace::IndexSetSize;
ysr@1580 2635 i += CompactibleFreeListSpace::IndexSetStride) {
ysr@1580 2636 _blocks_to_claim[i].modify(n, wt, true /* force */);
duke@435 2637 }
duke@435 2638 }
duke@435 2639
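// Allocation entry point for a worker: requests of IndexSetSize words or more
// go straight to the shared dictionary under parDictionaryAllocLock(), while
// smaller requests are served from the worker's local indexed free list,
// refilling it from the global pool when it is empty.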
duke@435 2640 HeapWord* CFLS_LAB::alloc(size_t word_sz) {
duke@435 2641 FreeChunk* res;
ysr@2132 2642 assert(word_sz == _cfls->adjustObjectSize(word_sz), "Error");
duke@435 2643 if (word_sz >= CompactibleFreeListSpace::IndexSetSize) {
duke@435 2644 // This locking manages sync with other large object allocations.
duke@435 2645 MutexLockerEx x(_cfls->parDictionaryAllocLock(),
duke@435 2646 Mutex::_no_safepoint_check_flag);
duke@435 2647 res = _cfls->getChunkFromDictionaryExact(word_sz);
duke@435 2648 if (res == NULL) return NULL;
duke@435 2649 } else {
jmasa@4196 2650 AdaptiveFreeList<FreeChunk>* fl = &_indexedFreeList[word_sz];
duke@435 2651 if (fl->count() == 0) {
duke@435 2652 // Attempt to refill this local free list.
ysr@1580 2653 get_from_global_pool(word_sz, fl);
duke@435 2654 // If it didn't work, give up.
duke@435 2655 if (fl->count() == 0) return NULL;
duke@435 2656 }
jmasa@3732 2657 res = fl->get_chunk_at_head();
duke@435 2658 assert(res != NULL, "Why was count non-zero?");
duke@435 2659 }
duke@435 2660 res->markNotFree();
jmasa@3732 2661 assert(!res->is_free(), "shouldn't be marked free");
coleenp@622 2662 assert(oop(res)->klass_or_null() == NULL, "should look uninitialized");
duke@435 2663 // mangle a just allocated object with a distinct pattern.
duke@435 2664 debug_only(res->mangleAllocated(word_sz));
duke@435 2665 return (HeapWord*)res;
duke@435 2666 }
duke@435 2667
ysr@1580 2668 // Get a chunk of blocks of the right size and update related
ysr@1580 2669 // book-keeping stats
jmasa@4196 2670 void CFLS_LAB::get_from_global_pool(size_t word_sz, AdaptiveFreeList<FreeChunk>* fl) {
ysr@1580 2671 // Get the #blocks we want to claim
ysr@1580 2672 size_t n_blks = (size_t)_blocks_to_claim[word_sz].average();
ysr@1580 2673 assert(n_blks > 0, "Error");
jcoomes@7345 2674 assert(ResizeOldPLAB || n_blks == OldPLABSize, "Error");
ysr@1580 2675 // In some cases, when the application has a phase change,
ysr@1580 2676 // there may be a sudden and sharp shift in the object survival
ysr@1580 2677 // profile, and updating the counts at the end of a scavenge
ysr@1580 2678 // may not be quick enough, giving rise to large scavenge pauses
ysr@1580 2679 // during these phase changes. It is beneficial to detect such
ysr@1580 2680 // changes on-the-fly during a scavenge and avoid such a phase-change
ysr@1580 2681 // pothole. The following code is a heuristic attempt to do that.
ysr@1580 2682 // It is protected by a product flag until we have gained
ysr@1580 2683 // enough experience with this heuristic and fine-tuned its behaviour.
ysr@1580 2684 // WARNING: This might increase fragmentation if we overreact to
ysr@1580 2685 // small spikes, so some kind of historical smoothing based on
ysr@1580 2686 // previous experience with the greater reactivity might be useful.
ysr@1580 2687 // Lacking sufficient experience, CMSOldPLABResizeQuicker is disabled by
ysr@1580 2688 // default.
ysr@1580 2689 if (ResizeOldPLAB && CMSOldPLABResizeQuicker) {
ysr@1580 2690 size_t multiple = _num_blocks[word_sz]/(CMSOldPLABToleranceFactor*CMSOldPLABNumRefills*n_blks);
ysr@1580 2691 n_blks += CMSOldPLABReactivityFactor*multiple*n_blks;
ysr@1580 2692 n_blks = MIN2(n_blks, CMSOldPLABMax);
ysr@1580 2693 }
ysr@1580 2694 assert(n_blks > 0, "Error");
ysr@1580 2695 _cfls->par_get_chunk_of_blocks(word_sz, n_blks, fl);
ysr@1580 2696 // Update stats table entry for this block size
ysr@1580 2697 _num_blocks[word_sz] += fl->count();
ysr@1580 2698 }
ysr@1580 2699
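// Recompute the per-size refill targets from the global counts of blocks and
// workers accumulated during the last round, smoothing against the historical
// average when ResizeOldPLAB is enabled, and reset the counters for the next
// round.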
ysr@1580 2700 void CFLS_LAB::compute_desired_plab_size() {
ysr@1580 2701 for (size_t i = CompactibleFreeListSpace::IndexSetStart;
duke@435 2702 i < CompactibleFreeListSpace::IndexSetSize;
duke@435 2703 i += CompactibleFreeListSpace::IndexSetStride) {
ysr@1580 2704 assert((_global_num_workers[i] == 0) == (_global_num_blocks[i] == 0),
ysr@1580 2705 "Counter inconsistency");
ysr@1580 2706 if (_global_num_workers[i] > 0) {
ysr@1580 2707 // Need to smooth wrt historical average
ysr@1580 2708 if (ResizeOldPLAB) {
ysr@1580 2709 _blocks_to_claim[i].sample(
ysr@1580 2710 MAX2((size_t)CMSOldPLABMin,
ysr@1580 2711 MIN2((size_t)CMSOldPLABMax,
ysr@1580 2712 _global_num_blocks[i]/(_global_num_workers[i]*CMSOldPLABNumRefills))));
ysr@1580 2713 }
ysr@1580 2714 // Reset counters for next round
ysr@1580 2715 _global_num_workers[i] = 0;
ysr@1580 2716 _global_num_blocks[i] = 0;
ysr@1580 2717 if (PrintOldPLAB) {
drchase@6680 2718 gclog_or_tty->print_cr("[" SIZE_FORMAT "]: " SIZE_FORMAT, i, (size_t)_blocks_to_claim[i].average());
ysr@1580 2719 }
duke@435 2720 }
duke@435 2721 }
duke@435 2722 }
duke@435 2723
ysr@3220 2724 // If this is changed in the future to allow parallel
ysr@3220 2725 // access, one would need to take the FL locks and,
ysr@3220 2726 // depending on how it is used, stagger access from
ysr@3220 2727 // parallel threads to reduce contention.
ysr@1580 2728 void CFLS_LAB::retire(int tid) {
ysr@1580 2729 // We run this single-threaded with the world stopped,
ysr@1580 2730 // so there is no need for locks and such.
ysr@1580 2731 NOT_PRODUCT(Thread* t = Thread::current();)
ysr@1580 2732 assert(Thread::current()->is_VM_thread(), "Error");
ysr@1580 2733 for (size_t i = CompactibleFreeListSpace::IndexSetStart;
ysr@1580 2734 i < CompactibleFreeListSpace::IndexSetSize;
ysr@1580 2735 i += CompactibleFreeListSpace::IndexSetStride) {
ysr@1580 2736 assert(_num_blocks[i] >= (size_t)_indexedFreeList[i].count(),
ysr@1580 2737 "Can't retire more than what we obtained");
ysr@1580 2738 if (_num_blocks[i] > 0) {
ysr@1580 2739 size_t num_retire = _indexedFreeList[i].count();
ysr@1580 2740 assert(_num_blocks[i] > num_retire, "Should have used at least one");
ysr@1580 2741 {
ysr@3220 2742 // MutexLockerEx x(_cfls->_indexedFreeListParLocks[i],
ysr@3220 2743 // Mutex::_no_safepoint_check_flag);
ysr@3220 2744
ysr@1580 2745 // Update global stats for num_blocks used
ysr@1580 2746 _global_num_blocks[i] += (_num_blocks[i] - num_retire);
ysr@1580 2747 _global_num_workers[i]++;
jmasa@3357 2748 assert(_global_num_workers[i] <= ParallelGCThreads, "Too big");
ysr@1580 2749 if (num_retire > 0) {
ysr@1580 2750 _cfls->_indexedFreeList[i].prepend(&_indexedFreeList[i]);
ysr@1580 2751 // Reset this list.
jmasa@4196 2752 _indexedFreeList[i] = AdaptiveFreeList<FreeChunk>();
ysr@1580 2753 _indexedFreeList[i].set_size(i);
ysr@1580 2754 }
ysr@1580 2755 }
ysr@1580 2756 if (PrintOldPLAB) {
drchase@6680 2757 gclog_or_tty->print_cr("%d[" SIZE_FORMAT "]: " SIZE_FORMAT "/" SIZE_FORMAT "/" SIZE_FORMAT,
ysr@1580 2758 tid, i, num_retire, _num_blocks[i], (size_t)_blocks_to_claim[i].average());
ysr@1580 2759 }
ysr@1580 2760 // Reset stats for next round
ysr@1580 2761 _num_blocks[i] = 0;
ysr@1580 2762 }
ysr@1580 2763 }
ysr@1580 2764 }
ysr@1580 2765
jmasa@7234 2766 // Used by par_get_chunk_of_blocks() for the chunks from the
jmasa@7234 2767 // indexed_free_lists. Looks for a chunk whose size is a multiple
jmasa@7234 2768 // of "word_sz" and, if found, splits it into "word_sz"-sized chunks
jmasa@7234 2769 // and adds them to the free list "fl". "n" is the maximum number of
jmasa@7234 2770 // chunks to be added to "fl".
jmasa@7234 2771 bool CompactibleFreeListSpace:: par_get_chunk_of_blocks_IFL(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl) {
duke@435 2772
ysr@1580 2773 // We'll try all multiples of word_sz in the indexed set, starting with
ysr@1580 2774 // word_sz itself and, if CMSSplitIndexedFreeListBlocks, trying larger
ysr@1580 2775 // multiples as well; failing that, the caller splits a big chunk from the dictionary.
ysr@1580 2776 {
ysr@1580 2777 bool found;
ysr@1580 2778 int k;
ysr@1580 2779 size_t cur_sz;
ysr@1580 2780 for (k = 1, cur_sz = k * word_sz, found = false;
ysr@1580 2781 (cur_sz < CompactibleFreeListSpace::IndexSetSize) &&
ysr@1580 2782 (CMSSplitIndexedFreeListBlocks || k <= 1);
ysr@1580 2783 k++, cur_sz = k * word_sz) {
jmasa@4196 2784 AdaptiveFreeList<FreeChunk> fl_for_cur_sz; // Empty.
ysr@1580 2785 fl_for_cur_sz.set_size(cur_sz);
ysr@1580 2786 {
ysr@1580 2787 MutexLockerEx x(_indexedFreeListParLocks[cur_sz],
ysr@1580 2788 Mutex::_no_safepoint_check_flag);
jmasa@4196 2789 AdaptiveFreeList<FreeChunk>* gfl = &_indexedFreeList[cur_sz];
ysr@1580 2790 if (gfl->count() != 0) {
ysr@1580 2791 // nn is the number of chunks of size cur_sz that
ysr@1580 2792 // we'd need to split k-ways each, in order to create
ysr@1580 2793 // "n" chunks of size word_sz each.
ysr@1580 2794 const size_t nn = MAX2(n/k, (size_t)1);
ysr@1580 2795 gfl->getFirstNChunksFromList(nn, &fl_for_cur_sz);
ysr@1580 2796 found = true;
ysr@1580 2797 if (k > 1) {
ysr@1580 2798 // Update split death stats for the cur_sz-size blocks list:
ysr@1580 2799 // we increment the split death count by the number of blocks
ysr@1580 2800 // we just took from the cur_sz-size blocks list and which
ysr@1580 2801 // we will be splitting below.
jmasa@3732 2802 ssize_t deaths = gfl->split_deaths() +
ysr@1580 2803 fl_for_cur_sz.count();
jmasa@3732 2804 gfl->set_split_deaths(deaths);
ysr@1580 2805 }
ysr@1580 2806 }
ysr@1580 2807 }
ysr@1580 2808 // Now transfer fl_for_cur_sz to fl. Common case, we hope, is k = 1.
ysr@1580 2809 if (found) {
ysr@1580 2810 if (k == 1) {
ysr@1580 2811 fl->prepend(&fl_for_cur_sz);
ysr@1580 2812 } else {
ysr@1580 2813 // Divide each block on fl_for_cur_sz up k ways.
ysr@1580 2814 FreeChunk* fc;
jmasa@3732 2815 while ((fc = fl_for_cur_sz.get_chunk_at_head()) != NULL) {
ysr@1580 2816 // Must do this in reverse order, so that anybody attempting to
ysr@1580 2817 // access the main chunk sees it as a single free block until we
ysr@1580 2818 // change it.
ysr@1580 2819 size_t fc_size = fc->size();
jmasa@3732 2820 assert(fc->is_free(), "Error");
ysr@1580 2821 for (int i = k-1; i >= 0; i--) {
ysr@1580 2822 FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz);
ysr@2071 2823 assert((i != 0) ||
jmasa@3732 2824 ((fc == ffc) && ffc->is_free() &&
ysr@2071 2825 (ffc->size() == k*word_sz) && (fc_size == word_sz)),
ysr@2071 2826 "Counting error");
jmasa@3732 2827 ffc->set_size(word_sz);
jmasa@3732 2828 ffc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
jmasa@3732 2829 ffc->link_next(NULL);
ysr@1580 2830 // Above must occur before BOT is updated below.
ysr@2071 2831 OrderAccess::storestore();
ysr@2071 2832 // splitting from the right, fc_size == i * word_sz
ysr@2071 2833 _bt.mark_block((HeapWord*)ffc, word_sz, true /* reducing */);
ysr@1580 2834 fc_size -= word_sz;
ysr@2071 2835 assert(fc_size == i*word_sz, "Error");
ysr@2071 2836 _bt.verify_not_unallocated((HeapWord*)ffc, word_sz);
ysr@1580 2837 _bt.verify_single_block((HeapWord*)fc, fc_size);
ysr@2071 2838 _bt.verify_single_block((HeapWord*)ffc, word_sz);
ysr@1580 2839 // Push this on "fl".
jmasa@3732 2840 fl->return_chunk_at_head(ffc);
ysr@1580 2841 }
ysr@1580 2842 // TRAP
ysr@1580 2843 assert(fl->tail()->next() == NULL, "List invariant.");
ysr@1580 2844 }
ysr@1580 2845 }
ysr@1580 2846 // Update birth stats for this block size.
ysr@1580 2847 size_t num = fl->count();
ysr@1580 2848 MutexLockerEx x(_indexedFreeListParLocks[word_sz],
ysr@1580 2849 Mutex::_no_safepoint_check_flag);
jmasa@3732 2850 ssize_t births = _indexedFreeList[word_sz].split_births() + num;
jmasa@3732 2851 _indexedFreeList[word_sz].set_split_births(births);
jmasa@7234 2852 return true;
duke@435 2853 }
duke@435 2854 }
jmasa@7234 2855 return found;
duke@435 2856 }
jmasa@7234 2857 }
jmasa@7234 2858
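// Find a dictionary chunk large enough to be carved into (up to) "n" blocks
// of "word_sz" words each. Any remainder left after carving out whole blocks
// is returned to the dictionary or to the appropriate indexed free list, so
// the chunk handed back is an exact multiple of "word_sz" in size; NULL is
// returned if no suitable chunk exists.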
jmasa@7234 2859 FreeChunk* CompactibleFreeListSpace::get_n_way_chunk_to_split(size_t word_sz, size_t n) {
jmasa@7234 2860
duke@435 2861 FreeChunk* fc = NULL;
duke@435 2862 FreeChunk* rem_fc = NULL;
duke@435 2863 size_t rem;
duke@435 2864 {
duke@435 2865 MutexLockerEx x(parDictionaryAllocLock(),
duke@435 2866 Mutex::_no_safepoint_check_flag);
duke@435 2867 while (n > 0) {
jmasa@4196 2868 fc = dictionary()->get_chunk(MAX2(n * word_sz, _dictionary->min_size()),
jmasa@3730 2869 FreeBlockDictionary<FreeChunk>::atLeast);
duke@435 2870 if (fc != NULL) {
duke@435 2871 break;
duke@435 2872 } else {
duke@435 2873 n--;
duke@435 2874 }
duke@435 2875 }
jmasa@7234 2876 if (fc == NULL) return NULL;
ysr@2071 2877 // Otherwise, split up that block.
ysr@1580 2878 assert((ssize_t)n >= 1, "Control point invariant");
jmasa@3732 2879 assert(fc->is_free(), "Error: should be a free block");
ysr@2071 2880 _bt.verify_single_block((HeapWord*)fc, fc->size());
ysr@1580 2881 const size_t nn = fc->size() / word_sz;
duke@435 2882 n = MIN2(nn, n);
ysr@1580 2883 assert((ssize_t)n >= 1, "Control point invariant");
duke@435 2884 rem = fc->size() - n * word_sz;
duke@435 2885 // If there is a remainder, and it's too small, allocate one fewer.
duke@435 2886 if (rem > 0 && rem < MinChunkSize) {
duke@435 2887 n--; rem += word_sz;
duke@435 2888 }
jmasa@1583 2889 // Note that at this point we may have n == 0.
jmasa@1583 2890 assert((ssize_t)n >= 0, "Control point invariant");
jmasa@1583 2891
jmasa@1583 2892 // If n is 0, the chunk fc that was found is not large
jmasa@1583 2893 // enough to leave a viable remainder. We are unable to
jmasa@1583 2894 // allocate even one block. Return fc to the
jmasa@1583 2895 // dictionary and return NULL; the caller's "fl" is left empty.
jmasa@1583 2896 if (n == 0) {
jmasa@1583 2897 returnChunkToDictionary(fc);
jmasa@7234 2898 return NULL;
jmasa@1583 2899 }
jmasa@1583 2900
jmasa@7234 2901 _bt.allocated((HeapWord*)fc, fc->size(), true /* reducing */); // update _unallocated_blk
jmasa@7234 2902 dictionary()->dict_census_update(fc->size(),
jmasa@7234 2903 true /*split*/,
jmasa@7234 2904 false /*birth*/);
jmasa@7234 2905
duke@435 2906 // First return the remainder, if any.
duke@435 2907 // Note that we hold the lock until we decide if we're going to give
ysr@1580 2908 // back the remainder to the dictionary, since a concurrent allocation
duke@435 2909 // may otherwise see the heap as empty. (We're willing to take that
duke@435 2910 // hit if the block is a small block.)
duke@435 2911 if (rem > 0) {
duke@435 2912 size_t prefix_size = n * word_sz;
duke@435 2913 rem_fc = (FreeChunk*)((HeapWord*)fc + prefix_size);
jmasa@3732 2914 rem_fc->set_size(rem);
jmasa@3732 2915 rem_fc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
jmasa@3732 2916 rem_fc->link_next(NULL);
duke@435 2917 // Above must occur before BOT is updated below.
ysr@1580 2918 assert((ssize_t)n > 0 && prefix_size > 0 && rem_fc > fc, "Error");
ysr@2071 2919 OrderAccess::storestore();
duke@435 2920 _bt.split_block((HeapWord*)fc, fc->size(), prefix_size);
jmasa@3732 2921 assert(fc->is_free(), "Error");
jmasa@3732 2922 fc->set_size(prefix_size);
duke@435 2923 if (rem >= IndexSetSize) {
duke@435 2924 returnChunkToDictionary(rem_fc);
jmasa@4196 2925 dictionary()->dict_census_update(rem, true /*split*/, true /*birth*/);
duke@435 2926 rem_fc = NULL;
duke@435 2927 }
duke@435 2928 // Otherwise, return it to the small list below.
duke@435 2929 }
duke@435 2930 }
duke@435 2931 if (rem_fc != NULL) {
duke@435 2932 MutexLockerEx x(_indexedFreeListParLocks[rem],
duke@435 2933 Mutex::_no_safepoint_check_flag);
duke@435 2934 _bt.verify_not_unallocated((HeapWord*)rem_fc, rem_fc->size());
jmasa@3732 2935 _indexedFreeList[rem].return_chunk_at_head(rem_fc);
duke@435 2936 smallSplitBirth(rem);
duke@435 2937 }
jmasa@7234 2938 assert(n * word_sz == fc->size(),
jmasa@7234 2939 err_msg("Chunk size " SIZE_FORMAT " is not exactly splittable by "
jmasa@7234 2940 SIZE_FORMAT " sized chunks of size " SIZE_FORMAT,
jmasa@7234 2941 fc->size(), n, word_sz));
jmasa@7234 2942 return fc;
jmasa@7234 2943 }
jmasa@7234 2944
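// Fallback path of par_get_chunk_of_blocks(): when the indexed free lists
// cannot supply the blocks, take one large chunk from the dictionary (via
// get_n_way_chunk_to_split()), split it from the right into "word_sz"-sized
// blocks, push them onto "fl", and update the split-birth statistics.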
jmasa@7234 2945 void CompactibleFreeListSpace:: par_get_chunk_of_blocks_dictionary(size_t word_sz, size_t targetted_number_of_chunks, AdaptiveFreeList<FreeChunk>* fl) {
jmasa@7234 2946
jmasa@7234 2947 FreeChunk* fc = get_n_way_chunk_to_split(word_sz, targetted_number_of_chunks);
jmasa@7234 2948
jmasa@7234 2949 if (fc == NULL) {
jmasa@7234 2950 return;
jmasa@7234 2951 }
jmasa@7234 2952
jmasa@7234 2953 size_t n = fc->size() / word_sz;
jmasa@7234 2954
jmasa@7234 2955 assert((ssize_t)n > 0, "Consistency");
duke@435 2956 // Now do the splitting up.
duke@435 2957 // Must do this in reverse order, so that anybody attempting to
duke@435 2958 // access the main chunk sees it as a single free block until we
duke@435 2959 // change it.
duke@435 2960 size_t fc_size = n * word_sz;
duke@435 2961 // All but first chunk in this loop
duke@435 2962 for (ssize_t i = n-1; i > 0; i--) {
duke@435 2963 FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz);
jmasa@3732 2964 ffc->set_size(word_sz);
jmasa@3732 2965 ffc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
jmasa@3732 2966 ffc->link_next(NULL);
duke@435 2967 // Above must occur before BOT is updated below.
ysr@2071 2968 OrderAccess::storestore();
duke@435 2969 // splitting from the right; at this point fc_size == (i + 1) * word_sz
ysr@2071 2970 _bt.mark_block((HeapWord*)ffc, word_sz, true /* reducing */);
duke@435 2971 fc_size -= word_sz;
duke@435 2972 _bt.verify_not_unallocated((HeapWord*)ffc, ffc->size());
duke@435 2973 _bt.verify_single_block((HeapWord*)ffc, ffc->size());
duke@435 2974 _bt.verify_single_block((HeapWord*)fc, fc_size);
duke@435 2975 // Push this on "fl".
jmasa@3732 2976 fl->return_chunk_at_head(ffc);
duke@435 2977 }
duke@435 2978 // First chunk
jmasa@3732 2979 assert(fc->is_free() && fc->size() == n*word_sz, "Error: should still be a free block");
ysr@2071 2980 // The blocks above should show their new sizes before the first block below
jmasa@3732 2981 fc->set_size(word_sz);
jmasa@3732 2982 fc->link_prev(NULL); // idempotent wrt free-ness, see assert above
jmasa@3732 2983 fc->link_next(NULL);
duke@435 2984 _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
duke@435 2985 _bt.verify_single_block((HeapWord*)fc, fc->size());
jmasa@3732 2986 fl->return_chunk_at_head(fc);
duke@435 2987
ysr@1580 2988 assert((ssize_t)n > 0 && (ssize_t)n == fl->count(), "Incorrect number of blocks");
duke@435 2989 {
ysr@1580 2990 // Update the stats for this block size.
duke@435 2991 MutexLockerEx x(_indexedFreeListParLocks[word_sz],
duke@435 2992 Mutex::_no_safepoint_check_flag);
jmasa@3732 2993 const ssize_t births = _indexedFreeList[word_sz].split_births() + n;
jmasa@3732 2994 _indexedFreeList[word_sz].set_split_births(births);
ysr@1580 2995 // ssize_t new_surplus = _indexedFreeList[word_sz].surplus() + n;
ysr@1580 2996 // _indexedFreeList[word_sz].set_surplus(new_surplus);
duke@435 2997 }
duke@435 2998
duke@435 2999 // TRAP
duke@435 3000 assert(fl->tail()->next() == NULL, "List invariant.");
duke@435 3001 }
duke@435 3002
jmasa@7234 3003 void CompactibleFreeListSpace:: par_get_chunk_of_blocks(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl) {
jmasa@7234 3004 assert(fl->count() == 0, "Precondition.");
jmasa@7234 3005 assert(word_sz < CompactibleFreeListSpace::IndexSetSize,
jmasa@7234 3006 "Precondition");
jmasa@7234 3007
jmasa@7234 3008 if (par_get_chunk_of_blocks_IFL(word_sz, n, fl)) {
jmasa@7234 3009 // Got it
jmasa@7234 3010 return;
jmasa@7234 3011 }
jmasa@7234 3012
jmasa@7234 3013 // Otherwise, we'll split a block from the dictionary.
jmasa@7234 3014 par_get_chunk_of_blocks_dictionary(word_sz, n, fl);
jmasa@7234 3015 }
jmasa@7234 3016
duke@435 3017 // Set up the space's par_seq_tasks structure for work claiming
duke@435 3018 // for parallel rescan. See CMSParRemarkTask where this is currently used.
duke@435 3019 // XXX Need to suitably abstract and generalize this and the next
duke@435 3020 // method into one.
duke@435 3021 void
duke@435 3022 CompactibleFreeListSpace::
duke@435 3023 initialize_sequential_subtasks_for_rescan(int n_threads) {
duke@435 3024 // The "size" of each task is fixed according to rescan_task_size.
duke@435 3025 assert(n_threads > 0, "Unexpected n_threads argument");
duke@435 3026 const size_t task_size = rescan_task_size();
duke@435 3027 size_t n_tasks = (used_region().word_size() + task_size - 1)/task_size;
ysr@775 3028 assert((n_tasks == 0) == used_region().is_empty(), "n_tasks incorrect");
ysr@775 3029 assert(n_tasks == 0 ||
ysr@775 3030 ((used_region().start() + (n_tasks - 1)*task_size < used_region().end()) &&
ysr@775 3031 (used_region().start() + n_tasks*task_size >= used_region().end())),
ysr@775 3032 "n_tasks calculation incorrect");
duke@435 3033 SequentialSubTasksDone* pst = conc_par_seq_tasks();
duke@435 3034 assert(!pst->valid(), "Clobbering existing data?");
jmasa@2188 3035 // Sets the condition for completion of the subtask (how many threads
jmasa@2188 3036 // need to finish in order to be done).
jmasa@2188 3037 pst->set_n_threads(n_threads);
duke@435 3038 pst->set_n_tasks((int)n_tasks);
duke@435 3039 }
duke@435 3040
duke@435 3041 // Set up the space's par_seq_tasks structure for work claiming
duke@435 3042 // for parallel concurrent marking. See CMSConcMarkTask where this is currently used.
duke@435 3043 void
duke@435 3044 CompactibleFreeListSpace::
duke@435 3045 initialize_sequential_subtasks_for_marking(int n_threads,
duke@435 3046 HeapWord* low) {
duke@435 3047 // The "size" of each task is fixed according to rescan_task_size.
duke@435 3048 assert(n_threads > 0, "Unexpected n_threads argument");
duke@435 3049 const size_t task_size = marking_task_size();
duke@435 3050 assert(task_size > CardTableModRefBS::card_size_in_words &&
duke@435 3051 (task_size % CardTableModRefBS::card_size_in_words == 0),
duke@435 3052 "Otherwise arithmetic below would be incorrect");
duke@435 3053 MemRegion span = _gen->reserved();
duke@435 3054 if (low != NULL) {
duke@435 3055 if (span.contains(low)) {
duke@435 3056 // Align low down to a card boundary so that
duke@435 3057 // we can use block_offset_careful() on span boundaries.
duke@435 3058 HeapWord* aligned_low = (HeapWord*)align_size_down((uintptr_t)low,
duke@435 3059 CardTableModRefBS::card_size);
duke@435 3060 // Clip span prefix at aligned_low
duke@435 3061 span = span.intersection(MemRegion(aligned_low, span.end()));
duke@435 3062 } else if (low > span.end()) {
duke@435 3063 span = MemRegion(low, low); // Null region
duke@435 3064 } // else use entire span
duke@435 3065 }
duke@435 3066 assert(span.is_empty() ||
duke@435 3067 ((uintptr_t)span.start() % CardTableModRefBS::card_size == 0),
duke@435 3068 "span should start at a card boundary");
duke@435 3069 size_t n_tasks = (span.word_size() + task_size - 1)/task_size;
duke@435 3070 assert((n_tasks == 0) == span.is_empty(), "Inconsistency");
duke@435 3071 assert(n_tasks == 0 ||
duke@435 3072 ((span.start() + (n_tasks - 1)*task_size < span.end()) &&
duke@435 3073 (span.start() + n_tasks*task_size >= span.end())),
ysr@775 3074 "n_tasks calculation incorrect");
duke@435 3075 SequentialSubTasksDone* pst = conc_par_seq_tasks();
duke@435 3076 assert(!pst->valid(), "Clobbering existing data?");
jmasa@2188 3077 // Sets the condition for completion of the subtask (how many threads
jmasa@2188 3078 // need to finish in order to be done).
jmasa@2188 3079 pst->set_n_threads(n_threads);
duke@435 3080 pst->set_n_tasks((int)n_tasks);
duke@435 3081 }
