src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp

author       jmasa
date         Wed, 25 Apr 2012 09:55:55 -0700
changeset    3732 f69a5d43dc19
parent       3730 9f059abe8cf2
child        3807 c92a79900986
permissions  -rw-r--r--

7164144: Fix variable naming style in freeBlockDictionary.* and binaryTreeDictionary*
Summary: Fix naming style to be consistent with the predominant hotspot style.
Reviewed-by: ysr, brutisso

duke@435 1 /*
brutisso@3711 2 * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
duke@435 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
duke@435 4 *
duke@435 5 * This code is free software; you can redistribute it and/or modify it
duke@435 6 * under the terms of the GNU General Public License version 2 only, as
duke@435 7 * published by the Free Software Foundation.
duke@435 8 *
duke@435 9 * This code is distributed in the hope that it will be useful, but WITHOUT
duke@435 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
duke@435 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
duke@435 12 * version 2 for more details (a copy is included in the LICENSE file that
duke@435 13 * accompanied this code).
duke@435 14 *
duke@435 15 * You should have received a copy of the GNU General Public License version
duke@435 16 * 2 along with this work; if not, write to the Free Software Foundation,
duke@435 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
duke@435 18 *
trims@1907 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
trims@1907 20 * or visit www.oracle.com if you need additional information or have any
trims@1907 21 * questions.
duke@435 22 *
duke@435 23 */
duke@435 24
stefank@2314 25 #include "precompiled.hpp"
stefank@2314 26 #include "gc_implementation/concurrentMarkSweep/cmsLockVerifier.hpp"
stefank@2314 27 #include "gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp"
stefank@2314 28 #include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.inline.hpp"
stefank@2314 29 #include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.hpp"
stefank@2314 30 #include "gc_implementation/shared/liveRange.hpp"
stefank@2314 31 #include "gc_implementation/shared/spaceDecorator.hpp"
stefank@2314 32 #include "gc_interface/collectedHeap.hpp"
stefank@2314 33 #include "memory/allocation.inline.hpp"
stefank@2314 34 #include "memory/blockOffsetTable.inline.hpp"
stefank@2314 35 #include "memory/resourceArea.hpp"
stefank@2314 36 #include "memory/universe.inline.hpp"
stefank@2314 37 #include "oops/oop.inline.hpp"
stefank@2314 38 #include "runtime/globals.hpp"
stefank@2314 39 #include "runtime/handles.inline.hpp"
stefank@2314 40 #include "runtime/init.hpp"
stefank@2314 41 #include "runtime/java.hpp"
stefank@2314 42 #include "runtime/vmThread.hpp"
stefank@2314 43 #include "utilities/copy.hpp"
duke@435 44
duke@435 45 /////////////////////////////////////////////////////////////////////////
duke@435 46 //// CompactibleFreeListSpace
duke@435 47 /////////////////////////////////////////////////////////////////////////
duke@435 48
duke@435 49 // highest ranked free list lock rank
duke@435 50 int CompactibleFreeListSpace::_lockRank = Mutex::leaf + 3;
duke@435 51
kvn@1926 52 // Defaults are 0 so things will break badly if incorrectly initialized.
ysr@3264 53 size_t CompactibleFreeListSpace::IndexSetStart = 0;
ysr@3264 54 size_t CompactibleFreeListSpace::IndexSetStride = 0;
kvn@1926 55
kvn@1926 56 size_t MinChunkSize = 0;
kvn@1926 57
kvn@1926 58 void CompactibleFreeListSpace::set_cms_values() {
kvn@1926 59 // Set CMS global values
kvn@1926 60 assert(MinChunkSize == 0, "already set");
kvn@1926 61 #define numQuanta(x,y) ((x+y-1)/y)
kvn@1926 62 MinChunkSize = numQuanta(sizeof(FreeChunk), MinObjAlignmentInBytes) * MinObjAlignment;
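// Illustrative arithmetic only (the concrete values here are hypothetical,
// not asserted by this code): if sizeof(FreeChunk) were 24 bytes,
// MinObjAlignmentInBytes were 8, and MinObjAlignment were 1 heap word, then
// numQuanta(24, 8) == (24 + 8 - 1) / 8 == 3, so MinChunkSize == 3 * 1 == 3
// heap words -- the smallest block that can still hold a FreeChunk header
// at the required alignment.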
kvn@1926 63
kvn@1926 64 assert(IndexSetStart == 0 && IndexSetStride == 0, "already set");
ysr@3264 65 IndexSetStart = MinChunkSize;
kvn@1926 66 IndexSetStride = MinObjAlignment;
kvn@1926 67 }
kvn@1926 68
duke@435 69 // Constructor
duke@435 70 CompactibleFreeListSpace::CompactibleFreeListSpace(BlockOffsetSharedArray* bs,
duke@435 71 MemRegion mr, bool use_adaptive_freelists,
jmasa@3730 72 FreeBlockDictionary<FreeChunk>::DictionaryChoice dictionaryChoice) :
duke@435 73 _dictionaryChoice(dictionaryChoice),
duke@435 74 _adaptive_freelists(use_adaptive_freelists),
duke@435 75 _bt(bs, mr),
duke@435 76 // free list locks are in the range of values taken by _lockRank.
duke@435 77 // This range currently is [_leaf+2, _leaf+3].
duke@435 78 // Note: this requires that CFLspace c'tors
duke@435 79 // are called serially in the order in which the locks
duke@435 80 // are acquired in the program text. This is true today.
duke@435 81 _freelistLock(_lockRank--, "CompactibleFreeListSpace._lock", true),
duke@435 82 _parDictionaryAllocLock(Mutex::leaf - 1, // == rank(ExpandHeap_lock) - 1
duke@435 83 "CompactibleFreeListSpace._dict_par_lock", true),
duke@435 84 _rescan_task_size(CardTableModRefBS::card_size_in_words * BitsPerWord *
duke@435 85 CMSRescanMultiple),
duke@435 86 _marking_task_size(CardTableModRefBS::card_size_in_words * BitsPerWord *
duke@435 87 CMSConcMarkMultiple),
duke@435 88 _collector(NULL)
duke@435 89 {
jmasa@3730 90 assert(sizeof(FreeChunk) / BytesPerWord <= MinChunkSize,
jmasa@3730 91 "FreeChunk is larger than expected");
duke@435 92 _bt.set_space(this);
jmasa@698 93 initialize(mr, SpaceDecorator::Clear, SpaceDecorator::Mangle);
duke@435 94 // We have all of "mr", all of which we place in the dictionary
duke@435 95 // as one big chunk. We'll need to decide here which of several
duke@435 96 // possible alternative dictionary implementations to use. For
duke@435 97 // now the choice is easy, since we have only one working
duke@435 98 // implementation, namely, the simple binary tree (splaying
duke@435 99 // temporarily disabled).
duke@435 100 switch (dictionaryChoice) {
jmasa@3730 101 case FreeBlockDictionary<FreeChunk>::dictionarySplayTree:
jmasa@3730 102 case FreeBlockDictionary<FreeChunk>::dictionarySkipList:
duke@435 103 default:
duke@435 104 warning("dictionaryChoice: selected option not understood; using"
duke@435 105 " default BinaryTreeDictionary implementation instead.");
jmasa@3730 106 case FreeBlockDictionary<FreeChunk>::dictionaryBinaryTree:
jmasa@3730 107 _dictionary = new BinaryTreeDictionary<FreeChunk>(mr, use_adaptive_freelists);
duke@435 108 break;
duke@435 109 }
duke@435 110 assert(_dictionary != NULL, "CMS dictionary initialization");
duke@435 111 // The indexed free lists are initially all empty and are lazily
duke@435 112 // filled in on demand. Initialize the array elements to NULL.
duke@435 113 initializeIndexedFreeListArray();
duke@435 114
duke@435 115 // Not using adaptive free lists assumes that allocation is first
duke@435 116 // from the linAB's. Also a cms perm gen which can be compacted
duke@435 117 // has to have the klass's klassKlass allocated at a lower
duke@435 118 // address in the heap than the klass so that the klassKlass is
duke@435 119 // moved to its new location before the klass is moved.
duke@435 120 // Set the _refillSize for the linear allocation blocks
duke@435 121 if (!use_adaptive_freelists) {
jmasa@3732 122 FreeChunk* fc = _dictionary->get_chunk(mr.word_size());
duke@435 123 // The small linAB initially has all the space and will allocate
duke@435 124 // a chunk of any size.
duke@435 125 HeapWord* addr = (HeapWord*) fc;
duke@435 126 _smallLinearAllocBlock.set(addr, fc->size() ,
duke@435 127 1024*SmallForLinearAlloc, fc->size());
duke@435 128 // Note that _unallocated_block is not updated here.
duke@435 129 // Allocations from the linear allocation block should
duke@435 130 // update it.
duke@435 131 } else {
duke@435 132 _smallLinearAllocBlock.set(0, 0, 1024*SmallForLinearAlloc,
duke@435 133 SmallForLinearAlloc);
duke@435 134 }
duke@435 135 // CMSIndexedFreeListReplenish should be at least 1
duke@435 136 CMSIndexedFreeListReplenish = MAX2((uintx)1, CMSIndexedFreeListReplenish);
duke@435 137 _promoInfo.setSpace(this);
duke@435 138 if (UseCMSBestFit) {
duke@435 139 _fitStrategy = FreeBlockBestFitFirst;
duke@435 140 } else {
duke@435 141 _fitStrategy = FreeBlockStrategyNone;
duke@435 142 }
ysr@3220 143 check_free_list_consistency();
duke@435 144
duke@435 145 // Initialize locks for parallel case.
jmasa@2188 146
jmasa@2188 147 if (CollectedHeap::use_parallel_gc_threads()) {
duke@435 148 for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
duke@435 149 _indexedFreeListParLocks[i] = new Mutex(Mutex::leaf - 1, // == ExpandHeap_lock - 1
duke@435 150 "a freelist par lock",
duke@435 151 true);
duke@435 152 if (_indexedFreeListParLocks[i] == NULL)
duke@435 153 vm_exit_during_initialization("Could not allocate a par lock");
duke@435 154 DEBUG_ONLY(
duke@435 155 _indexedFreeList[i].set_protecting_lock(_indexedFreeListParLocks[i]);
duke@435 156 )
duke@435 157 }
duke@435 158 _dictionary->set_par_lock(&_parDictionaryAllocLock);
duke@435 159 }
duke@435 160 }
duke@435 161
duke@435 162 // Like CompactibleSpace forward() but always calls cross_threshold() to
duke@435 163 // update the block offset table. Removed initialize_threshold call because
duke@435 164 // CFLS does not use a block offset array for contiguous spaces.
duke@435 165 HeapWord* CompactibleFreeListSpace::forward(oop q, size_t size,
duke@435 166 CompactPoint* cp, HeapWord* compact_top) {
duke@435 167 // q is alive
duke@435 168 // First check if we should switch compaction space
duke@435 169 assert(this == cp->space, "'this' should be current compaction space.");
duke@435 170 size_t compaction_max_size = pointer_delta(end(), compact_top);
duke@435 171 assert(adjustObjectSize(size) == cp->space->adjust_object_size_v(size),
duke@435 172 "virtual adjustObjectSize_v() method is not correct");
duke@435 173 size_t adjusted_size = adjustObjectSize(size);
duke@435 174 assert(compaction_max_size >= MinChunkSize || compaction_max_size == 0,
duke@435 175 "no small fragments allowed");
duke@435 176 assert(minimum_free_block_size() == MinChunkSize,
duke@435 177 "for de-virtualized reference below");
duke@435 178 // Can't leave a nonzero-size residual fragment smaller than MinChunkSize.
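// For illustration (hypothetical numbers): with MinChunkSize == 3, if 10 words
// remain in this space and the object needs 8, forwarding it here would leave
// a 2-word fragment, so we switch compaction spaces; if it needs exactly 10
// words it fits flush and no residual fragment is created.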
duke@435 179 if (adjusted_size + MinChunkSize > compaction_max_size &&
duke@435 180 adjusted_size != compaction_max_size) {
duke@435 181 do {
duke@435 182 // switch to next compaction space
duke@435 183 cp->space->set_compaction_top(compact_top);
duke@435 184 cp->space = cp->space->next_compaction_space();
duke@435 185 if (cp->space == NULL) {
duke@435 186 cp->gen = GenCollectedHeap::heap()->prev_gen(cp->gen);
duke@435 187 assert(cp->gen != NULL, "compaction must succeed");
duke@435 188 cp->space = cp->gen->first_compaction_space();
duke@435 189 assert(cp->space != NULL, "generation must have a first compaction space");
duke@435 190 }
duke@435 191 compact_top = cp->space->bottom();
duke@435 192 cp->space->set_compaction_top(compact_top);
duke@435 193 // The correct adjusted_size may not be the same as that for this method
duke@435 194 // (i.e., cp->space may no longer be "this"), so adjust the size again.
duke@435 195 // Use the virtual method, which was not used above in order to save the
duke@435 196 // virtual dispatch.
duke@435 197 adjusted_size = cp->space->adjust_object_size_v(size);
duke@435 198 compaction_max_size = pointer_delta(cp->space->end(), compact_top);
duke@435 199 assert(cp->space->minimum_free_block_size() == 0, "just checking");
duke@435 200 } while (adjusted_size > compaction_max_size);
duke@435 201 }
duke@435 202
duke@435 203 // store the forwarding pointer into the mark word
duke@435 204 if ((HeapWord*)q != compact_top) {
duke@435 205 q->forward_to(oop(compact_top));
duke@435 206 assert(q->is_gc_marked(), "encoding the pointer should preserve the mark");
duke@435 207 } else {
duke@435 208 // if the object isn't moving we can just set the mark to the default
duke@435 209 // mark and handle it specially later on.
duke@435 210 q->init_mark();
duke@435 211 assert(q->forwardee() == NULL, "should be forwarded to NULL");
duke@435 212 }
duke@435 213
coleenp@548 214 VALIDATE_MARK_SWEEP_ONLY(MarkSweep::register_live_oop(q, adjusted_size));
duke@435 215 compact_top += adjusted_size;
duke@435 216
duke@435 217 // we need to update the offset table so that the beginnings of objects can be
duke@435 218 // found during scavenge. Note that we are updating the offset table based on
duke@435 219 // where the object will be once the compaction phase finishes.
duke@435 220
duke@435 221 // Always call cross_threshold(). A contiguous space would call it only when
duke@435 222 // the compaction_top exceeds the current threshold, but that shortcut does
duke@435 223 // not apply to a non-contiguous space.
duke@435 224 cp->threshold =
duke@435 225 cp->space->cross_threshold(compact_top - adjusted_size, compact_top);
duke@435 226 return compact_top;
duke@435 227 }
duke@435 228
duke@435 229 // A modified copy of OffsetTableContigSpace::cross_threshold() with _offsets -> _bt
duke@435 230 // and use of single_block instead of alloc_block. The name here is not really
duke@435 231 // appropriate - maybe a more general name could be invented for both the
duke@435 232 // contiguous and noncontiguous spaces.
duke@435 233
duke@435 234 HeapWord* CompactibleFreeListSpace::cross_threshold(HeapWord* start, HeapWord* the_end) {
duke@435 235 _bt.single_block(start, the_end);
duke@435 236 return end();
duke@435 237 }
duke@435 238
duke@435 239 // Initialize the indexed free lists to their empty (NULL) state.
duke@435 240 void CompactibleFreeListSpace::initializeIndexedFreeListArray() {
duke@435 241 for (size_t i = 0; i < IndexSetSize; i++) {
duke@435 242 // Note that on platforms where objects are double word aligned,
duke@435 243 // the odd array elements are not used. It is convenient, however,
duke@435 244 // to map directly from the object size to the array element.
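// (For illustration: a free chunk of exactly 5 heap words would live on
// _indexedFreeList[5]; on a platform with double-word alignment only even
// sizes arise, so such an odd-indexed list simply stays empty.)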
duke@435 245 _indexedFreeList[i].reset(IndexSetSize);
duke@435 246 _indexedFreeList[i].set_size(i);
duke@435 247 assert(_indexedFreeList[i].count() == 0, "reset check failed");
duke@435 248 assert(_indexedFreeList[i].head() == NULL, "reset check failed");
duke@435 249 assert(_indexedFreeList[i].tail() == NULL, "reset check failed");
duke@435 250 assert(_indexedFreeList[i].hint() == IndexSetSize, "reset check failed");
duke@435 251 }
duke@435 252 }
duke@435 253
duke@435 254 void CompactibleFreeListSpace::resetIndexedFreeListArray() {
ysr@3264 255 for (size_t i = 1; i < IndexSetSize; i++) {
duke@435 256 assert(_indexedFreeList[i].size() == (size_t) i,
duke@435 257 "Indexed free list sizes are incorrect");
duke@435 258 _indexedFreeList[i].reset(IndexSetSize);
duke@435 259 assert(_indexedFreeList[i].count() == 0, "reset check failed");
duke@435 260 assert(_indexedFreeList[i].head() == NULL, "reset check failed");
duke@435 261 assert(_indexedFreeList[i].tail() == NULL, "reset check failed");
duke@435 262 assert(_indexedFreeList[i].hint() == IndexSetSize, "reset check failed");
duke@435 263 }
duke@435 264 }
duke@435 265
duke@435 266 void CompactibleFreeListSpace::reset(MemRegion mr) {
duke@435 267 resetIndexedFreeListArray();
duke@435 268 dictionary()->reset();
duke@435 269 if (BlockOffsetArrayUseUnallocatedBlock) {
duke@435 270 assert(end() == mr.end(), "We are compacting to the bottom of CMS gen");
duke@435 271 // Everything's allocated until proven otherwise.
duke@435 272 _bt.set_unallocated_block(end());
duke@435 273 }
duke@435 274 if (!mr.is_empty()) {
duke@435 275 assert(mr.word_size() >= MinChunkSize, "Chunk size is too small");
duke@435 276 _bt.single_block(mr.start(), mr.word_size());
duke@435 277 FreeChunk* fc = (FreeChunk*) mr.start();
jmasa@3732 278 fc->set_size(mr.word_size());
duke@435 279 if (mr.word_size() >= IndexSetSize ) {
duke@435 280 returnChunkToDictionary(fc);
duke@435 281 } else {
duke@435 282 _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
jmasa@3732 283 _indexedFreeList[mr.word_size()].return_chunk_at_head(fc);
duke@435 284 }
duke@435 285 }
duke@435 286 _promoInfo.reset();
duke@435 287 _smallLinearAllocBlock._ptr = NULL;
duke@435 288 _smallLinearAllocBlock._word_size = 0;
duke@435 289 }
duke@435 290
duke@435 291 void CompactibleFreeListSpace::reset_after_compaction() {
duke@435 292 // Reset the space to the new reality - one free chunk.
duke@435 293 MemRegion mr(compaction_top(), end());
duke@435 294 reset(mr);
duke@435 295 // Now refill the linear allocation block(s) if possible.
duke@435 296 if (_adaptive_freelists) {
duke@435 297 refillLinearAllocBlocksIfNeeded();
duke@435 298 } else {
duke@435 299 // Place as much of mr in the linAB as we can get,
duke@435 300 // provided it was big enough to go into the dictionary.
jmasa@3732 301 FreeChunk* fc = dictionary()->find_largest_dict();
duke@435 302 if (fc != NULL) {
duke@435 303 assert(fc->size() == mr.word_size(),
duke@435 304 "Why was the chunk broken up?");
duke@435 305 removeChunkFromDictionary(fc);
duke@435 306 HeapWord* addr = (HeapWord*) fc;
duke@435 307 _smallLinearAllocBlock.set(addr, fc->size() ,
duke@435 308 1024*SmallForLinearAlloc, fc->size());
duke@435 309 // Note that _unallocated_block is not updated here.
duke@435 310 }
duke@435 311 }
duke@435 312 }
duke@435 313
duke@435 314 // Walks the entire dictionary, returning a coterminal
duke@435 315 // chunk, if it exists. Use with caution since it involves
duke@435 316 // a potentially complete walk of a potentially large tree.
duke@435 317 FreeChunk* CompactibleFreeListSpace::find_chunk_at_end() {
duke@435 318
duke@435 319 assert_lock_strong(&_freelistLock);
duke@435 320
duke@435 321 return dictionary()->find_chunk_ends_at(end());
duke@435 322 }
duke@435 323
duke@435 324
duke@435 325 #ifndef PRODUCT
duke@435 326 void CompactibleFreeListSpace::initializeIndexedFreeListArrayReturnedBytes() {
duke@435 327 for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
jmasa@3732 328 _indexedFreeList[i].allocation_stats()->set_returned_bytes(0);
duke@435 329 }
duke@435 330 }
duke@435 331
duke@435 332 size_t CompactibleFreeListSpace::sumIndexedFreeListArrayReturnedBytes() {
duke@435 333 size_t sum = 0;
duke@435 334 for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
jmasa@3732 335 sum += _indexedFreeList[i].allocation_stats()->returned_bytes();
duke@435 336 }
duke@435 337 return sum;
duke@435 338 }
duke@435 339
duke@435 340 size_t CompactibleFreeListSpace::totalCountInIndexedFreeLists() const {
duke@435 341 size_t count = 0;
ysr@3264 342 for (size_t i = IndexSetStart; i < IndexSetSize; i++) {
duke@435 343 debug_only(
duke@435 344 ssize_t total_list_count = 0;
duke@435 345 for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
duke@435 346 fc = fc->next()) {
duke@435 347 total_list_count++;
duke@435 348 }
duke@435 349 assert(total_list_count == _indexedFreeList[i].count(),
duke@435 350 "Count in list is incorrect");
duke@435 351 )
duke@435 352 count += _indexedFreeList[i].count();
duke@435 353 }
duke@435 354 return count;
duke@435 355 }
duke@435 356
duke@435 357 size_t CompactibleFreeListSpace::totalCount() {
duke@435 358 size_t num = totalCountInIndexedFreeLists();
jmasa@3732 359 num += dictionary()->total_count();
duke@435 360 if (_smallLinearAllocBlock._word_size != 0) {
duke@435 361 num++;
duke@435 362 }
duke@435 363 return num;
duke@435 364 }
duke@435 365 #endif
duke@435 366
duke@435 367 bool CompactibleFreeListSpace::is_free_block(const HeapWord* p) const {
duke@435 368 FreeChunk* fc = (FreeChunk*) p;
jmasa@3732 369 return fc->is_free();
duke@435 370 }
duke@435 371
duke@435 372 size_t CompactibleFreeListSpace::used() const {
duke@435 373 return capacity() - free();
duke@435 374 }
duke@435 375
duke@435 376 size_t CompactibleFreeListSpace::free() const {
duke@435 377 // "MT-safe, but not MT-precise"(TM), if you will: i.e.
duke@435 378 // if you do this while the structures are in flux you
duke@435 379 // may get an approximate answer only; for instance
duke@435 380 // because there is concurrent allocation either
duke@435 381 // directly by mutators or for promotion during a GC.
duke@435 382 // It's "MT-safe", however, in the sense that you are guaranteed
duke@435 383 // not to crash and burn, for instance, because of walking
duke@435 384 // pointers that could disappear as you were walking them.
duke@435 385 // The approximation is because the various components
duke@435 386 // that are read below are not read atomically (and
duke@435 387 // further the computation of totalSizeInIndexedFreeLists()
duke@435 388 // is itself a non-atomic computation). The normal use of
duke@435 389 // this is during a resize operation at the end of GC
duke@435 390 // and at that time you are guaranteed to get the
duke@435 391 // correct actual value. However, for instance, this is
duke@435 392 // also read completely asynchronously by the "perf-sampler"
duke@435 393 // that supports jvmstat, and you are apt to see the values
duke@435 394 // flicker in such cases.
duke@435 395 assert(_dictionary != NULL, "No _dictionary?");
jmasa@3732 396 return (_dictionary->total_chunk_size(DEBUG_ONLY(freelistLock())) +
duke@435 397 totalSizeInIndexedFreeLists() +
duke@435 398 _smallLinearAllocBlock._word_size) * HeapWordSize;
duke@435 399 }
duke@435 400
duke@435 401 size_t CompactibleFreeListSpace::max_alloc_in_words() const {
duke@435 402 assert(_dictionary != NULL, "No _dictionary?");
duke@435 403 assert_locked();
jmasa@3732 404 size_t res = _dictionary->max_chunk_size();
duke@435 405 res = MAX2(res, MIN2(_smallLinearAllocBlock._word_size,
duke@435 406 (size_t) SmallForLinearAlloc - 1));
duke@435 407 // XXX the following could potentially be pretty slow;
duke@435 408 // should one, pessimally for the rare cases when res
duke@435 409 // calculated above is less than IndexSetSize,
duke@435 410 // just return res calculated above? My reasoning was that
duke@435 411 // those cases will be so rare that the extra time spent doesn't
duke@435 412 // really matter....
duke@435 413 // Note: do not change the loop test i >= res + IndexSetStride
duke@435 414 // to i > res below, because i is unsigned and res may be zero.
duke@435 415 for (size_t i = IndexSetSize - 1; i >= res + IndexSetStride;
duke@435 416 i -= IndexSetStride) {
duke@435 417 if (_indexedFreeList[i].head() != NULL) {
duke@435 418 assert(_indexedFreeList[i].count() != 0, "Inconsistent FreeList");
duke@435 419 return i;
duke@435 420 }
duke@435 421 }
duke@435 422 return res;
duke@435 423 }
duke@435 424
ysr@2071 425 void LinearAllocBlock::print_on(outputStream* st) const {
ysr@2071 426 st->print_cr(" LinearAllocBlock: ptr = " PTR_FORMAT ", word_size = " SIZE_FORMAT
ysr@2071 427 ", refillsize = " SIZE_FORMAT ", allocation_size_limit = " SIZE_FORMAT,
ysr@2071 428 _ptr, _word_size, _refillSize, _allocation_size_limit);
ysr@2071 429 }
ysr@2071 430
ysr@2071 431 void CompactibleFreeListSpace::print_on(outputStream* st) const {
ysr@2071 432 st->print_cr("COMPACTIBLE FREELIST SPACE");
ysr@2071 433 st->print_cr(" Space:");
ysr@2071 434 Space::print_on(st);
ysr@2071 435
ysr@2071 436 st->print_cr("promoInfo:");
ysr@2071 437 _promoInfo.print_on(st);
ysr@2071 438
ysr@2071 439 st->print_cr("_smallLinearAllocBlock");
ysr@2071 440 _smallLinearAllocBlock.print_on(st);
ysr@2071 441
ysr@2071 442 // dump_memory_block(_smallLinearAllocBlock->_ptr, 128);
ysr@2071 443
ysr@2071 444 st->print_cr(" _fitStrategy = %s, _adaptive_freelists = %s",
ysr@2071 445 _fitStrategy?"true":"false", _adaptive_freelists?"true":"false");
ysr@2071 446 }
ysr@2071 447
ysr@1580 448 void CompactibleFreeListSpace::print_indexed_free_lists(outputStream* st)
ysr@1580 449 const {
ysr@1580 450 reportIndexedFreeListStatistics();
ysr@1580 451 gclog_or_tty->print_cr("Layout of Indexed Freelists");
ysr@1580 452 gclog_or_tty->print_cr("---------------------------");
jmasa@3730 453 FreeList<FreeChunk>::print_labels_on(st, "size");
ysr@1580 454 for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
ysr@1580 455 _indexedFreeList[i].print_on(gclog_or_tty);
ysr@1580 456 for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
ysr@1580 457 fc = fc->next()) {
ysr@1580 458 gclog_or_tty->print_cr("\t[" PTR_FORMAT "," PTR_FORMAT ") %s",
ysr@1580 459 fc, (HeapWord*)fc + i,
ysr@1580 460 fc->cantCoalesce() ? "\t CC" : "");
ysr@1580 461 }
ysr@1580 462 }
ysr@1580 463 }
ysr@1580 464
ysr@1580 465 void CompactibleFreeListSpace::print_promo_info_blocks(outputStream* st)
ysr@1580 466 const {
ysr@1580 467 _promoInfo.print_on(st);
ysr@1580 468 }
ysr@1580 469
ysr@1580 470 void CompactibleFreeListSpace::print_dictionary_free_lists(outputStream* st)
ysr@1580 471 const {
jmasa@3732 472 _dictionary->report_statistics();
ysr@1580 473 st->print_cr("Layout of Freelists in Tree");
ysr@1580 474 st->print_cr("---------------------------");
ysr@1580 475 _dictionary->print_free_lists(st);
ysr@1580 476 }
ysr@1580 477
ysr@1580 478 class BlkPrintingClosure: public BlkClosure {
ysr@1580 479 const CMSCollector* _collector;
ysr@1580 480 const CompactibleFreeListSpace* _sp;
ysr@1580 481 const CMSBitMap* _live_bit_map;
ysr@1580 482 const bool _post_remark;
ysr@1580 483 outputStream* _st;
ysr@1580 484 public:
ysr@1580 485 BlkPrintingClosure(const CMSCollector* collector,
ysr@1580 486 const CompactibleFreeListSpace* sp,
ysr@1580 487 const CMSBitMap* live_bit_map,
ysr@1580 488 outputStream* st):
ysr@1580 489 _collector(collector),
ysr@1580 490 _sp(sp),
ysr@1580 491 _live_bit_map(live_bit_map),
ysr@1580 492 _post_remark(collector->abstract_state() > CMSCollector::FinalMarking),
ysr@1580 493 _st(st) { }
ysr@1580 494 size_t do_blk(HeapWord* addr);
ysr@1580 495 };
ysr@1580 496
ysr@1580 497 size_t BlkPrintingClosure::do_blk(HeapWord* addr) {
ysr@1580 498 size_t sz = _sp->block_size_no_stall(addr, _collector);
ysr@1580 499 assert(sz != 0, "Should always be able to compute a size");
ysr@1580 500 if (_sp->block_is_obj(addr)) {
ysr@1580 501 const bool dead = _post_remark && !_live_bit_map->isMarked(addr);
ysr@1580 502 _st->print_cr(PTR_FORMAT ": %s object of size " SIZE_FORMAT "%s",
ysr@1580 503 addr,
ysr@1580 504 dead ? "dead" : "live",
ysr@1580 505 sz,
ysr@1580 506 (!dead && CMSPrintObjectsInDump) ? ":" : ".");
ysr@1580 507 if (CMSPrintObjectsInDump && !dead) {
ysr@1580 508 oop(addr)->print_on(_st);
ysr@1580 509 _st->print_cr("--------------------------------------");
ysr@1580 510 }
ysr@1580 511 } else { // free block
ysr@1580 512 _st->print_cr(PTR_FORMAT ": free block of size " SIZE_FORMAT "%s",
ysr@1580 513 addr, sz, CMSPrintChunksInDump ? ":" : ".");
ysr@1580 514 if (CMSPrintChunksInDump) {
ysr@1580 515 ((FreeChunk*)addr)->print_on(_st);
ysr@1580 516 _st->print_cr("--------------------------------------");
ysr@1580 517 }
ysr@1580 518 }
ysr@1580 519 return sz;
ysr@1580 520 }
ysr@1580 521
ysr@1580 522 void CompactibleFreeListSpace::dump_at_safepoint_with_locks(CMSCollector* c,
ysr@1580 523 outputStream* st) {
ysr@1580 524 st->print_cr("\n=========================");
ysr@1580 525 st->print_cr("Block layout in CMS Heap:");
ysr@1580 526 st->print_cr("=========================");
ysr@1580 527 BlkPrintingClosure bpcl(c, this, c->markBitMap(), st);
ysr@1580 528 blk_iterate(&bpcl);
ysr@1580 529
ysr@1580 530 st->print_cr("\n=======================================");
ysr@1580 531 st->print_cr("Order & Layout of Promotion Info Blocks");
ysr@1580 532 st->print_cr("=======================================");
ysr@1580 533 print_promo_info_blocks(st);
ysr@1580 534
ysr@1580 535 st->print_cr("\n===========================");
ysr@1580 536 st->print_cr("Order of Indexed Free Lists");
ysr@1580 537 st->print_cr("===========================");
ysr@1580 538 print_indexed_free_lists(st);
ysr@1580 539
ysr@1580 540 st->print_cr("\n=================================");
ysr@1580 541 st->print_cr("Order of Free Lists in Dictionary");
ysr@1580 542 st->print_cr("=================================");
ysr@1580 543 print_dictionary_free_lists(st);
ysr@1580 544 }
ysr@1580 545
ysr@1580 546
duke@435 547 void CompactibleFreeListSpace::reportFreeListStatistics() const {
duke@435 548 assert_lock_strong(&_freelistLock);
duke@435 549 assert(PrintFLSStatistics != 0, "Reporting error");
jmasa@3732 550 _dictionary->report_statistics();
duke@435 551 if (PrintFLSStatistics > 1) {
duke@435 552 reportIndexedFreeListStatistics();
jmasa@3732 553 size_t total_size = totalSizeInIndexedFreeLists() +
jmasa@3732 554 _dictionary->total_chunk_size(DEBUG_ONLY(freelistLock()));
jmasa@3732 555 gclog_or_tty->print(" free=%ld frag=%1.4f\n", total_size, flsFrag());
duke@435 556 }
duke@435 557 }
duke@435 558
duke@435 559 void CompactibleFreeListSpace::reportIndexedFreeListStatistics() const {
duke@435 560 assert_lock_strong(&_freelistLock);
duke@435 561 gclog_or_tty->print("Statistics for IndexedFreeLists:\n"
duke@435 562 "--------------------------------\n");
jmasa@3732 563 size_t total_size = totalSizeInIndexedFreeLists();
jmasa@3732 564 size_t free_blocks = numFreeBlocksInIndexedFreeLists();
jmasa@3732 565 gclog_or_tty->print("Total Free Space: %d\n", total_size);
duke@435 566 gclog_or_tty->print("Max Chunk Size: %d\n", maxChunkSizeInIndexedFreeLists());
jmasa@3732 567 gclog_or_tty->print("Number of Blocks: %d\n", free_blocks);
jmasa@3732 568 if (free_blocks != 0) {
jmasa@3732 569 gclog_or_tty->print("Av. Block Size: %d\n", total_size/free_blocks);
duke@435 570 }
duke@435 571 }
duke@435 572
duke@435 573 size_t CompactibleFreeListSpace::numFreeBlocksInIndexedFreeLists() const {
duke@435 574 size_t res = 0;
duke@435 575 for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
duke@435 576 debug_only(
duke@435 577 ssize_t recount = 0;
duke@435 578 for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
duke@435 579 fc = fc->next()) {
duke@435 580 recount += 1;
duke@435 581 }
duke@435 582 assert(recount == _indexedFreeList[i].count(),
duke@435 583 "Incorrect count in list");
duke@435 584 )
duke@435 585 res += _indexedFreeList[i].count();
duke@435 586 }
duke@435 587 return res;
duke@435 588 }
duke@435 589
duke@435 590 size_t CompactibleFreeListSpace::maxChunkSizeInIndexedFreeLists() const {
duke@435 591 for (size_t i = IndexSetSize - 1; i != 0; i -= IndexSetStride) {
duke@435 592 if (_indexedFreeList[i].head() != NULL) {
duke@435 593 assert(_indexedFreeList[i].count() != 0, "Inconsistent FreeList");
duke@435 594 return (size_t)i;
duke@435 595 }
duke@435 596 }
duke@435 597 return 0;
duke@435 598 }
duke@435 599
duke@435 600 void CompactibleFreeListSpace::set_end(HeapWord* value) {
duke@435 601 HeapWord* prevEnd = end();
duke@435 602 assert(prevEnd != value, "unnecessary set_end call");
ysr@2071 603 assert(prevEnd == NULL || !BlockOffsetArrayUseUnallocatedBlock || value >= unallocated_block(),
ysr@2071 604 "New end is below unallocated block");
duke@435 605 _end = value;
duke@435 606 if (prevEnd != NULL) {
duke@435 607 // Resize the underlying block offset table.
duke@435 608 _bt.resize(pointer_delta(value, bottom()));
ysr@1580 609 if (value <= prevEnd) {
ysr@2071 610 assert(!BlockOffsetArrayUseUnallocatedBlock || value >= unallocated_block(),
ysr@2071 611 "New end is below unallocated block");
ysr@1580 612 } else {
ysr@1580 613 // Now, take this new chunk and add it to the free blocks.
ysr@1580 614 // Note that the BOT has not yet been updated for this block.
ysr@1580 615 size_t newFcSize = pointer_delta(value, prevEnd);
ysr@1580 616 // XXX This is REALLY UGLY and should be fixed up. XXX
ysr@1580 617 if (!_adaptive_freelists && _smallLinearAllocBlock._ptr == NULL) {
ysr@1580 618 // Mark the boundary of the new block in BOT
ysr@1580 619 _bt.mark_block(prevEnd, value);
ysr@1580 620 // put it all in the linAB
ysr@1580 621 if (ParallelGCThreads == 0) {
ysr@1580 622 _smallLinearAllocBlock._ptr = prevEnd;
ysr@1580 623 _smallLinearAllocBlock._word_size = newFcSize;
ysr@1580 624 repairLinearAllocBlock(&_smallLinearAllocBlock);
ysr@1580 625 } else { // ParallelGCThreads > 0
ysr@1580 626 MutexLockerEx x(parDictionaryAllocLock(),
ysr@1580 627 Mutex::_no_safepoint_check_flag);
ysr@1580 628 _smallLinearAllocBlock._ptr = prevEnd;
ysr@1580 629 _smallLinearAllocBlock._word_size = newFcSize;
ysr@1580 630 repairLinearAllocBlock(&_smallLinearAllocBlock);
ysr@1580 631 }
ysr@1580 632 // Births of chunks put into a LinAB are not recorded. Births
ysr@1580 633 // of chunks as they are allocated out of a LinAB are.
ysr@1580 634 } else {
ysr@1580 635 // Add the block to the free lists, if possible coalescing it
ysr@1580 636 // with the last free block, and update the BOT and census data.
ysr@1580 637 addChunkToFreeListsAtEndRecordingStats(prevEnd, newFcSize);
duke@435 638 }
duke@435 639 }
duke@435 640 }
duke@435 641 }
duke@435 642
duke@435 643 class FreeListSpace_DCTOC : public Filtering_DCTOC {
duke@435 644 CompactibleFreeListSpace* _cfls;
duke@435 645 CMSCollector* _collector;
duke@435 646 protected:
duke@435 647 // Override.
duke@435 648 #define walk_mem_region_with_cl_DECL(ClosureType) \
duke@435 649 virtual void walk_mem_region_with_cl(MemRegion mr, \
duke@435 650 HeapWord* bottom, HeapWord* top, \
duke@435 651 ClosureType* cl); \
duke@435 652 void walk_mem_region_with_cl_par(MemRegion mr, \
duke@435 653 HeapWord* bottom, HeapWord* top, \
duke@435 654 ClosureType* cl); \
duke@435 655 void walk_mem_region_with_cl_nopar(MemRegion mr, \
duke@435 656 HeapWord* bottom, HeapWord* top, \
duke@435 657 ClosureType* cl)
duke@435 658 walk_mem_region_with_cl_DECL(OopClosure);
duke@435 659 walk_mem_region_with_cl_DECL(FilteringClosure);
duke@435 660
duke@435 661 public:
duke@435 662 FreeListSpace_DCTOC(CompactibleFreeListSpace* sp,
duke@435 663 CMSCollector* collector,
duke@435 664 OopClosure* cl,
duke@435 665 CardTableModRefBS::PrecisionStyle precision,
duke@435 666 HeapWord* boundary) :
duke@435 667 Filtering_DCTOC(sp, cl, precision, boundary),
duke@435 668 _cfls(sp), _collector(collector) {}
duke@435 669 };
duke@435 670
duke@435 671 // We de-virtualize the block-related calls below, since we know that our
duke@435 672 // space is a CompactibleFreeListSpace.
jmasa@3294 673
duke@435 674 #define FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(ClosureType) \
duke@435 675 void FreeListSpace_DCTOC::walk_mem_region_with_cl(MemRegion mr, \
duke@435 676 HeapWord* bottom, \
duke@435 677 HeapWord* top, \
duke@435 678 ClosureType* cl) { \
jmasa@3294 679 bool is_par = SharedHeap::heap()->n_par_threads() > 0; \
jmasa@3294 680 if (is_par) { \
jmasa@3294 681 assert(SharedHeap::heap()->n_par_threads() == \
jmasa@3294 682 SharedHeap::heap()->workers()->active_workers(), "Mismatch"); \
duke@435 683 walk_mem_region_with_cl_par(mr, bottom, top, cl); \
duke@435 684 } else { \
duke@435 685 walk_mem_region_with_cl_nopar(mr, bottom, top, cl); \
duke@435 686 } \
duke@435 687 } \
duke@435 688 void FreeListSpace_DCTOC::walk_mem_region_with_cl_par(MemRegion mr, \
duke@435 689 HeapWord* bottom, \
duke@435 690 HeapWord* top, \
duke@435 691 ClosureType* cl) { \
duke@435 692 /* Skip parts that are before "mr", in case "block_start" sent us \
duke@435 693 back too far. */ \
duke@435 694 HeapWord* mr_start = mr.start(); \
duke@435 695 size_t bot_size = _cfls->CompactibleFreeListSpace::block_size(bottom); \
duke@435 696 HeapWord* next = bottom + bot_size; \
duke@435 697 while (next < mr_start) { \
duke@435 698 bottom = next; \
duke@435 699 bot_size = _cfls->CompactibleFreeListSpace::block_size(bottom); \
duke@435 700 next = bottom + bot_size; \
duke@435 701 } \
duke@435 702 \
duke@435 703 while (bottom < top) { \
duke@435 704 if (_cfls->CompactibleFreeListSpace::block_is_obj(bottom) && \
duke@435 705 !_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks( \
duke@435 706 oop(bottom)) && \
duke@435 707 !_collector->CMSCollector::is_dead_obj(oop(bottom))) { \
duke@435 708 size_t word_sz = oop(bottom)->oop_iterate(cl, mr); \
duke@435 709 bottom += _cfls->adjustObjectSize(word_sz); \
duke@435 710 } else { \
duke@435 711 bottom += _cfls->CompactibleFreeListSpace::block_size(bottom); \
duke@435 712 } \
duke@435 713 } \
duke@435 714 } \
duke@435 715 void FreeListSpace_DCTOC::walk_mem_region_with_cl_nopar(MemRegion mr, \
duke@435 716 HeapWord* bottom, \
duke@435 717 HeapWord* top, \
duke@435 718 ClosureType* cl) { \
duke@435 719 /* Skip parts that are before "mr", in case "block_start" sent us \
duke@435 720 back too far. */ \
duke@435 721 HeapWord* mr_start = mr.start(); \
duke@435 722 size_t bot_size = _cfls->CompactibleFreeListSpace::block_size_nopar(bottom); \
duke@435 723 HeapWord* next = bottom + bot_size; \
duke@435 724 while (next < mr_start) { \
duke@435 725 bottom = next; \
duke@435 726 bot_size = _cfls->CompactibleFreeListSpace::block_size_nopar(bottom); \
duke@435 727 next = bottom + bot_size; \
duke@435 728 } \
duke@435 729 \
duke@435 730 while (bottom < top) { \
duke@435 731 if (_cfls->CompactibleFreeListSpace::block_is_obj_nopar(bottom) && \
duke@435 732 !_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks( \
duke@435 733 oop(bottom)) && \
duke@435 734 !_collector->CMSCollector::is_dead_obj(oop(bottom))) { \
duke@435 735 size_t word_sz = oop(bottom)->oop_iterate(cl, mr); \
duke@435 736 bottom += _cfls->adjustObjectSize(word_sz); \
duke@435 737 } else { \
duke@435 738 bottom += _cfls->CompactibleFreeListSpace::block_size_nopar(bottom); \
duke@435 739 } \
duke@435 740 } \
duke@435 741 }
duke@435 742
duke@435 743 // (There are only two of these, rather than N, because the split is due
duke@435 744 // only to the introduction of the FilteringClosure, a local part of the
duke@435 745 // impl of this abstraction.)
duke@435 746 FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(OopClosure)
duke@435 747 FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(FilteringClosure)
duke@435 748
duke@435 749 DirtyCardToOopClosure*
duke@435 750 CompactibleFreeListSpace::new_dcto_cl(OopClosure* cl,
duke@435 751 CardTableModRefBS::PrecisionStyle precision,
duke@435 752 HeapWord* boundary) {
duke@435 753 return new FreeListSpace_DCTOC(this, _collector, cl, precision, boundary);
duke@435 754 }
duke@435 755
duke@435 756
duke@435 757 // Note on locking for the space iteration functions:
duke@435 758 // since the collector's iteration activities are concurrent with
duke@435 759 // allocation activities by mutators, absent a suitable mutual exclusion
duke@435 760 // mechanism the iterators may go awry. For instance, a block being iterated
duke@435 761 // may suddenly be allocated or divided up and part of it allocated and
duke@435 762 // so on.
duke@435 763
duke@435 764 // Apply the given closure to each block in the space.
duke@435 765 void CompactibleFreeListSpace::blk_iterate_careful(BlkClosureCareful* cl) {
duke@435 766 assert_lock_strong(freelistLock());
duke@435 767 HeapWord *cur, *limit;
duke@435 768 for (cur = bottom(), limit = end(); cur < limit;
duke@435 769 cur += cl->do_blk_careful(cur));
duke@435 770 }
duke@435 771
duke@435 772 // Apply the given closure to each block in the space.
duke@435 773 void CompactibleFreeListSpace::blk_iterate(BlkClosure* cl) {
duke@435 774 assert_lock_strong(freelistLock());
duke@435 775 HeapWord *cur, *limit;
duke@435 776 for (cur = bottom(), limit = end(); cur < limit;
duke@435 777 cur += cl->do_blk(cur));
duke@435 778 }
duke@435 779
duke@435 780 // Apply the given closure to each oop in the space.
duke@435 781 void CompactibleFreeListSpace::oop_iterate(OopClosure* cl) {
duke@435 782 assert_lock_strong(freelistLock());
duke@435 783 HeapWord *cur, *limit;
duke@435 784 size_t curSize;
duke@435 785 for (cur = bottom(), limit = end(); cur < limit;
duke@435 786 cur += curSize) {
duke@435 787 curSize = block_size(cur);
duke@435 788 if (block_is_obj(cur)) {
duke@435 789 oop(cur)->oop_iterate(cl);
duke@435 790 }
duke@435 791 }
duke@435 792 }
duke@435 793
duke@435 794 // Apply the given closure to each oop in the intersection of the space and the memory region.
duke@435 795 void CompactibleFreeListSpace::oop_iterate(MemRegion mr, OopClosure* cl) {
duke@435 796 assert_lock_strong(freelistLock());
duke@435 797 if (is_empty()) {
duke@435 798 return;
duke@435 799 }
duke@435 800 MemRegion cur = MemRegion(bottom(), end());
duke@435 801 mr = mr.intersection(cur);
duke@435 802 if (mr.is_empty()) {
duke@435 803 return;
duke@435 804 }
duke@435 805 if (mr.equals(cur)) {
duke@435 806 oop_iterate(cl);
duke@435 807 return;
duke@435 808 }
duke@435 809 assert(mr.end() <= end(), "just took an intersection above");
duke@435 810 HeapWord* obj_addr = block_start(mr.start());
duke@435 811 HeapWord* t = mr.end();
duke@435 812
duke@435 813 SpaceMemRegionOopsIterClosure smr_blk(cl, mr);
duke@435 814 if (block_is_obj(obj_addr)) {
duke@435 815 // Handle first object specially.
duke@435 816 oop obj = oop(obj_addr);
duke@435 817 obj_addr += adjustObjectSize(obj->oop_iterate(&smr_blk));
duke@435 818 } else {
duke@435 819 FreeChunk* fc = (FreeChunk*)obj_addr;
duke@435 820 obj_addr += fc->size();
duke@435 821 }
duke@435 822 while (obj_addr < t) {
duke@435 823 HeapWord* obj = obj_addr;
duke@435 824 obj_addr += block_size(obj_addr);
duke@435 825 // If "obj_addr" is not greater than top, then the
duke@435 826 // entire object "obj" is within the region.
duke@435 827 if (obj_addr <= t) {
duke@435 828 if (block_is_obj(obj)) {
duke@435 829 oop(obj)->oop_iterate(cl);
duke@435 830 }
duke@435 831 } else {
duke@435 832 // "obj" extends beyond end of region
duke@435 833 if (block_is_obj(obj)) {
duke@435 834 oop(obj)->oop_iterate(&smr_blk);
duke@435 835 }
duke@435 836 break;
duke@435 837 }
duke@435 838 }
duke@435 839 }
duke@435 840
duke@435 841 // NOTE: In the following methods, in order to safely be able to
duke@435 842 // apply the closure to an object, we need to be sure that the
duke@435 843 // object has been initialized. We are guaranteed that an object
duke@435 844 // is initialized if we are holding the Heap_lock with the
duke@435 845 // world stopped.
duke@435 846 void CompactibleFreeListSpace::verify_objects_initialized() const {
duke@435 847 if (is_init_completed()) {
duke@435 848 assert_locked_or_safepoint(Heap_lock);
duke@435 849 if (Universe::is_fully_initialized()) {
duke@435 850 guarantee(SafepointSynchronize::is_at_safepoint(),
duke@435 851 "Required for objects to be initialized");
duke@435 852 }
duke@435 853 } // else make a concession at vm start-up
duke@435 854 }
duke@435 855
duke@435 856 // Apply the given closure to each object in the space
duke@435 857 void CompactibleFreeListSpace::object_iterate(ObjectClosure* blk) {
duke@435 858 assert_lock_strong(freelistLock());
duke@435 859 NOT_PRODUCT(verify_objects_initialized());
duke@435 860 HeapWord *cur, *limit;
duke@435 861 size_t curSize;
duke@435 862 for (cur = bottom(), limit = end(); cur < limit;
duke@435 863 cur += curSize) {
duke@435 864 curSize = block_size(cur);
duke@435 865 if (block_is_obj(cur)) {
duke@435 866 blk->do_object(oop(cur));
duke@435 867 }
duke@435 868 }
duke@435 869 }
duke@435 870
jmasa@952 871 // Apply the given closure to each live object in the space.
jmasa@952 872 // The use of CompactibleFreeListSpace
jmasa@952 873 // by the ConcurrentMarkSweepGeneration for concurrent GCs means the space
jmasa@952 874 // may contain objects with references to objects that are no longer
jmasa@952 875 // valid. For example, an object may reference another object
jmasa@952 876 // that has already been swept up (collected). This method uses
jmasa@952 877 // obj_is_alive() to determine whether it is safe to apply the closure to
jmasa@952 878 // an object. See obj_is_alive() for details on how liveness of an
jmasa@952 879 // object is decided.
jmasa@952 880
jmasa@952 881 void CompactibleFreeListSpace::safe_object_iterate(ObjectClosure* blk) {
jmasa@952 882 assert_lock_strong(freelistLock());
jmasa@952 883 NOT_PRODUCT(verify_objects_initialized());
jmasa@952 884 HeapWord *cur, *limit;
jmasa@952 885 size_t curSize;
jmasa@952 886 for (cur = bottom(), limit = end(); cur < limit;
jmasa@952 887 cur += curSize) {
jmasa@952 888 curSize = block_size(cur);
jmasa@952 889 if (block_is_obj(cur) && obj_is_alive(cur)) {
jmasa@952 890 blk->do_object(oop(cur));
jmasa@952 891 }
jmasa@952 892 }
jmasa@952 893 }
jmasa@952 894
duke@435 895 void CompactibleFreeListSpace::object_iterate_mem(MemRegion mr,
duke@435 896 UpwardsObjectClosure* cl) {
ysr@1580 897 assert_locked(freelistLock());
duke@435 898 NOT_PRODUCT(verify_objects_initialized());
duke@435 899 Space::object_iterate_mem(mr, cl);
duke@435 900 }
duke@435 901
duke@435 902 // Callers of this iterator beware: The closure application should
duke@435 903 // be robust in the face of uninitialized objects and should (always)
duke@435 904 // return a correct size so that the next addr + size below gives us a
duke@435 905 // valid block boundary. [See for instance,
duke@435 906 // ScanMarkedObjectsAgainCarefullyClosure::do_object_careful()
duke@435 907 // in ConcurrentMarkSweepGeneration.cpp.]
duke@435 908 HeapWord*
duke@435 909 CompactibleFreeListSpace::object_iterate_careful(ObjectClosureCareful* cl) {
duke@435 910 assert_lock_strong(freelistLock());
duke@435 911 HeapWord *addr, *last;
duke@435 912 size_t size;
duke@435 913 for (addr = bottom(), last = end();
duke@435 914 addr < last; addr += size) {
duke@435 915 FreeChunk* fc = (FreeChunk*)addr;
jmasa@3732 916 if (fc->is_free()) {
duke@435 917 // Since we hold the free list lock, which protects direct
duke@435 918 // allocation in this generation by mutators, a free object
duke@435 919 // will remain free throughout this iteration code.
duke@435 920 size = fc->size();
duke@435 921 } else {
duke@435 922 // Note that the object need not necessarily be initialized,
duke@435 923 // because (for instance) the free list lock does NOT protect
duke@435 924 // object initialization. The closure application below must
duke@435 925 // therefore be correct in the face of uninitialized objects.
duke@435 926 size = cl->do_object_careful(oop(addr));
duke@435 927 if (size == 0) {
duke@435 928 // An unparsable object found. Signal early termination.
duke@435 929 return addr;
duke@435 930 }
duke@435 931 }
duke@435 932 }
duke@435 933 return NULL;
duke@435 934 }
duke@435 935
duke@435 936 // Callers of this iterator beware: The closure application should
duke@435 937 // be robust in the face of uninitialized objects and should (always)
duke@435 938 // return a correct size so that the next addr + size below gives us a
duke@435 939 // valid block boundary. [See for instance,
duke@435 940 // ScanMarkedObjectsAgainCarefullyClosure::do_object_careful()
duke@435 941 // in ConcurrentMarkSweepGeneration.cpp.]
duke@435 942 HeapWord*
duke@435 943 CompactibleFreeListSpace::object_iterate_careful_m(MemRegion mr,
duke@435 944 ObjectClosureCareful* cl) {
duke@435 945 assert_lock_strong(freelistLock());
duke@435 946 // Can't use used_region() below because it may not necessarily
duke@435 947 // be the same as [bottom(),end()); although we could
duke@435 948 // use [used_region().start(),round_to(used_region().end(),CardSize)),
duke@435 949 // that appears too cumbersome, so we just do the simpler check
duke@435 950 // in the assertion below.
duke@435 951 assert(!mr.is_empty() && MemRegion(bottom(),end()).contains(mr),
duke@435 952 "mr should be non-empty and within used space");
duke@435 953 HeapWord *addr, *end;
duke@435 954 size_t size;
duke@435 955 for (addr = block_start_careful(mr.start()), end = mr.end();
duke@435 956 addr < end; addr += size) {
duke@435 957 FreeChunk* fc = (FreeChunk*)addr;
jmasa@3732 958 if (fc->is_free()) {
duke@435 959 // Since we hold the free list lock, which protects direct
duke@435 960 // allocation in this generation by mutators, a free object
duke@435 961 // will remain free throughout this iteration code.
duke@435 962 size = fc->size();
duke@435 963 } else {
duke@435 964 // Note that the object need not necessarily be initialized,
duke@435 965 // because (for instance) the free list lock does NOT protect
duke@435 966 // object initialization. The closure application below must
duke@435 967 // therefore be correct in the face of uninitialized objects.
duke@435 968 size = cl->do_object_careful_m(oop(addr), mr);
duke@435 969 if (size == 0) {
duke@435 970 // An unparsable object found. Signal early termination.
duke@435 971 return addr;
duke@435 972 }
duke@435 973 }
duke@435 974 }
duke@435 975 return NULL;
duke@435 976 }
duke@435 977
duke@435 978
ysr@777 979 HeapWord* CompactibleFreeListSpace::block_start_const(const void* p) const {
duke@435 980 NOT_PRODUCT(verify_objects_initialized());
duke@435 981 return _bt.block_start(p);
duke@435 982 }
duke@435 983
duke@435 984 HeapWord* CompactibleFreeListSpace::block_start_careful(const void* p) const {
duke@435 985 return _bt.block_start_careful(p);
duke@435 986 }
duke@435 987
duke@435 988 size_t CompactibleFreeListSpace::block_size(const HeapWord* p) const {
duke@435 989 NOT_PRODUCT(verify_objects_initialized());
duke@435 990 // This must be volatile, or else there is a danger that the compiler
duke@435 991 // will compile the code below into a sometimes-infinite loop, by keeping
duke@435 992 // the value read the first time in a register.
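// In outline: if p currently looks like a free chunk, read its size and then
// re-check that it is still a free chunk before trusting that size; otherwise
// keep re-reading until a klass pointer becomes visible and compute the
// object's size from that klass.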
duke@435 993 while (true) {
duke@435 994 // We must do this until we get a consistent view of the object.
coleenp@622 995 if (FreeChunk::indicatesFreeChunk(p)) {
coleenp@622 996 volatile FreeChunk* fc = (volatile FreeChunk*)p;
coleenp@622 997 size_t res = fc->size();
coleenp@622 998 // If the object is still a free chunk, return the size, else it
coleenp@622 999 // has been allocated so try again.
coleenp@622 1000 if (FreeChunk::indicatesFreeChunk(p)) {
duke@435 1001 assert(res != 0, "Block size should not be 0");
duke@435 1002 return res;
duke@435 1003 }
coleenp@622 1004 } else {
coleenp@622 1005 // must read from what 'p' points to in each loop.
coleenp@622 1006 klassOop k = ((volatile oopDesc*)p)->klass_or_null();
coleenp@622 1007 if (k != NULL) {
ysr@2071 1008 assert(k->is_oop(true /* ignore mark word */), "Should be klass oop");
coleenp@622 1009 oop o = (oop)p;
coleenp@622 1010 assert(o->is_parsable(), "Should be parsable");
coleenp@622 1011 assert(o->is_oop(true /* ignore mark word */), "Should be an oop.");
coleenp@622 1012 size_t res = o->size_given_klass(k->klass_part());
coleenp@622 1013 res = adjustObjectSize(res);
coleenp@622 1014 assert(res != 0, "Block size should not be 0");
coleenp@622 1015 return res;
coleenp@622 1016 }
duke@435 1017 }
duke@435 1018 }
duke@435 1019 }
duke@435 1020
duke@435 1021 // A variant of the above that uses the Printezis bits for
duke@435 1022 // unparsable but allocated objects. This avoids any possible
duke@435 1023 // stalls waiting for mutators to initialize objects, and is
duke@435 1024 // thus potentially faster than the variant above. However,
duke@435 1025 // this variant may return a zero size for a block that is
duke@435 1026 // under mutation and for which a consistent size cannot be
duke@435 1027 // inferred without stalling; see CMSCollector::block_size_if_printezis_bits().
duke@435 1028 size_t CompactibleFreeListSpace::block_size_no_stall(HeapWord* p,
duke@435 1029 const CMSCollector* c)
duke@435 1030 const {
duke@435 1031 assert(MemRegion(bottom(), end()).contains(p), "p not in space");
duke@435 1032 // This must be volatile, or else there is a danger that the compiler
duke@435 1033 // will compile the code below into a sometimes-infinite loop, by keeping
duke@435 1034 // the value read the first time in a register.
duke@435 1035 DEBUG_ONLY(uint loops = 0;)
duke@435 1036 while (true) {
duke@435 1037 // We must do this until we get a consistent view of the object.
coleenp@622 1038 if (FreeChunk::indicatesFreeChunk(p)) {
coleenp@622 1039 volatile FreeChunk* fc = (volatile FreeChunk*)p;
coleenp@622 1040 size_t res = fc->size();
coleenp@622 1041 if (FreeChunk::indicatesFreeChunk(p)) {
duke@435 1042 assert(res != 0, "Block size should not be 0");
duke@435 1043 assert(loops == 0, "Should be 0");
duke@435 1044 return res;
duke@435 1045 }
duke@435 1046 } else {
coleenp@622 1047 // must read from what 'p' points to in each loop.
coleenp@622 1048 klassOop k = ((volatile oopDesc*)p)->klass_or_null();
ysr@2533 1049 // We trust the size of any object that has a non-NULL
ysr@2533 1050 // klass and (for those in the perm gen) is parsable
ysr@2533 1051 // -- irrespective of its conc_safe-ty.
ysr@2533 1052 if (k != NULL && ((oopDesc*)p)->is_parsable()) {
coleenp@622 1053 assert(k->is_oop(), "Should really be klass oop.");
coleenp@622 1054 oop o = (oop)p;
coleenp@622 1055 assert(o->is_oop(), "Should be an oop");
coleenp@622 1056 size_t res = o->size_given_klass(k->klass_part());
coleenp@622 1057 res = adjustObjectSize(res);
coleenp@622 1058 assert(res != 0, "Block size should not be 0");
coleenp@622 1059 return res;
coleenp@622 1060 } else {
ysr@2533 1061 // May return 0 if P-bits not present.
coleenp@622 1062 return c->block_size_if_printezis_bits(p);
coleenp@622 1063 }
duke@435 1064 }
duke@435 1065 assert(loops == 0, "Can loop at most once");
duke@435 1066 DEBUG_ONLY(loops++;)
duke@435 1067 }
duke@435 1068 }
duke@435 1069
duke@435 1070 size_t CompactibleFreeListSpace::block_size_nopar(const HeapWord* p) const {
duke@435 1071 NOT_PRODUCT(verify_objects_initialized());
duke@435 1072 assert(MemRegion(bottom(), end()).contains(p), "p not in space");
duke@435 1073 FreeChunk* fc = (FreeChunk*)p;
jmasa@3732 1074 if (fc->is_free()) {
duke@435 1075 return fc->size();
duke@435 1076 } else {
duke@435 1077 // Ignore mark word because this may be a recently promoted
duke@435 1078 // object whose mark word is used to chain together grey
duke@435 1079 // objects (the last one would have a null value).
duke@435 1080 assert(oop(p)->is_oop(true), "Should be an oop");
duke@435 1081 return adjustObjectSize(oop(p)->size());
duke@435 1082 }
duke@435 1083 }
duke@435 1084
duke@435 1085 // This implementation assumes that the property of "being an object" is
duke@435 1086 // stable. But being a free chunk may not be (because of parallel
duke@435 1087 // promotion.)
duke@435 1088 bool CompactibleFreeListSpace::block_is_obj(const HeapWord* p) const {
duke@435 1089 FreeChunk* fc = (FreeChunk*)p;
duke@435 1090 assert(is_in_reserved(p), "Should be in space");
duke@435 1091 // When doing a mark-sweep-compact of the CMS generation, this
duke@435 1092 // assertion may fail because prepare_for_compaction() uses
duke@435 1093 // space that is garbage to maintain information on ranges of
duke@435 1094 // live objects so that these live ranges can be moved as a whole.
duke@435 1095 // Comment out this assertion until that problem can be solved
duke@435 1096 // (i.e., that the block start calculation may look at objects
duke@435 1097 // at address below "p" in finding the object that contains "p"
duke@435 1098 // and those objects (if garbage) may have been modified to hold
duke@435 1099 // live range information.
jmasa@2188 1100 // assert(CollectedHeap::use_parallel_gc_threads() || _bt.block_start(p) == p,
jmasa@2188 1101 // "Should be a block boundary");
coleenp@622 1102 if (FreeChunk::indicatesFreeChunk(p)) return false;
coleenp@622 1103 klassOop k = oop(p)->klass_or_null();
duke@435 1104 if (k != NULL) {
duke@435 1105 // Ignore mark word because it may have been used to
duke@435 1106 // chain together promoted objects (the last one
duke@435 1107 // would have a null value).
duke@435 1108 assert(oop(p)->is_oop(true), "Should be an oop");
duke@435 1109 return true;
duke@435 1110 } else {
duke@435 1111 return false; // Was not an object at the start of collection.
duke@435 1112 }
duke@435 1113 }
duke@435 1114
duke@435 1115 // Check if the object is alive. This fact is checked either by consulting
duke@435 1116 // the main marking bitmap in the sweeping phase or, if it's a permanent
duke@435 1117 // generation and we're not in the sweeping phase, by checking the
duke@435 1118 // perm_gen_verify_bit_map where we store the "deadness" information if
duke@435 1119 // we did not sweep the perm gen in the most recent previous GC cycle.
duke@435 1120 bool CompactibleFreeListSpace::obj_is_alive(const HeapWord* p) const {
ysr@2301 1121 assert(SafepointSynchronize::is_at_safepoint() || !is_init_completed(),
ysr@2301 1122 "Else races are possible");
ysr@2293 1123 assert(block_is_obj(p), "The address should point to an object");
duke@435 1124
duke@435 1125 // If we're sweeping, we use object liveness information from the main bit map
duke@435 1126 // for both perm gen and old gen.
duke@435 1127 // We don't need to lock the bitmap (live_map or dead_map below), because
duke@435 1128 // EITHER we are in the middle of the sweeping phase, and the
duke@435 1129 // main marking bit map (live_map below) is locked,
duke@435 1130 // OR we're in other phases and perm_gen_verify_bit_map (dead_map below)
duke@435 1131 // is stable, because it's mutated only in the sweeping phase.
ysr@2293 1132 // NOTE: This method is also used by jmap where, if class unloading is
ysr@2293 1133 // off, the results can return "false" for legitimate perm objects,
ysr@2293 1134 // when we are not in the midst of a sweeping phase, which can result
ysr@2293 1135 // in jmap not reporting certain perm gen objects. This will be moot
ysr@2293 1136 // if/when the perm gen goes away in the future.
duke@435 1137 if (_collector->abstract_state() == CMSCollector::Sweeping) {
duke@435 1138 CMSBitMap* live_map = _collector->markBitMap();
ysr@2293 1139 return live_map->par_isMarked((HeapWord*) p);
duke@435 1140 } else {
duke@435 1141 // If we're not currently sweeping and we haven't swept the perm gen in
duke@435 1142 // the previous concurrent cycle then we may have dead but unswept objects
duke@435 1143 // in the perm gen. In this case, we use the "deadness" information
duke@435 1144 // that we had saved in perm_gen_verify_bit_map at the last sweep.
duke@435 1145 if (!CMSClassUnloadingEnabled && _collector->_permGen->reserved().contains(p)) {
duke@435 1146 if (_collector->verifying()) {
duke@435 1147 CMSBitMap* dead_map = _collector->perm_gen_verify_bit_map();
duke@435 1148 // Object is marked in the dead_map bitmap at the previous sweep
duke@435 1149 // when we know that it's dead; if the bitmap is not allocated then
duke@435 1150 // the object is alive.
duke@435 1151         return (dead_map->sizeInBits() == 0) // bit_map has not been allocated
duke@435 1152 || !dead_map->par_isMarked((HeapWord*) p);
duke@435 1153 } else {
duke@435 1154 return false; // We can't say for sure if it's live, so we say that it's dead.
duke@435 1155 }
duke@435 1156 }
duke@435 1157 }
duke@435 1158 return true;
duke@435 1159 }
duke@435 1160
duke@435 1161 bool CompactibleFreeListSpace::block_is_obj_nopar(const HeapWord* p) const {
duke@435 1162 FreeChunk* fc = (FreeChunk*)p;
duke@435 1163 assert(is_in_reserved(p), "Should be in space");
duke@435 1164 assert(_bt.block_start(p) == p, "Should be a block boundary");
jmasa@3732 1165 if (!fc->is_free()) {
duke@435 1166 // Ignore mark word because it may have been used to
duke@435 1167 // chain together promoted objects (the last one
duke@435 1168 // would have a null value).
duke@435 1169 assert(oop(p)->is_oop(true), "Should be an oop");
duke@435 1170 return true;
duke@435 1171 }
duke@435 1172 return false;
duke@435 1173 }
duke@435 1174
duke@435 1175 // "MT-safe but not guaranteed MT-precise" (TM); you may get an
duke@435 1176 // approximate answer if you don't hold the freelistLock() when you call this.
duke@435 1177 size_t CompactibleFreeListSpace::totalSizeInIndexedFreeLists() const {
duke@435 1178 size_t size = 0;
duke@435 1179 for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
duke@435 1180 debug_only(
duke@435 1181 // We may be calling here without the lock in which case we
duke@435 1182 // won't do this modest sanity check.
duke@435 1183 if (freelistLock()->owned_by_self()) {
duke@435 1184 size_t total_list_size = 0;
duke@435 1185 for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
duke@435 1186 fc = fc->next()) {
duke@435 1187 total_list_size += i;
duke@435 1188 }
duke@435 1189 assert(total_list_size == i * _indexedFreeList[i].count(),
duke@435 1190 "Count in list is incorrect");
duke@435 1191 }
duke@435 1192 )
duke@435 1193 size += i * _indexedFreeList[i].count();
duke@435 1194 }
duke@435 1195 return size;
duke@435 1196 }
duke@435 1197
duke@435 1198 HeapWord* CompactibleFreeListSpace::par_allocate(size_t size) {
duke@435 1199 MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
duke@435 1200 return allocate(size);
duke@435 1201 }
duke@435 1202
duke@435 1203 HeapWord*
duke@435 1204 CompactibleFreeListSpace::getChunkFromSmallLinearAllocBlockRemainder(size_t size) {
duke@435 1205 return getChunkFromLinearAllocBlockRemainder(&_smallLinearAllocBlock, size);
duke@435 1206 }
duke@435 1207
duke@435 1208 HeapWord* CompactibleFreeListSpace::allocate(size_t size) {
duke@435 1209 assert_lock_strong(freelistLock());
duke@435 1210 HeapWord* res = NULL;
duke@435 1211 assert(size == adjustObjectSize(size),
duke@435 1212 "use adjustObjectSize() before calling into allocate()");
duke@435 1213
duke@435 1214 if (_adaptive_freelists) {
duke@435 1215 res = allocate_adaptive_freelists(size);
duke@435 1216 } else { // non-adaptive free lists
duke@435 1217 res = allocate_non_adaptive_freelists(size);
duke@435 1218 }
duke@435 1219
duke@435 1220 if (res != NULL) {
duke@435 1221 // check that res does lie in this space!
duke@435 1222 assert(is_in_reserved(res), "Not in this space!");
duke@435 1223 assert(is_aligned((void*)res), "alignment check");
duke@435 1224
duke@435 1225 FreeChunk* fc = (FreeChunk*)res;
duke@435 1226 fc->markNotFree();
jmasa@3732 1227 assert(!fc->is_free(), "shouldn't be marked free");
coleenp@622 1228 assert(oop(fc)->klass_or_null() == NULL, "should look uninitialized");
duke@435 1229 // Verify that the block offset table shows this to
duke@435 1230 // be a single block, but not one which is unallocated.
duke@435 1231 _bt.verify_single_block(res, size);
duke@435 1232 _bt.verify_not_unallocated(res, size);
duke@435 1233 // mangle a just allocated object with a distinct pattern.
duke@435 1234 debug_only(fc->mangleAllocated(size));
duke@435 1235 }
duke@435 1236
duke@435 1237 return res;
duke@435 1238 }
duke@435 1239
duke@435 1240 HeapWord* CompactibleFreeListSpace::allocate_non_adaptive_freelists(size_t size) {
duke@435 1241 HeapWord* res = NULL;
duke@435 1242 // try and use linear allocation for smaller blocks
duke@435 1243 if (size < _smallLinearAllocBlock._allocation_size_limit) {
duke@435 1244 // if successful, the following also adjusts block offset table
duke@435 1245 res = getChunkFromSmallLinearAllocBlock(size);
duke@435 1246 }
duke@435 1247 // Else triage to indexed lists for smaller sizes
duke@435 1248 if (res == NULL) {
duke@435 1249 if (size < SmallForDictionary) {
duke@435 1250 res = (HeapWord*) getChunkFromIndexedFreeList(size);
duke@435 1251 } else {
duke@435 1252 // else get it from the big dictionary; if even this doesn't
duke@435 1253 // work we are out of luck.
duke@435 1254 res = (HeapWord*)getChunkFromDictionaryExact(size);
duke@435 1255 }
duke@435 1256 }
duke@435 1257
duke@435 1258 return res;
duke@435 1259 }
duke@435 1260
duke@435 1261 HeapWord* CompactibleFreeListSpace::allocate_adaptive_freelists(size_t size) {
duke@435 1262 assert_lock_strong(freelistLock());
duke@435 1263 HeapWord* res = NULL;
duke@435 1264 assert(size == adjustObjectSize(size),
duke@435 1265 "use adjustObjectSize() before calling into allocate()");
duke@435 1266
duke@435 1267 // Strategy
duke@435 1268 // if small
duke@435 1269   //   exact size from small object indexed list
duke@435 1270 // small or large linear allocation block (linAB) as appropriate
duke@435 1271 // take from lists of greater sized chunks
duke@435 1272 // else
duke@435 1273 // dictionary
duke@435 1274 // small or large linear allocation block if it has the space
duke@435 1275 // Try allocating exact size from indexTable first
duke@435 1276 if (size < IndexSetSize) {
duke@435 1277 res = (HeapWord*) getChunkFromIndexedFreeList(size);
duke@435 1278     if (res != NULL) {
duke@435 1279 assert(res != (HeapWord*)_indexedFreeList[size].head(),
duke@435 1280 "Not removed from free list");
duke@435 1281 // no block offset table adjustment is necessary on blocks in
duke@435 1282 // the indexed lists.
duke@435 1283
duke@435 1284 // Try allocating from the small LinAB
duke@435 1285 } else if (size < _smallLinearAllocBlock._allocation_size_limit &&
duke@435 1286 (res = getChunkFromSmallLinearAllocBlock(size)) != NULL) {
duke@435 1287 // if successful, the above also adjusts block offset table
duke@435 1288 // Note that this call will refill the LinAB to
duke@435 1289       // satisfy the request. This is different than
duke@435 1290 // evm.
duke@435 1291 // Don't record chunk off a LinAB? smallSplitBirth(size);
duke@435 1292 } else {
duke@435 1293 // Raid the exact free lists larger than size, even if they are not
duke@435 1294 // overpopulated.
duke@435 1295 res = (HeapWord*) getChunkFromGreater(size);
duke@435 1296 }
duke@435 1297 } else {
duke@435 1298 // Big objects get allocated directly from the dictionary.
duke@435 1299 res = (HeapWord*) getChunkFromDictionaryExact(size);
duke@435 1300 if (res == NULL) {
duke@435 1301 // Try hard not to fail since an allocation failure will likely
duke@435 1302 // trigger a synchronous GC. Try to get the space from the
duke@435 1303 // allocation blocks.
duke@435 1304 res = getChunkFromSmallLinearAllocBlockRemainder(size);
duke@435 1305 }
duke@435 1306 }
duke@435 1307
duke@435 1308 return res;
duke@435 1309 }
duke@435 1310
duke@435 1311 // A worst-case estimate of the space required (in HeapWords) to expand the heap
duke@435 1312 // when promoting obj.
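// Illustrative arithmetic (values hypothetical): with adaptive free lists,
// obj_size == 8, _promoInfo.refillSize() == 64 and MinChunkSize == 4, the
// estimate below is 8 + 64 + 2*4 == 80 HeapWords; with non-adaptive free
// lists the first term is first raised to _smallLinearAllocBlock._refillSize.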
duke@435 1313 size_t CompactibleFreeListSpace::expansionSpaceRequired(size_t obj_size) const {
duke@435 1314 // Depending on the object size, expansion may require refilling either a
duke@435 1315 // bigLAB or a smallLAB plus refilling a PromotionInfo object. MinChunkSize
duke@435 1316 // is added because the dictionary may over-allocate to avoid fragmentation.
duke@435 1317 size_t space = obj_size;
duke@435 1318 if (!_adaptive_freelists) {
duke@435 1319 space = MAX2(space, _smallLinearAllocBlock._refillSize);
duke@435 1320 }
duke@435 1321 space += _promoInfo.refillSize() + 2 * MinChunkSize;
duke@435 1322 return space;
duke@435 1323 }
duke@435 1324
duke@435 1325 FreeChunk* CompactibleFreeListSpace::getChunkFromGreater(size_t numWords) {
duke@435 1326 FreeChunk* ret;
duke@435 1327
duke@435 1328 assert(numWords >= MinChunkSize, "Size is less than minimum");
duke@435 1329 assert(linearAllocationWouldFail() || bestFitFirst(),
duke@435 1330 "Should not be here");
duke@435 1331
duke@435 1332 size_t i;
duke@435 1333 size_t currSize = numWords + MinChunkSize;
duke@435 1334 assert(currSize % MinObjAlignment == 0, "currSize should be aligned");
duke@435 1335 for (i = currSize; i < IndexSetSize; i += IndexSetStride) {
jmasa@3730 1336 FreeList<FreeChunk>* fl = &_indexedFreeList[i];
duke@435 1337 if (fl->head()) {
duke@435 1338 ret = getFromListGreater(fl, numWords);
jmasa@3732 1339 assert(ret == NULL || ret->is_free(), "Should be returning a free chunk");
duke@435 1340 return ret;
duke@435 1341 }
duke@435 1342 }
duke@435 1343
duke@435 1344 currSize = MAX2((size_t)SmallForDictionary,
duke@435 1345 (size_t)(numWords + MinChunkSize));
duke@435 1346
duke@435 1347 /* Try to get a chunk that satisfies request, while avoiding
duke@435 1348 fragmentation that can't be handled. */
duke@435 1349 {
jmasa@3732 1350 ret = dictionary()->get_chunk(currSize);
duke@435 1351 if (ret != NULL) {
duke@435 1352 assert(ret->size() - numWords >= MinChunkSize,
duke@435 1353 "Chunk is too small");
duke@435 1354 _bt.allocated((HeapWord*)ret, ret->size());
duke@435 1355 /* Carve returned chunk. */
duke@435 1356 (void) splitChunkAndReturnRemainder(ret, numWords);
duke@435 1357 /* Label this as no longer a free chunk. */
jmasa@3732 1358 assert(ret->is_free(), "This chunk should be free");
jmasa@3732 1359 ret->link_prev(NULL);
duke@435 1360 }
jmasa@3732 1361 assert(ret == NULL || ret->is_free(), "Should be returning a free chunk");
duke@435 1362 return ret;
duke@435 1363 }
duke@435 1364 ShouldNotReachHere();
duke@435 1365 }
duke@435 1366
ysr@3220 1367 bool CompactibleFreeListSpace::verifyChunkInIndexedFreeLists(FreeChunk* fc) const {
duke@435 1368 assert(fc->size() < IndexSetSize, "Size of chunk is too large");
jmasa@3732 1369 return _indexedFreeList[fc->size()].verify_chunk_in_free_list(fc);
duke@435 1370 }
duke@435 1371
ysr@3220 1372 bool CompactibleFreeListSpace::verify_chunk_is_linear_alloc_block(FreeChunk* fc) const {
ysr@3220 1373 assert((_smallLinearAllocBlock._ptr != (HeapWord*)fc) ||
ysr@3220 1374 (_smallLinearAllocBlock._word_size == fc->size()),
ysr@3220 1375 "Linear allocation block shows incorrect size");
ysr@3220 1376 return ((_smallLinearAllocBlock._ptr == (HeapWord*)fc) &&
ysr@3220 1377 (_smallLinearAllocBlock._word_size == fc->size()));
ysr@3220 1378 }
ysr@3220 1379
ysr@3220 1380 // Check if the purported free chunk is present either as a linear
ysr@3220 1381 // allocation block, the size-indexed table of (smaller) free blocks,
ysr@3220 1382 // or the larger free blocks kept in the binary tree dictionary.
jmasa@3732 1383 bool CompactibleFreeListSpace::verify_chunk_in_free_list(FreeChunk* fc) const {
ysr@3220 1384 if (verify_chunk_is_linear_alloc_block(fc)) {
ysr@3220 1385 return true;
ysr@3220 1386 } else if (fc->size() < IndexSetSize) {
ysr@3220 1387 return verifyChunkInIndexedFreeLists(fc);
ysr@3220 1388 } else {
jmasa@3732 1389 return dictionary()->verify_chunk_in_free_list(fc);
duke@435 1390 }
duke@435 1391 }
duke@435 1392
duke@435 1393 #ifndef PRODUCT
duke@435 1394 void CompactibleFreeListSpace::assert_locked() const {
duke@435 1395 CMSLockVerifier::assert_locked(freelistLock(), parDictionaryAllocLock());
duke@435 1396 }
ysr@1580 1397
ysr@1580 1398 void CompactibleFreeListSpace::assert_locked(const Mutex* lock) const {
ysr@1580 1399 CMSLockVerifier::assert_locked(lock);
ysr@1580 1400 }
duke@435 1401 #endif
duke@435 1402
duke@435 1403 FreeChunk* CompactibleFreeListSpace::allocateScratch(size_t size) {
duke@435 1404 // In the parallel case, the main thread holds the free list lock
duke@435 1405   // on behalf of the parallel threads.
duke@435 1406 FreeChunk* fc;
duke@435 1407 {
duke@435 1408 // If GC is parallel, this might be called by several threads.
duke@435 1409 // This should be rare enough that the locking overhead won't affect
duke@435 1410 // the sequential code.
duke@435 1411 MutexLockerEx x(parDictionaryAllocLock(),
duke@435 1412 Mutex::_no_safepoint_check_flag);
duke@435 1413 fc = getChunkFromDictionary(size);
duke@435 1414 }
duke@435 1415 if (fc != NULL) {
duke@435 1416 fc->dontCoalesce();
jmasa@3732 1417 assert(fc->is_free(), "Should be free, but not coalescable");
duke@435 1418 // Verify that the block offset table shows this to
duke@435 1419 // be a single block, but not one which is unallocated.
duke@435 1420 _bt.verify_single_block((HeapWord*)fc, fc->size());
duke@435 1421 _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
duke@435 1422 }
duke@435 1423 return fc;
duke@435 1424 }
duke@435 1425
coleenp@548 1426 oop CompactibleFreeListSpace::promote(oop obj, size_t obj_size) {
duke@435 1427 assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
duke@435 1428 assert_locked();
duke@435 1429
duke@435 1430   // If we are tracking promotions, then first ensure space for
duke@435 1431   // promotion (including spooling space for saving the header if necessary);
duke@435 1432   // then allocate and copy, and track the promoted info if needed.
duke@435 1433 // When tracking (see PromotionInfo::track()), the mark word may
duke@435 1434 // be displaced and in this case restoration of the mark word
duke@435 1435 // occurs in the (oop_since_save_marks_)iterate phase.
duke@435 1436 if (_promoInfo.tracking() && !_promoInfo.ensure_spooling_space()) {
duke@435 1437 return NULL;
duke@435 1438 }
duke@435 1439 // Call the allocate(size_t, bool) form directly to avoid the
duke@435 1440 // additional call through the allocate(size_t) form. Having
duke@435 1441   // the compiler inline the call is problematic because allocate(size_t)
duke@435 1442 // is a virtual method.
duke@435 1443 HeapWord* res = allocate(adjustObjectSize(obj_size));
duke@435 1444 if (res != NULL) {
duke@435 1445 Copy::aligned_disjoint_words((HeapWord*)obj, res, obj_size);
duke@435 1446 // if we should be tracking promotions, do so.
duke@435 1447 if (_promoInfo.tracking()) {
duke@435 1448 _promoInfo.track((PromotedObject*)res);
duke@435 1449 }
duke@435 1450 }
duke@435 1451 return oop(res);
duke@435 1452 }
duke@435 1453
duke@435 1454 HeapWord*
duke@435 1455 CompactibleFreeListSpace::getChunkFromSmallLinearAllocBlock(size_t size) {
duke@435 1456 assert_locked();
duke@435 1457 assert(size >= MinChunkSize, "minimum chunk size");
duke@435 1458 assert(size < _smallLinearAllocBlock._allocation_size_limit,
duke@435 1459 "maximum from smallLinearAllocBlock");
duke@435 1460 return getChunkFromLinearAllocBlock(&_smallLinearAllocBlock, size);
duke@435 1461 }
duke@435 1462
duke@435 1463 HeapWord*
duke@435 1464 CompactibleFreeListSpace::getChunkFromLinearAllocBlock(LinearAllocBlock *blk,
duke@435 1465 size_t size) {
duke@435 1466 assert_locked();
duke@435 1467 assert(size >= MinChunkSize, "too small");
duke@435 1468 HeapWord* res = NULL;
duke@435 1469   // Try to do linear allocation from blk, making sure first that the block is non-empty.
duke@435 1470 if (blk->_word_size == 0) {
duke@435 1471 // We have probably been unable to fill this either in the prologue or
duke@435 1472 // when it was exhausted at the last linear allocation. Bail out until
duke@435 1473 // next time.
duke@435 1474 assert(blk->_ptr == NULL, "consistency check");
duke@435 1475 return NULL;
duke@435 1476 }
duke@435 1477 assert(blk->_word_size != 0 && blk->_ptr != NULL, "consistency check");
duke@435 1478 res = getChunkFromLinearAllocBlockRemainder(blk, size);
duke@435 1479 if (res != NULL) return res;
duke@435 1480
duke@435 1481 // about to exhaust this linear allocation block
duke@435 1482 if (blk->_word_size == size) { // exactly satisfied
duke@435 1483 res = blk->_ptr;
duke@435 1484 _bt.allocated(res, blk->_word_size);
duke@435 1485 } else if (size + MinChunkSize <= blk->_refillSize) {
ysr@1580 1486 size_t sz = blk->_word_size;
duke@435 1487 // Update _unallocated_block if the size is such that chunk would be
duke@435 1488 // returned to the indexed free list. All other chunks in the indexed
duke@435 1489 // free lists are allocated from the dictionary so that _unallocated_block
duke@435 1490     // has already been adjusted for them. Do it here so that the
duke@435 1491     // adjustment covers all chunks added back to the indexed free lists.
ysr@1580 1492 if (sz < SmallForDictionary) {
ysr@1580 1493 _bt.allocated(blk->_ptr, sz);
duke@435 1494 }
duke@435 1495 // Return the chunk that isn't big enough, and then refill below.
ysr@1580 1496 addChunkToFreeLists(blk->_ptr, sz);
jmasa@3732 1497 split_birth(sz);
duke@435 1498 // Don't keep statistics on adding back chunk from a LinAB.
duke@435 1499 } else {
duke@435 1500 // A refilled block would not satisfy the request.
duke@435 1501 return NULL;
duke@435 1502 }
duke@435 1503
duke@435 1504 blk->_ptr = NULL; blk->_word_size = 0;
duke@435 1505 refillLinearAllocBlock(blk);
duke@435 1506 assert(blk->_ptr == NULL || blk->_word_size >= size + MinChunkSize,
duke@435 1507 "block was replenished");
duke@435 1508 if (res != NULL) {
jmasa@3732 1509 split_birth(size);
duke@435 1510 repairLinearAllocBlock(blk);
duke@435 1511 } else if (blk->_ptr != NULL) {
duke@435 1512 res = blk->_ptr;
duke@435 1513 size_t blk_size = blk->_word_size;
duke@435 1514 blk->_word_size -= size;
duke@435 1515 blk->_ptr += size;
jmasa@3732 1516 split_birth(size);
duke@435 1517 repairLinearAllocBlock(blk);
duke@435 1518 // Update BOT last so that other (parallel) GC threads see a consistent
duke@435 1519 // view of the BOT and free blocks.
duke@435 1520 // Above must occur before BOT is updated below.
ysr@2071 1521 OrderAccess::storestore();
duke@435 1522 _bt.split_block(res, blk_size, size); // adjust block offset table
duke@435 1523 }
duke@435 1524 return res;
duke@435 1525 }
duke@435 1526
duke@435 1527 HeapWord* CompactibleFreeListSpace::getChunkFromLinearAllocBlockRemainder(
duke@435 1528 LinearAllocBlock* blk,
duke@435 1529 size_t size) {
duke@435 1530 assert_locked();
duke@435 1531 assert(size >= MinChunkSize, "too small");
duke@435 1532
duke@435 1533 HeapWord* res = NULL;
duke@435 1534 // This is the common case. Keep it simple.
duke@435 1535 if (blk->_word_size >= size + MinChunkSize) {
duke@435 1536 assert(blk->_ptr != NULL, "consistency check");
duke@435 1537 res = blk->_ptr;
duke@435 1538 // Note that the BOT is up-to-date for the linAB before allocation. It
duke@435 1539 // indicates the start of the linAB. The split_block() updates the
duke@435 1540 // BOT for the linAB after the allocation (indicates the start of the
duke@435 1541 // next chunk to be allocated).
duke@435 1542 size_t blk_size = blk->_word_size;
duke@435 1543 blk->_word_size -= size;
duke@435 1544 blk->_ptr += size;
jmasa@3732 1545 split_birth(size);
duke@435 1546 repairLinearAllocBlock(blk);
duke@435 1547 // Update BOT last so that other (parallel) GC threads see a consistent
duke@435 1548 // view of the BOT and free blocks.
duke@435 1549 // Above must occur before BOT is updated below.
ysr@2071 1550 OrderAccess::storestore();
duke@435 1551 _bt.split_block(res, blk_size, size); // adjust block offset table
duke@435 1552 _bt.allocated(res, size);
duke@435 1553 }
duke@435 1554 return res;
duke@435 1555 }
duke@435 1556
duke@435 1557 FreeChunk*
duke@435 1558 CompactibleFreeListSpace::getChunkFromIndexedFreeList(size_t size) {
duke@435 1559 assert_locked();
duke@435 1560 assert(size < SmallForDictionary, "just checking");
duke@435 1561 FreeChunk* res;
jmasa@3732 1562 res = _indexedFreeList[size].get_chunk_at_head();
duke@435 1563 if (res == NULL) {
duke@435 1564 res = getChunkFromIndexedFreeListHelper(size);
duke@435 1565 }
duke@435 1566 _bt.verify_not_unallocated((HeapWord*) res, size);
ysr@1580 1567 assert(res == NULL || res->size() == size, "Incorrect block size");
duke@435 1568 return res;
duke@435 1569 }
duke@435 1570
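// Helper used when the exact indexed list for "size" is empty or
// under-populated. It first tries a best fit from a larger exact list;
// failing that, it obtains a larger block (CMSIndexedFreeListReplenish * size
// words when possible) and, if "replenish" or CMSReplenishIntermediate is
// set, carves it into size-word chunks, adding all but the returned chunk to
// the indexed list. Sizes at or above SmallForDictionary are satisfied
// directly from the dictionary.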
duke@435 1571 FreeChunk*
ysr@1580 1572 CompactibleFreeListSpace::getChunkFromIndexedFreeListHelper(size_t size,
ysr@1580 1573 bool replenish) {
duke@435 1574 assert_locked();
duke@435 1575 FreeChunk* fc = NULL;
duke@435 1576 if (size < SmallForDictionary) {
duke@435 1577 assert(_indexedFreeList[size].head() == NULL ||
duke@435 1578 _indexedFreeList[size].surplus() <= 0,
duke@435 1579 "List for this size should be empty or under populated");
duke@435 1580 // Try best fit in exact lists before replenishing the list
duke@435 1581 if (!bestFitFirst() || (fc = bestFitSmall(size)) == NULL) {
duke@435 1582 // Replenish list.
duke@435 1583 //
duke@435 1584 // Things tried that failed.
duke@435 1585 // Tried allocating out of the two LinAB's first before
duke@435 1586 // replenishing lists.
duke@435 1587 // Tried small linAB of size 256 (size in indexed list)
duke@435 1588 // and replenishing indexed lists from the small linAB.
duke@435 1589 //
duke@435 1590 FreeChunk* newFc = NULL;
ysr@1580 1591 const size_t replenish_size = CMSIndexedFreeListReplenish * size;
duke@435 1592 if (replenish_size < SmallForDictionary) {
duke@435 1593 // Do not replenish from an underpopulated size.
duke@435 1594 if (_indexedFreeList[replenish_size].surplus() > 0 &&
duke@435 1595 _indexedFreeList[replenish_size].head() != NULL) {
jmasa@3732 1596 newFc = _indexedFreeList[replenish_size].get_chunk_at_head();
ysr@1580 1597 } else if (bestFitFirst()) {
duke@435 1598 newFc = bestFitSmall(replenish_size);
duke@435 1599 }
duke@435 1600 }
ysr@1580 1601 if (newFc == NULL && replenish_size > size) {
ysr@1580 1602 assert(CMSIndexedFreeListReplenish > 1, "ctl pt invariant");
ysr@1580 1603 newFc = getChunkFromIndexedFreeListHelper(replenish_size, false);
ysr@1580 1604 }
ysr@1580 1605 // Note: The stats update re split-death of block obtained above
ysr@1580 1606       // will be recorded below, precisely when we know we are going to
ysr@1580 1607       // be actually splitting it into more than one piece.
duke@435 1608 if (newFc != NULL) {
ysr@1580 1609 if (replenish || CMSReplenishIntermediate) {
ysr@1580 1610 // Replenish this list and return one block to caller.
ysr@1580 1611 size_t i;
ysr@1580 1612 FreeChunk *curFc, *nextFc;
ysr@1580 1613 size_t num_blk = newFc->size() / size;
ysr@1580 1614 assert(num_blk >= 1, "Smaller than requested?");
ysr@1580 1615 assert(newFc->size() % size == 0, "Should be integral multiple of request");
ysr@1580 1616 if (num_blk > 1) {
ysr@1580 1617 // we are sure we will be splitting the block just obtained
ysr@1580 1618 // into multiple pieces; record the split-death of the original
ysr@1580 1619 splitDeath(replenish_size);
ysr@1580 1620 }
ysr@1580 1621 // carve up and link blocks 0, ..., num_blk - 2
ysr@1580 1622 // The last chunk is not added to the lists but is returned as the
ysr@1580 1623 // free chunk.
ysr@1580 1624 for (curFc = newFc, nextFc = (FreeChunk*)((HeapWord*)curFc + size),
ysr@1580 1625 i = 0;
ysr@1580 1626 i < (num_blk - 1);
ysr@1580 1627 curFc = nextFc, nextFc = (FreeChunk*)((HeapWord*)nextFc + size),
ysr@1580 1628 i++) {
jmasa@3732 1629 curFc->set_size(size);
ysr@1580 1630 // Don't record this as a return in order to try and
ysr@1580 1631 // determine the "returns" from a GC.
ysr@1580 1632 _bt.verify_not_unallocated((HeapWord*) fc, size);
jmasa@3732 1633 _indexedFreeList[size].return_chunk_at_tail(curFc, false);
ysr@1580 1634 _bt.mark_block((HeapWord*)curFc, size);
jmasa@3732 1635 split_birth(size);
ysr@1580 1636 // Don't record the initial population of the indexed list
ysr@1580 1637 // as a split birth.
ysr@1580 1638 }
ysr@1580 1639
ysr@1580 1640 // check that the arithmetic was OK above
ysr@1580 1641 assert((HeapWord*)nextFc == (HeapWord*)newFc + num_blk*size,
ysr@1580 1642 "inconsistency in carving newFc");
jmasa@3732 1643 curFc->set_size(size);
duke@435 1644 _bt.mark_block((HeapWord*)curFc, size);
jmasa@3732 1645 split_birth(size);
ysr@1580 1646 fc = curFc;
ysr@1580 1647 } else {
ysr@1580 1648 // Return entire block to caller
ysr@1580 1649 fc = newFc;
duke@435 1650 }
duke@435 1651 }
duke@435 1652 }
duke@435 1653 } else {
duke@435 1654 // Get a free chunk from the free chunk dictionary to be returned to
duke@435 1655 // replenish the indexed free list.
duke@435 1656 fc = getChunkFromDictionaryExact(size);
duke@435 1657 }
jmasa@3732 1658 // assert(fc == NULL || fc->is_free(), "Should be returning a free chunk");
duke@435 1659 return fc;
duke@435 1660 }
duke@435 1661
duke@435 1662 FreeChunk*
duke@435 1663 CompactibleFreeListSpace::getChunkFromDictionary(size_t size) {
duke@435 1664 assert_locked();
jmasa@3732 1665 FreeChunk* fc = _dictionary->get_chunk(size);
duke@435 1666 if (fc == NULL) {
duke@435 1667 return NULL;
duke@435 1668 }
duke@435 1669 _bt.allocated((HeapWord*)fc, fc->size());
duke@435 1670 if (fc->size() >= size + MinChunkSize) {
duke@435 1671 fc = splitChunkAndReturnRemainder(fc, size);
duke@435 1672 }
duke@435 1673 assert(fc->size() >= size, "chunk too small");
duke@435 1674 assert(fc->size() < size + MinChunkSize, "chunk too big");
duke@435 1675 _bt.verify_single_block((HeapWord*)fc, fc->size());
duke@435 1676 return fc;
duke@435 1677 }
duke@435 1678
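// Returns a chunk of exactly "size" words from the dictionary, or NULL.
// If the dictionary hands back a block that is larger than "size" but too
// small to split (the remainder would fall below MinChunkSize), the block is
// returned to the dictionary and one of at least size + MinChunkSize words
// is requested instead, so that the split always leaves a viable remainder.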
duke@435 1679 FreeChunk*
duke@435 1680 CompactibleFreeListSpace::getChunkFromDictionaryExact(size_t size) {
duke@435 1681 assert_locked();
jmasa@3732 1682 FreeChunk* fc = _dictionary->get_chunk(size);
duke@435 1683 if (fc == NULL) {
duke@435 1684 return fc;
duke@435 1685 }
duke@435 1686 _bt.allocated((HeapWord*)fc, fc->size());
duke@435 1687 if (fc->size() == size) {
duke@435 1688 _bt.verify_single_block((HeapWord*)fc, size);
duke@435 1689 return fc;
duke@435 1690 }
jmasa@3732 1691 assert(fc->size() > size, "get_chunk() guarantee");
duke@435 1692 if (fc->size() < size + MinChunkSize) {
duke@435 1693 // Return the chunk to the dictionary and go get a bigger one.
duke@435 1694 returnChunkToDictionary(fc);
jmasa@3732 1695 fc = _dictionary->get_chunk(size + MinChunkSize);
duke@435 1696 if (fc == NULL) {
duke@435 1697 return NULL;
duke@435 1698 }
duke@435 1699 _bt.allocated((HeapWord*)fc, fc->size());
duke@435 1700 }
duke@435 1701 assert(fc->size() >= size + MinChunkSize, "tautology");
duke@435 1702 fc = splitChunkAndReturnRemainder(fc, size);
duke@435 1703 assert(fc->size() == size, "chunk is wrong size");
duke@435 1704 _bt.verify_single_block((HeapWord*)fc, size);
duke@435 1705 return fc;
duke@435 1706 }
duke@435 1707
duke@435 1708 void
duke@435 1709 CompactibleFreeListSpace::returnChunkToDictionary(FreeChunk* chunk) {
duke@435 1710 assert_locked();
duke@435 1711
duke@435 1712 size_t size = chunk->size();
duke@435 1713 _bt.verify_single_block((HeapWord*)chunk, size);
duke@435 1714 // adjust _unallocated_block downward, as necessary
duke@435 1715 _bt.freed((HeapWord*)chunk, size);
jmasa@3732 1716 _dictionary->return_chunk(chunk);
ysr@1580 1717 #ifndef PRODUCT
ysr@1580 1718 if (CMSCollector::abstract_state() != CMSCollector::Sweeping) {
jmasa@3730 1719 TreeChunk<FreeChunk>::as_TreeChunk(chunk)->list()->verify_stats();
ysr@1580 1720 }
ysr@1580 1721 #endif // PRODUCT
duke@435 1722 }
duke@435 1723
duke@435 1724 void
duke@435 1725 CompactibleFreeListSpace::returnChunkToFreeList(FreeChunk* fc) {
duke@435 1726 assert_locked();
duke@435 1727 size_t size = fc->size();
duke@435 1728 _bt.verify_single_block((HeapWord*) fc, size);
duke@435 1729 _bt.verify_not_unallocated((HeapWord*) fc, size);
duke@435 1730 if (_adaptive_freelists) {
jmasa@3732 1731 _indexedFreeList[size].return_chunk_at_tail(fc);
duke@435 1732 } else {
jmasa@3732 1733 _indexedFreeList[size].return_chunk_at_head(fc);
duke@435 1734 }
ysr@1580 1735 #ifndef PRODUCT
ysr@1580 1736 if (CMSCollector::abstract_state() != CMSCollector::Sweeping) {
ysr@1580 1737 _indexedFreeList[size].verify_stats();
ysr@1580 1738 }
ysr@1580 1739 #endif // PRODUCT
duke@435 1740 }
duke@435 1741
duke@435 1742 // Add chunk to end of last block -- if it's the largest
duke@435 1743 // block -- and update BOT and census data. We would
duke@435 1744 // of course have preferred to coalesce it with the
duke@435 1745 // last block, but it's currently less expensive to find the
duke@435 1746 // largest block than it is to find the last.
duke@435 1747 void
duke@435 1748 CompactibleFreeListSpace::addChunkToFreeListsAtEndRecordingStats(
duke@435 1749 HeapWord* chunk, size_t size) {
duke@435 1750 // check that the chunk does lie in this space!
duke@435 1751 assert(chunk != NULL && is_in_reserved(chunk), "Not in this space!");
duke@435 1752 // One of the parallel gc task threads may be here
duke@435 1753 // whilst others are allocating.
duke@435 1754 Mutex* lock = NULL;
duke@435 1755 if (ParallelGCThreads != 0) {
duke@435 1756 lock = &_parDictionaryAllocLock;
duke@435 1757 }
duke@435 1758 FreeChunk* ec;
duke@435 1759 {
duke@435 1760 MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
jmasa@3732 1761 ec = dictionary()->find_largest_dict(); // get largest block
duke@435 1762 if (ec != NULL && ec->end() == chunk) {
duke@435 1763 // It's a coterminal block - we can coalesce.
duke@435 1764 size_t old_size = ec->size();
duke@435 1765 coalDeath(old_size);
duke@435 1766 removeChunkFromDictionary(ec);
duke@435 1767 size += old_size;
duke@435 1768 } else {
duke@435 1769 ec = (FreeChunk*)chunk;
duke@435 1770 }
duke@435 1771 }
jmasa@3732 1772 ec->set_size(size);
duke@435 1773 debug_only(ec->mangleFreed(size));
duke@435 1774 if (size < SmallForDictionary) {
duke@435 1775 lock = _indexedFreeListParLocks[size];
duke@435 1776 }
duke@435 1777 MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
duke@435 1778 addChunkAndRepairOffsetTable((HeapWord*)ec, size, true);
duke@435 1779 // record the birth under the lock since the recording involves
duke@435 1780 // manipulation of the list on which the chunk lives and
duke@435 1781 // if the chunk is allocated and is the last on the list,
duke@435 1782 // the list can go away.
duke@435 1783 coalBirth(size);
duke@435 1784 }
duke@435 1785
duke@435 1786 void
duke@435 1787 CompactibleFreeListSpace::addChunkToFreeLists(HeapWord* chunk,
duke@435 1788 size_t size) {
duke@435 1789 // check that the chunk does lie in this space!
duke@435 1790 assert(chunk != NULL && is_in_reserved(chunk), "Not in this space!");
duke@435 1791 assert_locked();
duke@435 1792 _bt.verify_single_block(chunk, size);
duke@435 1793
duke@435 1794 FreeChunk* fc = (FreeChunk*) chunk;
jmasa@3732 1795 fc->set_size(size);
duke@435 1796 debug_only(fc->mangleFreed(size));
duke@435 1797 if (size < SmallForDictionary) {
duke@435 1798 returnChunkToFreeList(fc);
duke@435 1799 } else {
duke@435 1800 returnChunkToDictionary(fc);
duke@435 1801 }
duke@435 1802 }
duke@435 1803
duke@435 1804 void
duke@435 1805 CompactibleFreeListSpace::addChunkAndRepairOffsetTable(HeapWord* chunk,
duke@435 1806 size_t size, bool coalesced) {
duke@435 1807 assert_locked();
duke@435 1808 assert(chunk != NULL, "null chunk");
duke@435 1809 if (coalesced) {
duke@435 1810 // repair BOT
duke@435 1811 _bt.single_block(chunk, size);
duke@435 1812 }
duke@435 1813 addChunkToFreeLists(chunk, size);
duke@435 1814 }
duke@435 1815
duke@435 1816 // We _must_ find the purported chunk on our free lists;
duke@435 1817 // we assert if we don't.
duke@435 1818 void
duke@435 1819 CompactibleFreeListSpace::removeFreeChunkFromFreeLists(FreeChunk* fc) {
duke@435 1820 size_t size = fc->size();
duke@435 1821 assert_locked();
duke@435 1822 debug_only(verifyFreeLists());
duke@435 1823 if (size < SmallForDictionary) {
duke@435 1824 removeChunkFromIndexedFreeList(fc);
duke@435 1825 } else {
duke@435 1826 removeChunkFromDictionary(fc);
duke@435 1827 }
duke@435 1828 _bt.verify_single_block((HeapWord*)fc, size);
duke@435 1829 debug_only(verifyFreeLists());
duke@435 1830 }
duke@435 1831
duke@435 1832 void
duke@435 1833 CompactibleFreeListSpace::removeChunkFromDictionary(FreeChunk* fc) {
duke@435 1834   assert_locked();
duke@435 1835   assert(fc != NULL, "null chunk");
duke@435 1836   size_t size = fc->size();
duke@435 1837 _bt.verify_single_block((HeapWord*)fc, size);
jmasa@3732 1838 _dictionary->remove_chunk(fc);
duke@435 1839 // adjust _unallocated_block upward, as necessary
duke@435 1840 _bt.allocated((HeapWord*)fc, size);
duke@435 1841 }
duke@435 1842
duke@435 1843 void
duke@435 1844 CompactibleFreeListSpace::removeChunkFromIndexedFreeList(FreeChunk* fc) {
duke@435 1845 assert_locked();
duke@435 1846 size_t size = fc->size();
duke@435 1847 _bt.verify_single_block((HeapWord*)fc, size);
duke@435 1848 NOT_PRODUCT(
duke@435 1849 if (FLSVerifyIndexTable) {
duke@435 1850 verifyIndexedFreeList(size);
duke@435 1851 }
duke@435 1852 )
jmasa@3732 1853 _indexedFreeList[size].remove_chunk(fc);
duke@435 1854 NOT_PRODUCT(
duke@435 1855 if (FLSVerifyIndexTable) {
duke@435 1856 verifyIndexedFreeList(size);
duke@435 1857 }
duke@435 1858 )
duke@435 1859 }
duke@435 1860
duke@435 1861 FreeChunk* CompactibleFreeListSpace::bestFitSmall(size_t numWords) {
duke@435 1862 /* A hint is the next larger size that has a surplus.
duke@435 1863 Start search at a size large enough to guarantee that
duke@435 1864      the excess is >= MinChunkSize. */
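  /* Illustrative walk (sizes hypothetical): for numWords == 20 with
     MinChunkSize == 4, the search starts at size 24. If list 24 records a
     hint of 32 and list 32 has a surplus chunk at its head, list 24's hint
     is reset to 32 and a 20-word piece is split off that 32-word chunk via
     getFromListGreater(); the 12-word remainder goes back to the free lists. */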
duke@435 1865 size_t start = align_object_size(numWords + MinChunkSize);
duke@435 1866 if (start < IndexSetSize) {
jmasa@3730 1867 FreeList<FreeChunk>* it = _indexedFreeList;
duke@435 1868 size_t hint = _indexedFreeList[start].hint();
duke@435 1869 while (hint < IndexSetSize) {
duke@435 1870 assert(hint % MinObjAlignment == 0, "hint should be aligned");
jmasa@3730 1871 FreeList<FreeChunk> *fl = &_indexedFreeList[hint];
duke@435 1872 if (fl->surplus() > 0 && fl->head() != NULL) {
duke@435 1873 // Found a list with surplus, reset original hint
duke@435 1874 // and split out a free chunk which is returned.
duke@435 1875 _indexedFreeList[start].set_hint(hint);
duke@435 1876 FreeChunk* res = getFromListGreater(fl, numWords);
jmasa@3732 1877 assert(res == NULL || res->is_free(),
duke@435 1878 "Should be returning a free chunk");
duke@435 1879 return res;
duke@435 1880 }
duke@435 1881 hint = fl->hint(); /* keep looking */
duke@435 1882 }
duke@435 1883 /* None found. */
duke@435 1884 it[start].set_hint(IndexSetSize);
duke@435 1885 }
duke@435 1886 return NULL;
duke@435 1887 }
duke@435 1888
duke@435 1889 /* Requires fl->size >= numWords + MinChunkSize */
jmasa@3730 1890 FreeChunk* CompactibleFreeListSpace::getFromListGreater(FreeList<FreeChunk>* fl,
duke@435 1891 size_t numWords) {
duke@435 1892   FreeChunk *curr = fl->head();
duke@435 1893   assert(curr != NULL, "List is empty");
duke@435 1894   size_t oldNumWords = curr->size();
duke@435 1895   assert(numWords >= MinChunkSize, "Word size is too small");
duke@435 1896 assert(oldNumWords >= numWords + MinChunkSize,
duke@435 1897 "Size of chunks in the list is too small");
duke@435 1898
jmasa@3732 1899 fl->remove_chunk(curr);
duke@435 1900 // recorded indirectly by splitChunkAndReturnRemainder -
duke@435 1901 // smallSplit(oldNumWords, numWords);
duke@435 1902 FreeChunk* new_chunk = splitChunkAndReturnRemainder(curr, numWords);
duke@435 1903 // Does anything have to be done for the remainder in terms of
duke@435 1904 // fixing the card table?
jmasa@3732 1905 assert(new_chunk == NULL || new_chunk->is_free(),
duke@435 1906 "Should be returning a free chunk");
duke@435 1907 return new_chunk;
duke@435 1908 }
duke@435 1909
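// Carve "new_size" words off the front of "chunk". The remainder is sized
// and marked free before the block offset table is updated (separated by a
// storestore barrier so other GC threads never see an inconsistent view),
// then returned to the indexed free lists or the dictionary according to its
// size; split census counters are updated via split(). The leading piece,
// resized to new_size, is returned to the caller.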
duke@435 1910 FreeChunk*
duke@435 1911 CompactibleFreeListSpace::splitChunkAndReturnRemainder(FreeChunk* chunk,
duke@435 1912 size_t new_size) {
duke@435 1913 assert_locked();
duke@435 1914 size_t size = chunk->size();
duke@435 1915 assert(size > new_size, "Split from a smaller block?");
duke@435 1916 assert(is_aligned(chunk), "alignment problem");
duke@435 1917 assert(size == adjustObjectSize(size), "alignment problem");
duke@435 1918 size_t rem_size = size - new_size;
duke@435 1919 assert(rem_size == adjustObjectSize(rem_size), "alignment problem");
duke@435 1920 assert(rem_size >= MinChunkSize, "Free chunk smaller than minimum");
duke@435 1921 FreeChunk* ffc = (FreeChunk*)((HeapWord*)chunk + new_size);
duke@435 1922 assert(is_aligned(ffc), "alignment problem");
jmasa@3732 1923 ffc->set_size(rem_size);
jmasa@3732 1924 ffc->link_next(NULL);
jmasa@3732 1925 ffc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
duke@435 1926 // Above must occur before BOT is updated below.
duke@435 1927 // adjust block offset table
ysr@2071 1928 OrderAccess::storestore();
jmasa@3732 1929 assert(chunk->is_free() && ffc->is_free(), "Error");
duke@435 1930 _bt.split_block((HeapWord*)chunk, chunk->size(), new_size);
duke@435 1931 if (rem_size < SmallForDictionary) {
duke@435 1932 bool is_par = (SharedHeap::heap()->n_par_threads() > 0);
duke@435 1933 if (is_par) _indexedFreeListParLocks[rem_size]->lock();
jmasa@3294 1934 assert(!is_par ||
jmasa@3294 1935 (SharedHeap::heap()->n_par_threads() ==
jmasa@3294 1936 SharedHeap::heap()->workers()->active_workers()), "Mismatch");
duke@435 1937 returnChunkToFreeList(ffc);
duke@435 1938 split(size, rem_size);
duke@435 1939 if (is_par) _indexedFreeListParLocks[rem_size]->unlock();
duke@435 1940 } else {
duke@435 1941 returnChunkToDictionary(ffc);
duke@435 1942     split(size, rem_size);
duke@435 1943 }
jmasa@3732 1944 chunk->set_size(new_size);
duke@435 1945 return chunk;
duke@435 1946 }
duke@435 1947
duke@435 1948 void
duke@435 1949 CompactibleFreeListSpace::sweep_completed() {
duke@435 1950 // Now that space is probably plentiful, refill linear
duke@435 1951 // allocation blocks as needed.
duke@435 1952 refillLinearAllocBlocksIfNeeded();
duke@435 1953 }
duke@435 1954
duke@435 1955 void
duke@435 1956 CompactibleFreeListSpace::gc_prologue() {
duke@435 1957 assert_locked();
duke@435 1958 if (PrintFLSStatistics != 0) {
duke@435 1959 gclog_or_tty->print("Before GC:\n");
duke@435 1960 reportFreeListStatistics();
duke@435 1961 }
duke@435 1962 refillLinearAllocBlocksIfNeeded();
duke@435 1963 }
duke@435 1964
duke@435 1965 void
duke@435 1966 CompactibleFreeListSpace::gc_epilogue() {
duke@435 1967 assert_locked();
duke@435 1968 if (PrintGCDetails && Verbose && !_adaptive_freelists) {
duke@435 1969 if (_smallLinearAllocBlock._word_size == 0)
duke@435 1970 warning("CompactibleFreeListSpace(epilogue):: Linear allocation failure");
duke@435 1971 }
duke@435 1972 assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");
duke@435 1973 _promoInfo.stopTrackingPromotions();
duke@435 1974 repairLinearAllocationBlocks();
duke@435 1975 // Print Space's stats
duke@435 1976 if (PrintFLSStatistics != 0) {
duke@435 1977 gclog_or_tty->print("After GC:\n");
duke@435 1978 reportFreeListStatistics();
duke@435 1979 }
duke@435 1980 }
duke@435 1981
duke@435 1982 // Iteration support, mostly delegated from a CMS generation
duke@435 1983
duke@435 1984 void CompactibleFreeListSpace::save_marks() {
ysr@2825 1985 assert(Thread::current()->is_VM_thread(),
ysr@2825 1986 "Global variable should only be set when single-threaded");
ysr@2825 1987 // Mark the "end" of the used space at the time of this call;
duke@435 1988 // note, however, that promoted objects from this point
duke@435 1989 // on are tracked in the _promoInfo below.
ysr@2071 1990 set_saved_mark_word(unallocated_block());
ysr@2825 1991 #ifdef ASSERT
ysr@2825 1992 // Check the sanity of save_marks() etc.
ysr@2825 1993 MemRegion ur = used_region();
ysr@2825 1994 MemRegion urasm = used_region_at_save_marks();
ysr@2825 1995 assert(ur.contains(urasm),
ysr@2825 1996 err_msg(" Error at save_marks(): [" PTR_FORMAT "," PTR_FORMAT ")"
ysr@2825 1997 " should contain [" PTR_FORMAT "," PTR_FORMAT ")",
ysr@2825 1998 ur.start(), ur.end(), urasm.start(), urasm.end()));
ysr@2825 1999 #endif
duke@435 2000 // inform allocator that promotions should be tracked.
duke@435 2001 assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");
duke@435 2002 _promoInfo.startTrackingPromotions();
duke@435 2003 }
duke@435 2004
duke@435 2005 bool CompactibleFreeListSpace::no_allocs_since_save_marks() {
duke@435 2006 assert(_promoInfo.tracking(), "No preceding save_marks?");
ysr@2132 2007 assert(SharedHeap::heap()->n_par_threads() == 0,
ysr@2132 2008 "Shouldn't be called if using parallel gc.");
duke@435 2009 return _promoInfo.noPromotions();
duke@435 2010 }
duke@435 2011
duke@435 2012 #define CFLS_OOP_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix) \
duke@435 2013 \
duke@435 2014 void CompactibleFreeListSpace:: \
duke@435 2015 oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk) { \
duke@435 2016 assert(SharedHeap::heap()->n_par_threads() == 0, \
duke@435 2017 "Shouldn't be called (yet) during parallel part of gc."); \
duke@435 2018 _promoInfo.promoted_oops_iterate##nv_suffix(blk); \
duke@435 2019 /* \
duke@435 2020 * This also restores any displaced headers and removes the elements from \
duke@435 2021 * the iteration set as they are processed, so that we have a clean slate \
duke@435 2022 * at the end of the iteration. Note, thus, that if new objects are \
duke@435 2023 * promoted as a result of the iteration they are iterated over as well. \
duke@435 2024 */ \
duke@435 2025 assert(_promoInfo.noPromotions(), "_promoInfo inconsistency"); \
duke@435 2026 }
duke@435 2027
duke@435 2028 ALL_SINCE_SAVE_MARKS_CLOSURES(CFLS_OOP_SINCE_SAVE_MARKS_DEFN)
duke@435 2029
duke@435 2030
duke@435 2031 void CompactibleFreeListSpace::object_iterate_since_last_GC(ObjectClosure* cl) {
duke@435 2032 // ugghh... how would one do this efficiently for a non-contiguous space?
duke@435 2033 guarantee(false, "NYI");
duke@435 2034 }
duke@435 2035
ysr@447 2036 bool CompactibleFreeListSpace::linearAllocationWouldFail() const {
duke@435 2037 return _smallLinearAllocBlock._word_size == 0;
duke@435 2038 }
duke@435 2039
duke@435 2040 void CompactibleFreeListSpace::repairLinearAllocationBlocks() {
duke@435 2041 // Fix up linear allocation blocks to look like free blocks
duke@435 2042 repairLinearAllocBlock(&_smallLinearAllocBlock);
duke@435 2043 }
duke@435 2044
duke@435 2045 void CompactibleFreeListSpace::repairLinearAllocBlock(LinearAllocBlock* blk) {
duke@435 2046 assert_locked();
duke@435 2047 if (blk->_ptr != NULL) {
duke@435 2048 assert(blk->_word_size != 0 && blk->_word_size >= MinChunkSize,
duke@435 2049 "Minimum block size requirement");
duke@435 2050 FreeChunk* fc = (FreeChunk*)(blk->_ptr);
jmasa@3732 2051 fc->set_size(blk->_word_size);
jmasa@3732 2052 fc->link_prev(NULL); // mark as free
duke@435 2053 fc->dontCoalesce();
jmasa@3732 2054 assert(fc->is_free(), "just marked it free");
duke@435 2055 assert(fc->cantCoalesce(), "just marked it uncoalescable");
duke@435 2056 }
duke@435 2057 }
duke@435 2058
duke@435 2059 void CompactibleFreeListSpace::refillLinearAllocBlocksIfNeeded() {
duke@435 2060 assert_locked();
duke@435 2061 if (_smallLinearAllocBlock._ptr == NULL) {
duke@435 2062 assert(_smallLinearAllocBlock._word_size == 0,
duke@435 2063 "Size of linAB should be zero if the ptr is NULL");
duke@435 2064 // Reset the linAB refill and allocation size limit.
duke@435 2065 _smallLinearAllocBlock.set(0, 0, 1024*SmallForLinearAlloc, SmallForLinearAlloc);
duke@435 2066 }
duke@435 2067 refillLinearAllocBlockIfNeeded(&_smallLinearAllocBlock);
duke@435 2068 }
duke@435 2069
duke@435 2070 void
duke@435 2071 CompactibleFreeListSpace::refillLinearAllocBlockIfNeeded(LinearAllocBlock* blk) {
duke@435 2072 assert_locked();
duke@435 2073 assert((blk->_ptr == NULL && blk->_word_size == 0) ||
duke@435 2074 (blk->_ptr != NULL && blk->_word_size >= MinChunkSize),
duke@435 2075 "blk invariant");
duke@435 2076 if (blk->_ptr == NULL) {
duke@435 2077 refillLinearAllocBlock(blk);
duke@435 2078 }
duke@435 2079 if (PrintMiscellaneous && Verbose) {
duke@435 2080 if (blk->_word_size == 0) {
duke@435 2081 warning("CompactibleFreeListSpace(prologue):: Linear allocation failure");
duke@435 2082 }
duke@435 2083 }
duke@435 2084 }
duke@435 2085
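// Refill the linear allocation block from the indexed free lists when its
// refill size is small enough, otherwise from the dictionary. The chunk is
// marked non-coalescable so that the sweeper does not sweep it up while it
// is serving as a linAB.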
duke@435 2086 void
duke@435 2087 CompactibleFreeListSpace::refillLinearAllocBlock(LinearAllocBlock* blk) {
duke@435 2088 assert_locked();
duke@435 2089 assert(blk->_word_size == 0 && blk->_ptr == NULL,
duke@435 2090 "linear allocation block should be empty");
duke@435 2091 FreeChunk* fc;
duke@435 2092 if (blk->_refillSize < SmallForDictionary &&
duke@435 2093 (fc = getChunkFromIndexedFreeList(blk->_refillSize)) != NULL) {
duke@435 2094 // A linAB's strategy might be to use small sizes to reduce
duke@435 2095 // fragmentation but still get the benefits of allocation from a
duke@435 2096 // linAB.
duke@435 2097 } else {
duke@435 2098 fc = getChunkFromDictionary(blk->_refillSize);
duke@435 2099 }
duke@435 2100 if (fc != NULL) {
duke@435 2101 blk->_ptr = (HeapWord*)fc;
duke@435 2102 blk->_word_size = fc->size();
duke@435 2103 fc->dontCoalesce(); // to prevent sweeper from sweeping us up
duke@435 2104 }
duke@435 2105 }
duke@435 2106
ysr@447 2107 // Support for concurrent collection policy decisions.
ysr@447 2108 bool CompactibleFreeListSpace::should_concurrent_collect() const {
ysr@447 2109   // In the future we might want to add in fragmentation stats --
ysr@447 2110 // including erosion of the "mountain" into this decision as well.
ysr@447 2111 return !adaptive_freelists() && linearAllocationWouldFail();
ysr@447 2112 }
ysr@447 2113
duke@435 2114 // Support for compaction
duke@435 2115
duke@435 2116 void CompactibleFreeListSpace::prepare_for_compaction(CompactPoint* cp) {
duke@435 2117 SCAN_AND_FORWARD(cp,end,block_is_obj,block_size);
duke@435 2118 // prepare_for_compaction() uses the space between live objects
duke@435 2119   // so that a later phase can skip dead space quickly. So verification
duke@435 2120   // of the free lists doesn't work after this point.
duke@435 2121 }
duke@435 2122
duke@435 2123 #define obj_size(q) adjustObjectSize(oop(q)->size())
duke@435 2124 #define adjust_obj_size(s) adjustObjectSize(s)
duke@435 2125
duke@435 2126 void CompactibleFreeListSpace::adjust_pointers() {
duke@435 2127 // In other versions of adjust_pointers(), a bail out
duke@435 2128 // based on the amount of live data in the generation
duke@435 2129 // (i.e., if 0, bail out) may be used.
duke@435 2130 // Cannot test used() == 0 here because the free lists have already
duke@435 2131 // been mangled by the compaction.
duke@435 2132
duke@435 2133 SCAN_AND_ADJUST_POINTERS(adjust_obj_size);
duke@435 2134 // See note about verification in prepare_for_compaction().
duke@435 2135 }
duke@435 2136
duke@435 2137 void CompactibleFreeListSpace::compact() {
duke@435 2138 SCAN_AND_COMPACT(obj_size);
duke@435 2139 }
duke@435 2140
duke@435 2141 // fragmentation_metric = 1 - [sum of (fbs**2) / (sum of fbs)**2]
duke@435 2142 // where fbs are the free block sizes
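// For intuition: if all free space sits in a single block of S words the
// metric is 1 - S*S/(S*S) == 0 (no fragmentation); if it is split into N
// equal blocks the metric is 1 - 1/N, which approaches 1 as N grows.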
duke@435 2143 double CompactibleFreeListSpace::flsFrag() const {
duke@435 2144 size_t itabFree = totalSizeInIndexedFreeLists();
duke@435 2145 double frag = 0.0;
duke@435 2146 size_t i;
duke@435 2147
duke@435 2148 for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
duke@435 2149 double sz = i;
duke@435 2150 frag += _indexedFreeList[i].count() * (sz * sz);
duke@435 2151 }
duke@435 2152
duke@435 2153 double totFree = itabFree +
jmasa@3732 2154 _dictionary->total_chunk_size(DEBUG_ONLY(freelistLock()));
duke@435 2155 if (totFree > 0) {
duke@435 2156 frag = ((frag + _dictionary->sum_of_squared_block_sizes()) /
duke@435 2157 (totFree * totFree));
duke@435 2158 frag = (double)1.0 - frag;
duke@435 2159 } else {
duke@435 2160 assert(frag == 0.0, "Follows from totFree == 0");
duke@435 2161 }
duke@435 2162 return frag;
duke@435 2163 }
duke@435 2164
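// At the start of a sweep, record per-size census data for each indexed
// list: the desired length computed from the inter/intra-sweep demand
// estimates, the coalescing threshold (desired * CMSSmallCoalSurplusPercent),
// and the pre-sweep count and surplus. The dictionary does the analogous
// bookkeeping for its (larger) free lists.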
duke@435 2165 void CompactibleFreeListSpace::beginSweepFLCensus(
duke@435 2166 float inter_sweep_current,
ysr@1580 2167 float inter_sweep_estimate,
ysr@1580 2168 float intra_sweep_estimate) {
duke@435 2169 assert_locked();
duke@435 2170 size_t i;
duke@435 2171 for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
jmasa@3730 2172 FreeList<FreeChunk>* fl = &_indexedFreeList[i];
ysr@1580 2173 if (PrintFLSStatistics > 1) {
ysr@1580 2174       gclog_or_tty->print("size[" SIZE_FORMAT "] : ", i);
ysr@1580 2175 }
ysr@1580 2176 fl->compute_desired(inter_sweep_current, inter_sweep_estimate, intra_sweep_estimate);
jmasa@3732 2177 fl->set_coal_desired((ssize_t)((double)fl->desired() * CMSSmallCoalSurplusPercent));
jmasa@3732 2178 fl->set_before_sweep(fl->count());
jmasa@3732 2179 fl->set_bfr_surp(fl->surplus());
duke@435 2180 }
jmasa@3732 2181 _dictionary->begin_sweep_dict_census(CMSLargeCoalSurplusPercent,
duke@435 2182 inter_sweep_current,
ysr@1580 2183 inter_sweep_estimate,
ysr@1580 2184 intra_sweep_estimate);
duke@435 2185 }
duke@435 2186
duke@435 2187 void CompactibleFreeListSpace::setFLSurplus() {
duke@435 2188 assert_locked();
duke@435 2189 size_t i;
duke@435 2190 for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
jmasa@3730 2191 FreeList<FreeChunk> *fl = &_indexedFreeList[i];
duke@435 2192 fl->set_surplus(fl->count() -
ysr@1580 2193 (ssize_t)((double)fl->desired() * CMSSmallSplitSurplusPercent));
duke@435 2194 }
duke@435 2195 }
duke@435 2196
duke@435 2197 void CompactibleFreeListSpace::setFLHints() {
duke@435 2198 assert_locked();
duke@435 2199 size_t i;
duke@435 2200 size_t h = IndexSetSize;
duke@435 2201 for (i = IndexSetSize - 1; i != 0; i -= IndexSetStride) {
jmasa@3730 2202 FreeList<FreeChunk> *fl = &_indexedFreeList[i];
duke@435 2203 fl->set_hint(h);
duke@435 2204 if (fl->surplus() > 0) {
duke@435 2205 h = i;
duke@435 2206 }
duke@435 2207 }
duke@435 2208 }
duke@435 2209
duke@435 2210 void CompactibleFreeListSpace::clearFLCensus() {
duke@435 2211 assert_locked();
ysr@3264 2212 size_t i;
duke@435 2213 for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
jmasa@3730 2214 FreeList<FreeChunk> *fl = &_indexedFreeList[i];
jmasa@3732 2215 fl->set_prev_sweep(fl->count());
jmasa@3732 2216 fl->set_coal_births(0);
jmasa@3732 2217 fl->set_coal_deaths(0);
jmasa@3732 2218 fl->set_split_births(0);
jmasa@3732 2219 fl->set_split_deaths(0);
duke@435 2220 }
duke@435 2221 }
duke@435 2222
ysr@447 2223 void CompactibleFreeListSpace::endSweepFLCensus(size_t sweep_count) {
ysr@1580 2224 if (PrintFLSStatistics > 0) {
jmasa@3732 2225 HeapWord* largestAddr = (HeapWord*) dictionary()->find_largest_dict();
ysr@1580 2226 gclog_or_tty->print_cr("CMS: Large block " PTR_FORMAT,
ysr@1580 2227 largestAddr);
ysr@1580 2228 }
duke@435 2229 setFLSurplus();
duke@435 2230 setFLHints();
duke@435 2231 if (PrintGC && PrintFLSCensus > 0) {
ysr@447 2232 printFLCensus(sweep_count);
duke@435 2233 }
duke@435 2234 clearFLCensus();
duke@435 2235 assert_locked();
jmasa@3732 2236 _dictionary->end_sweep_dict_census(CMSLargeSplitSurplusPercent);
duke@435 2237 }
duke@435 2238
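// A size is coal-over-populated when its indexed list already holds more
// chunks than the coalescing threshold (coal_desired) recorded at the start
// of the sweep, or when that threshold is negative; sizes at or above
// SmallForDictionary delegate the decision to the dictionary.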
duke@435 2239 bool CompactibleFreeListSpace::coalOverPopulated(size_t size) {
duke@435 2240 if (size < SmallForDictionary) {
jmasa@3730 2241 FreeList<FreeChunk> *fl = &_indexedFreeList[size];
jmasa@3732 2242 return (fl->coal_desired() < 0) ||
jmasa@3732 2243 ((int)fl->count() > fl->coal_desired());
duke@435 2244 } else {
jmasa@3732 2245 return dictionary()->coal_dict_over_populated(size);
duke@435 2246 }
duke@435 2247 }
duke@435 2248
duke@435 2249 void CompactibleFreeListSpace::smallCoalBirth(size_t size) {
duke@435 2250 assert(size < SmallForDictionary, "Size too large for indexed list");
jmasa@3730 2251 FreeList<FreeChunk> *fl = &_indexedFreeList[size];
jmasa@3732 2252 fl->increment_coal_births();
duke@435 2253 fl->increment_surplus();
duke@435 2254 }
duke@435 2255
duke@435 2256 void CompactibleFreeListSpace::smallCoalDeath(size_t size) {
duke@435 2257 assert(size < SmallForDictionary, "Size too large for indexed list");
jmasa@3730 2258 FreeList<FreeChunk> *fl = &_indexedFreeList[size];
jmasa@3732 2259 fl->increment_coal_deaths();
duke@435 2260 fl->decrement_surplus();
duke@435 2261 }
duke@435 2262
duke@435 2263 void CompactibleFreeListSpace::coalBirth(size_t size) {
duke@435 2264 if (size < SmallForDictionary) {
duke@435 2265 smallCoalBirth(size);
duke@435 2266 } else {
jmasa@3732 2267 dictionary()->dict_census_udpate(size,
duke@435 2268 false /* split */,
duke@435 2269 true /* birth */);
duke@435 2270 }
duke@435 2271 }
duke@435 2272
duke@435 2273 void CompactibleFreeListSpace::coalDeath(size_t size) {
duke@435 2274   if (size < SmallForDictionary) {
duke@435 2275 smallCoalDeath(size);
duke@435 2276 } else {
jmasa@3732 2277 dictionary()->dict_census_udpate(size,
duke@435 2278 false /* split */,
duke@435 2279 false /* birth */);
duke@435 2280 }
duke@435 2281 }
duke@435 2282
duke@435 2283 void CompactibleFreeListSpace::smallSplitBirth(size_t size) {
duke@435 2284 assert(size < SmallForDictionary, "Size too large for indexed list");
jmasa@3730 2285 FreeList<FreeChunk> *fl = &_indexedFreeList[size];
jmasa@3732 2286 fl->increment_split_births();
duke@435 2287 fl->increment_surplus();
duke@435 2288 }
duke@435 2289
duke@435 2290 void CompactibleFreeListSpace::smallSplitDeath(size_t size) {
duke@435 2291 assert(size < SmallForDictionary, "Size too large for indexed list");
jmasa@3730 2292 FreeList<FreeChunk> *fl = &_indexedFreeList[size];
jmasa@3732 2293 fl->increment_split_deaths();
duke@435 2294 fl->decrement_surplus();
duke@435 2295 }
duke@435 2296
jmasa@3732 2297 void CompactibleFreeListSpace::split_birth(size_t size) {
duke@435 2298 if (size < SmallForDictionary) {
duke@435 2299 smallSplitBirth(size);
duke@435 2300 } else {
jmasa@3732 2301 dictionary()->dict_census_udpate(size,
duke@435 2302 true /* split */,
duke@435 2303 true /* birth */);
duke@435 2304 }
duke@435 2305 }
duke@435 2306
duke@435 2307 void CompactibleFreeListSpace::splitDeath(size_t size) {
duke@435 2308 if (size < SmallForDictionary) {
duke@435 2309 smallSplitDeath(size);
duke@435 2310 } else {
jmasa@3732 2311 dictionary()->dict_census_udpate(size,
duke@435 2312 true /* split */,
duke@435 2313 false /* birth */);
duke@435 2314 }
duke@435 2315 }
duke@435 2316
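// Record the census effect of splitting a block of "from" words into pieces
// of "to1" and "from - to1" words: one split death at "from" and one split
// birth at each of the two resulting sizes.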
duke@435 2317 void CompactibleFreeListSpace::split(size_t from, size_t to1) {
duke@435 2318 size_t to2 = from - to1;
duke@435 2319 splitDeath(from);
jmasa@3732 2320 split_birth(to1);
jmasa@3732 2321 split_birth(to2);
duke@435 2322 }
duke@435 2323
duke@435 2324 void CompactibleFreeListSpace::print() const {
ysr@2294 2325 print_on(tty);
duke@435 2326 }
duke@435 2327
duke@435 2328 void CompactibleFreeListSpace::prepare_for_verify() {
duke@435 2329 assert_locked();
duke@435 2330 repairLinearAllocationBlocks();
duke@435 2331 // Verify that the SpoolBlocks look like free blocks of
duke@435 2332 // appropriate sizes... To be done ...
duke@435 2333 }
duke@435 2334
duke@435 2335 class VerifyAllBlksClosure: public BlkClosure {
coleenp@548 2336 private:
duke@435 2337 const CompactibleFreeListSpace* _sp;
duke@435 2338 const MemRegion _span;
ysr@2071 2339 HeapWord* _last_addr;
ysr@2071 2340 size_t _last_size;
ysr@2071 2341 bool _last_was_obj;
ysr@2071 2342 bool _last_was_live;
duke@435 2343
duke@435 2344 public:
duke@435 2345 VerifyAllBlksClosure(const CompactibleFreeListSpace* sp,
ysr@2071 2346 MemRegion span) : _sp(sp), _span(span),
ysr@2071 2347 _last_addr(NULL), _last_size(0),
ysr@2071 2348 _last_was_obj(false), _last_was_live(false) { }
duke@435 2349
coleenp@548 2350 virtual size_t do_blk(HeapWord* addr) {
duke@435 2351 size_t res;
ysr@2071 2352 bool was_obj = false;
ysr@2071 2353 bool was_live = false;
duke@435 2354 if (_sp->block_is_obj(addr)) {
ysr@2071 2355 was_obj = true;
duke@435 2356 oop p = oop(addr);
duke@435 2357 guarantee(p->is_oop(), "Should be an oop");
duke@435 2358 res = _sp->adjustObjectSize(p->size());
duke@435 2359 if (_sp->obj_is_alive(addr)) {
ysr@2071 2360 was_live = true;
duke@435 2361 p->verify();
duke@435 2362 }
duke@435 2363 } else {
duke@435 2364 FreeChunk* fc = (FreeChunk*)addr;
duke@435 2365 res = fc->size();
duke@435 2366 if (FLSVerifyLists && !fc->cantCoalesce()) {
jmasa@3732 2367 guarantee(_sp->verify_chunk_in_free_list(fc),
duke@435 2368 "Chunk should be on a free list");
duke@435 2369 }
duke@435 2370 }
ysr@2071 2371 if (res == 0) {
ysr@2071 2372 gclog_or_tty->print_cr("Livelock: no rank reduction!");
ysr@2071 2373 gclog_or_tty->print_cr(
ysr@2071 2374 " Current: addr = " PTR_FORMAT ", size = " SIZE_FORMAT ", obj = %s, live = %s \n"
ysr@2071 2375 " Previous: addr = " PTR_FORMAT ", size = " SIZE_FORMAT ", obj = %s, live = %s \n",
ysr@2071 2376 addr, res, was_obj ?"true":"false", was_live ?"true":"false",
ysr@2071 2377 _last_addr, _last_size, _last_was_obj?"true":"false", _last_was_live?"true":"false");
ysr@2071 2378 _sp->print_on(gclog_or_tty);
ysr@2071 2379 guarantee(false, "Seppuku!");
ysr@2071 2380 }
ysr@2071 2381 _last_addr = addr;
ysr@2071 2382 _last_size = res;
ysr@2071 2383 _last_was_obj = was_obj;
ysr@2071 2384 _last_was_live = was_live;
duke@435 2385 return res;
duke@435 2386 }
duke@435 2387 };
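// Sketch (not quoted from this file) of the iteration contract the closure
// above relies on: blk_iterate() advances its cursor by whatever size do_blk()
// returns, so a zero return would mean no forward progress -- exactly the
// condition the "Livelock" guarantee is designed to catch:
//
//   for (HeapWord* cur = sp->bottom(); cur < sp->end(); ) {
//     cur += cl.do_blk(cur);   // must be > 0 for the walk to terminate
//   }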
duke@435 2388
duke@435 2389 class VerifyAllOopsClosure: public OopClosure {
coleenp@548 2390 private:
duke@435 2391 const CMSCollector* _collector;
duke@435 2392 const CompactibleFreeListSpace* _sp;
duke@435 2393 const MemRegion _span;
duke@435 2394 const bool _past_remark;
duke@435 2395 const CMSBitMap* _bit_map;
duke@435 2396
coleenp@548 2397 protected:
coleenp@548 2398 void do_oop(void* p, oop obj) {
coleenp@548 2399 if (_span.contains(obj)) { // the interior oop points into CMS heap
coleenp@548 2400 if (!_span.contains(p)) { // reference from outside CMS heap
coleenp@548 2401 // Should be a valid object; the first disjunct below allows
coleenp@548 2402 // us to sidestep an assertion in block_is_obj() that insists
coleenp@548 2403 // that p be in _sp. Note that several generations (and spaces)
coleenp@548 2404 // are spanned by _span (CMS heap) above.
coleenp@548 2405 guarantee(!_sp->is_in_reserved(obj) ||
coleenp@548 2406 _sp->block_is_obj((HeapWord*)obj),
coleenp@548 2407 "Should be an object");
coleenp@548 2408 guarantee(obj->is_oop(), "Should be an oop");
coleenp@548 2409 obj->verify();
coleenp@548 2410 if (_past_remark) {
coleenp@548 2411 // Remark has been completed, the object should be marked
coleenp@548 2412 _bit_map->isMarked((HeapWord*)obj);
coleenp@548 2413 }
coleenp@548 2414 } else { // reference within CMS heap
coleenp@548 2415 if (_past_remark) {
coleenp@548 2416 // Remark has been completed -- so the referent should have
coleenp@548 2417 // been marked, if referring object is.
coleenp@548 2418 if (_bit_map->isMarked(_collector->block_start(p))) {
coleenp@548 2419 guarantee(_bit_map->isMarked((HeapWord*)obj), "Marking error?");
coleenp@548 2420 }
coleenp@548 2421 }
coleenp@548 2422 }
coleenp@548 2423 } else if (_sp->is_in_reserved(p)) {
coleenp@548 2424 // the reference is from FLS, and points out of FLS
coleenp@548 2425 guarantee(obj->is_oop(), "Should be an oop");
coleenp@548 2426 obj->verify();
coleenp@548 2427 }
coleenp@548 2428 }
coleenp@548 2429
coleenp@548 2430 template <class T> void do_oop_work(T* p) {
coleenp@548 2431 T heap_oop = oopDesc::load_heap_oop(p);
coleenp@548 2432 if (!oopDesc::is_null(heap_oop)) {
coleenp@548 2433 oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
coleenp@548 2434 do_oop(p, obj);
coleenp@548 2435 }
coleenp@548 2436 }
coleenp@548 2437
duke@435 2438 public:
duke@435 2439 VerifyAllOopsClosure(const CMSCollector* collector,
duke@435 2440 const CompactibleFreeListSpace* sp, MemRegion span,
duke@435 2441 bool past_remark, CMSBitMap* bit_map) :
duke@435 2442 OopClosure(), _collector(collector), _sp(sp), _span(span),
duke@435 2443 _past_remark(past_remark), _bit_map(bit_map) { }
duke@435 2444
coleenp@548 2445 virtual void do_oop(oop* p) { VerifyAllOopsClosure::do_oop_work(p); }
coleenp@548 2446 virtual void do_oop(narrowOop* p) { VerifyAllOopsClosure::do_oop_work(p); }
duke@435 2447 };
duke@435 2448
brutisso@3711 2449 void CompactibleFreeListSpace::verify() const {
duke@435 2450 assert_lock_strong(&_freelistLock);
duke@435 2451 verify_objects_initialized();
duke@435 2452 MemRegion span = _collector->_span;
duke@435 2453 bool past_remark = (_collector->abstract_state() ==
duke@435 2454 CMSCollector::Sweeping);
duke@435 2455
duke@435 2456 ResourceMark rm;
duke@435 2457 HandleMark hm;
duke@435 2458
duke@435 2459 // Check integrity of CFL data structures
duke@435 2460 _promoInfo.verify();
duke@435 2461 _dictionary->verify();
duke@435 2462 if (FLSVerifyIndexTable) {
duke@435 2463 verifyIndexedFreeLists();
duke@435 2464 }
duke@435 2465 // Check integrity of all objects and free blocks in space
duke@435 2466 {
duke@435 2467 VerifyAllBlksClosure cl(this, span);
duke@435 2468 ((CompactibleFreeListSpace*)this)->blk_iterate(&cl); // cast off const
duke@435 2469 }
duke@435 2470 // Check that all references in the heap to FLS
duke@435 2471 // are to valid objects in FLS or that references in
duke@435 2472 // FLS are to valid objects elsewhere in the heap
duke@435 2473 if (FLSVerifyAllHeapReferences) {
duke@435 2475 VerifyAllOopsClosure cl(_collector, this, span, past_remark,
duke@435 2476 _collector->markBitMap());
duke@435 2477 CollectedHeap* ch = Universe::heap();
duke@435 2478 ch->oop_iterate(&cl); // all oops in generations
duke@435 2479 ch->permanent_oop_iterate(&cl); // all oops in perm gen
duke@435 2480 }
duke@435 2481
duke@435 2482 if (VerifyObjectStartArray) {
duke@435 2483 // Verify the block offset table
duke@435 2484 _bt.verify();
duke@435 2485 }
duke@435 2486 }
duke@435 2487
duke@435 2488 #ifndef PRODUCT
duke@435 2489 void CompactibleFreeListSpace::verifyFreeLists() const {
duke@435 2490 if (FLSVerifyLists) {
duke@435 2491 _dictionary->verify();
duke@435 2492 verifyIndexedFreeLists();
duke@435 2493 } else {
duke@435 2494 if (FLSVerifyDictionary) {
duke@435 2495 _dictionary->verify();
duke@435 2496 }
duke@435 2497 if (FLSVerifyIndexTable) {
duke@435 2498 verifyIndexedFreeLists();
duke@435 2499 }
duke@435 2500 }
duke@435 2501 }
duke@435 2502 #endif
duke@435 2503
duke@435 2504 void CompactibleFreeListSpace::verifyIndexedFreeLists() const {
duke@435 2505 size_t i = 0;
ysr@3264 2506 for (; i < IndexSetStart; i++) {
duke@435 2507 guarantee(_indexedFreeList[i].head() == NULL, "should be NULL");
duke@435 2508 }
duke@435 2509 for (; i < IndexSetSize; i++) {
duke@435 2510 verifyIndexedFreeList(i);
duke@435 2511 }
duke@435 2512 }
duke@435 2513
duke@435 2514 void CompactibleFreeListSpace::verifyIndexedFreeList(size_t size) const {
ysr@1580 2515 FreeChunk* fc = _indexedFreeList[size].head();
ysr@1580 2516 FreeChunk* tail = _indexedFreeList[size].tail();
ysr@1580 2517 size_t num = _indexedFreeList[size].count();
ysr@1580 2518 size_t n = 0;
ysr@3264 2519 guarantee(((size >= IndexSetStart) && (size % IndexSetStride == 0)) || fc == NULL,
ysr@3220 2520 "Slot should have been empty");
ysr@1580 2521 for (; fc != NULL; fc = fc->next(), n++) {
duke@435 2522 guarantee(fc->size() == size, "Size inconsistency");
jmasa@3732 2523 guarantee(fc->is_free(), "!free?");
duke@435 2524 guarantee(fc->next() == NULL || fc->next()->prev() == fc, "Broken list");
ysr@1580 2525 guarantee((fc->next() == NULL) == (fc == tail), "Incorrect tail");
duke@435 2526 }
ysr@1580 2527 guarantee(n == num, "Incorrect count");
duke@435 2528 }
duke@435 2529
duke@435 2530 #ifndef PRODUCT
ysr@3220 2531 void CompactibleFreeListSpace::check_free_list_consistency() const {
jmasa@3732 2532 assert(_dictionary->min_size() <= IndexSetSize,
duke@435 2533 "Some sizes can't be allocated without recourse to"
duke@435 2534 " linear allocation buffers");
jmasa@3730 2535 assert(BinaryTreeDictionary<FreeChunk>::min_tree_chunk_size*HeapWordSize == sizeof(TreeChunk<FreeChunk>),
duke@435 2536 "else min_tree_chunk_size is wrong");
ysr@3220 2537 assert((IndexSetStride == 2 && IndexSetStart == 4) || // 32-bit
ysr@3220 2538 (IndexSetStride == 1 && IndexSetStart == 3), "just checking"); // 64-bit
ysr@3264 2539 assert((IndexSetStride != 2) || (IndexSetStart % 2 == 0),
duke@435 2540 "Some for-loops may be incorrectly initialized");
duke@435 2541 assert((IndexSetStride != 2) || (IndexSetSize % 2 == 1),
duke@435 2542 "For-loops that iterate over IndexSet with stride 2 may be wrong");
duke@435 2543 }
duke@435 2544 #endif
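// For reference, the asserts above pin down the slot layout of the indexed
// free lists (values taken from the asserts; IndexSetSize is 257):
//
//   32-bit: IndexSetStart == 4, IndexSetStride == 2  ->  lists for 4, 6, ..., 256
//   64-bit: IndexSetStart == 3, IndexSetStride == 1  ->  lists for 3, 4, ..., 256
//
// so every small-block size below IndexSetSize has its own FreeList<FreeChunk>,
// and loops over the index set must step by IndexSetStride.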
duke@435 2545
ysr@447 2546 void CompactibleFreeListSpace::printFLCensus(size_t sweep_count) const {
duke@435 2547 assert_lock_strong(&_freelistLock);
jmasa@3730 2548 FreeList<FreeChunk> total;
ysr@447 2549 gclog_or_tty->print("end sweep# " SIZE_FORMAT "\n", sweep_count);
jmasa@3730 2550 FreeList<FreeChunk>::print_labels_on(gclog_or_tty, "size");
jmasa@3732 2551 size_t total_free = 0;
duke@435 2552 for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
jmasa@3730 2553 const FreeList<FreeChunk>* fl = &_indexedFreeList[i];
jmasa@3732 2554 total_free += fl->count() * fl->size();
ysr@447 2555 if (i % (40*IndexSetStride) == 0) {
jmasa@3730 2556 FreeList<FreeChunk>::print_labels_on(gclog_or_tty, "size");
ysr@447 2557 }
ysr@447 2558 fl->print_on(gclog_or_tty);
jmasa@3732 2559 total.set_bfr_surp( total.bfr_surp() + fl->bfr_surp() );
ysr@447 2560 total.set_surplus( total.surplus() + fl->surplus() );
ysr@447 2561 total.set_desired( total.desired() + fl->desired() );
jmasa@3732 2562 total.set_prev_sweep( total.prev_sweep() + fl->prev_sweep() );
jmasa@3732 2563 total.set_before_sweep(total.before_sweep() + fl->before_sweep());
ysr@447 2564 total.set_count( total.count() + fl->count() );
jmasa@3732 2565 total.set_coal_births( total.coal_births() + fl->coal_births() );
jmasa@3732 2566 total.set_coal_deaths( total.coal_deaths() + fl->coal_deaths() );
jmasa@3732 2567 total.set_split_births(total.split_births() + fl->split_births());
jmasa@3732 2568 total.set_split_deaths(total.split_deaths() + fl->split_deaths());
duke@435 2569 }
ysr@447 2570 total.print_on(gclog_or_tty, "TOTAL");
ysr@447 2571 gclog_or_tty->print_cr("Total free in indexed lists "
jmasa@3732 2572 SIZE_FORMAT " words", total_free);
duke@435 2573 gclog_or_tty->print("growth: %8.5f deficit: %8.5f\n",
jmasa@3732 2574 (double)(total.split_births()+total.coal_births()-total.split_deaths()-total.coal_deaths())/
jmasa@3732 2575 (total.prev_sweep() != 0 ? (double)total.prev_sweep() : 1.0),
ysr@447 2576 (double)(total.desired() - total.count())/(total.desired() != 0 ? (double)total.desired() : 1.0));
jmasa@3732 2577 _dictionary->print_dict_census();
duke@435 2578 }
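// Worked example of the two ratios printed above (numbers are illustrative):
// with 120 split births, 30 coalesced births, 40 split deaths, 10 coalesced
// deaths and prev_sweep == 400, growth is (120 + 30 - 40 - 10) / 400 = 0.25;
// with desired == 500 and count == 450, deficit is (500 - 450) / 500 = 0.10.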
duke@435 2579
ysr@1580 2580 ///////////////////////////////////////////////////////////////////////////
ysr@1580 2581 // CFLS_LAB
ysr@1580 2582 ///////////////////////////////////////////////////////////////////////////
ysr@1580 2583
ysr@1580 2584 #define VECTOR_257(x) \
ysr@1580 2585 /* 1 2 3 4 5 6 7 8 9 1x 11 12 13 14 15 16 17 18 19 2x 21 22 23 24 25 26 27 28 29 3x 31 32 */ \
ysr@1580 2586 { x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, \
ysr@1580 2587 x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, \
ysr@1580 2588 x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, \
ysr@1580 2589 x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, \
ysr@1580 2590 x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, \
ysr@1580 2591 x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, \
ysr@1580 2592 x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, \
ysr@1580 2593 x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, \
ysr@1580 2594 x }
ysr@1580 2595
ysr@1580 2596 // Initialize with default setting of CMSParPromoteBlocksToClaim, _not_
ysr@1580 2597 // OldPLABSize, whose static default is different; if overridden at the
ysr@1580 2598 // command-line, this will get reinitialized via a call to
ysr@1580 2599 // modify_initialization() below.
ysr@1580 2600 AdaptiveWeightedAverage CFLS_LAB::_blocks_to_claim[] =
ysr@1580 2601 VECTOR_257(AdaptiveWeightedAverage(OldPLABWeight, (float)CMSParPromoteBlocksToClaim));
ysr@1580 2602 size_t CFLS_LAB::_global_num_blocks[] = VECTOR_257(0);
jmasa@3357 2603 uint CFLS_LAB::_global_num_workers[] = VECTOR_257(0);
duke@435 2604
duke@435 2605 CFLS_LAB::CFLS_LAB(CompactibleFreeListSpace* cfls) :
duke@435 2606 _cfls(cfls)
duke@435 2607 {
ysr@1580 2608 assert(CompactibleFreeListSpace::IndexSetSize == 257, "Modify VECTOR_257() macro above");
duke@435 2609 for (size_t i = CompactibleFreeListSpace::IndexSetStart;
duke@435 2610 i < CompactibleFreeListSpace::IndexSetSize;
duke@435 2611 i += CompactibleFreeListSpace::IndexSetStride) {
duke@435 2612 _indexedFreeList[i].set_size(i);
ysr@1580 2613 _num_blocks[i] = 0;
ysr@1580 2614 }
ysr@1580 2615 }
ysr@1580 2616
ysr@1580 2617 static bool _CFLS_LAB_modified = false;
ysr@1580 2618
ysr@1580 2619 void CFLS_LAB::modify_initialization(size_t n, unsigned wt) {
ysr@1580 2620 assert(!_CFLS_LAB_modified, "Call only once");
ysr@1580 2621 _CFLS_LAB_modified = true;
ysr@1580 2622 for (size_t i = CompactibleFreeListSpace::IndexSetStart;
ysr@1580 2623 i < CompactibleFreeListSpace::IndexSetSize;
ysr@1580 2624 i += CompactibleFreeListSpace::IndexSetStride) {
ysr@1580 2625 _blocks_to_claim[i].modify(n, wt, true /* force */);
duke@435 2626 }
duke@435 2627 }
duke@435 2628
duke@435 2629 HeapWord* CFLS_LAB::alloc(size_t word_sz) {
duke@435 2630 FreeChunk* res;
ysr@2132 2631 assert(word_sz == _cfls->adjustObjectSize(word_sz), "Error");
duke@435 2632 if (word_sz >= CompactibleFreeListSpace::IndexSetSize) {
duke@435 2633 // This locking manages sync with other large object allocations.
duke@435 2634 MutexLockerEx x(_cfls->parDictionaryAllocLock(),
duke@435 2635 Mutex::_no_safepoint_check_flag);
duke@435 2636 res = _cfls->getChunkFromDictionaryExact(word_sz);
duke@435 2637 if (res == NULL) return NULL;
duke@435 2638 } else {
jmasa@3730 2639 FreeList<FreeChunk>* fl = &_indexedFreeList[word_sz];
duke@435 2640 if (fl->count() == 0) {
duke@435 2641 // Attempt to refill this local free list.
ysr@1580 2642 get_from_global_pool(word_sz, fl);
duke@435 2643 // If it didn't work, give up.
duke@435 2644 if (fl->count() == 0) return NULL;
duke@435 2645 }
jmasa@3732 2646 res = fl->get_chunk_at_head();
duke@435 2647 assert(res != NULL, "Why was count non-zero?");
duke@435 2648 }
duke@435 2649 res->markNotFree();
jmasa@3732 2650 assert(!res->is_free(), "shouldn't be marked free");
coleenp@622 2651 assert(oop(res)->klass_or_null() == NULL, "should look uninitialized");
duke@435 2652 // Mangle a just-allocated object with a distinct pattern.
duke@435 2653 debug_only(res->mangleAllocated(word_sz));
duke@435 2654 return (HeapWord*)res;
duke@435 2655 }
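// A sketch of the expected calling pattern for the allocator above (the caller
// names cfls, obj_size and lab are illustrative, not code from this file): the
// request is normalized with adjustObjectSize(), small sizes are served without
// locking from this thread's _indexedFreeList, and only dictionary-sized
// requests take parDictionaryAllocLock():
//
//   size_t word_sz = cfls->adjustObjectSize(obj_size);
//   HeapWord* dest = lab->alloc(word_sz);     // lab is this thread's CFLS_LAB
//   if (dest == NULL) {
//     // fall back / report promotion failure
//   }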
duke@435 2656
ysr@1580 2657 // Get a chunk of blocks of the right size and update related
ysr@1580 2658 // book-keeping stats
jmasa@3730 2659 void CFLS_LAB::get_from_global_pool(size_t word_sz, FreeList<FreeChunk>* fl) {
ysr@1580 2660 // Get the #blocks we want to claim
ysr@1580 2661 size_t n_blks = (size_t)_blocks_to_claim[word_sz].average();
ysr@1580 2662 assert(n_blks > 0, "Error");
ysr@1580 2663 assert(ResizePLAB || n_blks == OldPLABSize, "Error");
ysr@1580 2664 // In some cases, when the application has a phase change,
ysr@1580 2665 // there may be a sudden and sharp shift in the object survival
ysr@1580 2666 // profile, and updating the counts at the end of a scavenge
ysr@1580 2667 // may not be quick enough, giving rise to large scavenge pauses
ysr@1580 2668 // during these phase changes. It is beneficial to detect such
ysr@1580 2669 // changes on-the-fly during a scavenge and avoid such a phase-change
ysr@1580 2670 // pothole. The following code is a heuristic attempt to do that.
ysr@1580 2671 // It is protected by a product flag until we have gained
ysr@1580 2672 // enough experience with this heuristic and fine-tuned its behaviour.
ysr@1580 2673 // WARNING: This might increase fragmentation if we overreact to
ysr@1580 2674 // small spikes, so some kind of historical smoothing based on
ysr@1580 2675 // previous experience with the greater reactivity might be useful.
ysr@1580 2676 // Lacking sufficient experience, CMSOldPLABResizeQuicker is disabled by
ysr@1580 2677 // default.
ysr@1580 2678 if (ResizeOldPLAB && CMSOldPLABResizeQuicker) {
ysr@1580 2679 size_t multiple = _num_blocks[word_sz]/(CMSOldPLABToleranceFactor*CMSOldPLABNumRefills*n_blks);
ysr@1580 2680 n_blks += CMSOldPLABReactivityFactor*multiple*n_blks;
ysr@1580 2681 n_blks = MIN2(n_blks, CMSOldPLABMax);
ysr@1580 2682 }
ysr@1580 2683 assert(n_blks > 0, "Error");
ysr@1580 2684 _cfls->par_get_chunk_of_blocks(word_sz, n_blks, fl);
ysr@1580 2685 // Update stats table entry for this block size
ysr@1580 2686 _num_blocks[word_sz] += fl->count();
ysr@1580 2687 }
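// Illustrative arithmetic for the reactive resizing above (the flag values are
// example values, not necessarily the defaults): suppose the historical average
// gives n_blks == 50, this thread has already consumed
// _num_blocks[word_sz] == 2000 blocks of this size, and
// CMSOldPLABToleranceFactor == 4, CMSOldPLABNumRefills == 4,
// CMSOldPLABReactivityFactor == 2.  Then
//
//   multiple = 2000 / (4 * 4 * 50)            == 2
//   n_blks  += 2 /* reactivity */ * 2 * 50    == 200 more, i.e. 250 in total,
//
// subsequently clamped to CMSOldPLABMax.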
ysr@1580 2688
ysr@1580 2689 void CFLS_LAB::compute_desired_plab_size() {
ysr@1580 2690 for (size_t i = CompactibleFreeListSpace::IndexSetStart;
duke@435 2691 i < CompactibleFreeListSpace::IndexSetSize;
duke@435 2692 i += CompactibleFreeListSpace::IndexSetStride) {
ysr@1580 2693 assert((_global_num_workers[i] == 0) == (_global_num_blocks[i] == 0),
ysr@1580 2694 "Counter inconsistency");
ysr@1580 2695 if (_global_num_workers[i] > 0) {
ysr@1580 2696 // Need to smooth wrt historical average
ysr@1580 2697 if (ResizeOldPLAB) {
ysr@1580 2698 _blocks_to_claim[i].sample(
ysr@1580 2699 MAX2((size_t)CMSOldPLABMin,
ysr@1580 2700 MIN2((size_t)CMSOldPLABMax,
ysr@1580 2701 _global_num_blocks[i]/(_global_num_workers[i]*CMSOldPLABNumRefills))));
ysr@1580 2702 }
ysr@1580 2703 // Reset counters for next round
ysr@1580 2704 _global_num_workers[i] = 0;
ysr@1580 2705 _global_num_blocks[i] = 0;
ysr@1580 2706 if (PrintOldPLAB) {
ysr@1580 2707 gclog_or_tty->print_cr("[" SIZE_FORMAT "]: " SIZE_FORMAT, i, (size_t)_blocks_to_claim[i].average());
ysr@1580 2708 }
duke@435 2709 }
duke@435 2710 }
duke@435 2711 }
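// Illustrative numbers for the sampling above: if block size i saw
// _global_num_blocks[i] == 4800 blocks used by _global_num_workers[i] == 4
// workers in the last scavenge, and CMSOldPLABNumRefills == 4 (example value),
// the new sample is 4800 / (4 * 4) == 300 blocks per refill, clamped to
// [CMSOldPLABMin, CMSOldPLABMax] before being folded into the weighted
// average _blocks_to_claim[i].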
duke@435 2712
ysr@3220 2713 // If this is changed in the future to allow parallel
ysr@3220 2714 // access, one would need to take the FL locks and,
ysr@3220 2715 // depending on how it is used, stagger access from
ysr@3220 2716 // parallel threads to reduce contention.
ysr@1580 2717 void CFLS_LAB::retire(int tid) {
ysr@1580 2718 // We run this single threaded with the world stopped;
ysr@1580 2719 // so no need for locks and such.
ysr@1580 2720 NOT_PRODUCT(Thread* t = Thread::current();)
ysr@1580 2721 assert(Thread::current()->is_VM_thread(), "Error");
ysr@1580 2722 for (size_t i = CompactibleFreeListSpace::IndexSetStart;
ysr@1580 2723 i < CompactibleFreeListSpace::IndexSetSize;
ysr@1580 2724 i += CompactibleFreeListSpace::IndexSetStride) {
ysr@1580 2725 assert(_num_blocks[i] >= (size_t)_indexedFreeList[i].count(),
ysr@1580 2726 "Can't retire more than what we obtained");
ysr@1580 2727 if (_num_blocks[i] > 0) {
ysr@1580 2728 size_t num_retire = _indexedFreeList[i].count();
ysr@1580 2729 assert(_num_blocks[i] > num_retire, "Should have used at least one");
ysr@1580 2730 {
ysr@3220 2731 // MutexLockerEx x(_cfls->_indexedFreeListParLocks[i],
ysr@3220 2732 // Mutex::_no_safepoint_check_flag);
ysr@3220 2733
ysr@1580 2734 // Update globals stats for num_blocks used
ysr@1580 2735 _global_num_blocks[i] += (_num_blocks[i] - num_retire);
ysr@1580 2736 _global_num_workers[i]++;
jmasa@3357 2737 assert(_global_num_workers[i] <= ParallelGCThreads, "Too big");
ysr@1580 2738 if (num_retire > 0) {
ysr@1580 2739 _cfls->_indexedFreeList[i].prepend(&_indexedFreeList[i]);
ysr@1580 2740 // Reset this list.
jmasa@3730 2741 _indexedFreeList[i] = FreeList<FreeChunk>();
ysr@1580 2742 _indexedFreeList[i].set_size(i);
ysr@1580 2743 }
ysr@1580 2744 }
ysr@1580 2745 if (PrintOldPLAB) {
ysr@1580 2746 gclog_or_tty->print_cr("%d[" SIZE_FORMAT "]: " SIZE_FORMAT "/" SIZE_FORMAT "/" SIZE_FORMAT,
ysr@1580 2747 tid, i, num_retire, _num_blocks[i], (size_t)_blocks_to_claim[i].average());
ysr@1580 2748 }
ysr@1580 2749 // Reset stats for next round
ysr@1580 2750 _num_blocks[i] = 0;
ysr@1580 2751 }
ysr@1580 2752 }
ysr@1580 2753 }
ysr@1580 2754
jmasa@3730 2755 void CompactibleFreeListSpace::par_get_chunk_of_blocks(size_t word_sz, size_t n, FreeList<FreeChunk>* fl) {
duke@435 2756 assert(fl->count() == 0, "Precondition.");
duke@435 2757 assert(word_sz < CompactibleFreeListSpace::IndexSetSize,
duke@435 2758 "Precondition");
duke@435 2759
ysr@1580 2760 // We'll try all multiples of word_sz in the indexed set, starting with
ysr@1580 2761 // word_sz itself and, if CMSSplitIndexedFreeListBlocks, try larger multiples,
ysr@1580 2762 // then try getting a big chunk and splitting it.
ysr@1580 2763 {
ysr@1580 2764 bool found;
ysr@1580 2765 int k;
ysr@1580 2766 size_t cur_sz;
ysr@1580 2767 for (k = 1, cur_sz = k * word_sz, found = false;
ysr@1580 2768 (cur_sz < CompactibleFreeListSpace::IndexSetSize) &&
ysr@1580 2769 (CMSSplitIndexedFreeListBlocks || k <= 1);
ysr@1580 2770 k++, cur_sz = k * word_sz) {
jmasa@3730 2771 FreeList<FreeChunk> fl_for_cur_sz; // Empty.
ysr@1580 2772 fl_for_cur_sz.set_size(cur_sz);
ysr@1580 2773 {
ysr@1580 2774 MutexLockerEx x(_indexedFreeListParLocks[cur_sz],
ysr@1580 2775 Mutex::_no_safepoint_check_flag);
jmasa@3730 2776 FreeList<FreeChunk>* gfl = &_indexedFreeList[cur_sz];
ysr@1580 2777 if (gfl->count() != 0) {
ysr@1580 2778 // nn is the number of chunks of size cur_sz that
ysr@1580 2779 // we'd need to split k-ways each, in order to create
ysr@1580 2780 // "n" chunks of size word_sz each.
ysr@1580 2781 const size_t nn = MAX2(n/k, (size_t)1);
ysr@1580 2782 gfl->getFirstNChunksFromList(nn, &fl_for_cur_sz);
ysr@1580 2783 found = true;
ysr@1580 2784 if (k > 1) {
ysr@1580 2785 // Update split death stats for the cur_sz-size blocks list:
ysr@1580 2786 // we increment the split death count by the number of blocks
ysr@1580 2787 // we just took from the cur_sz-size blocks list and which
ysr@1580 2788 // we will be splitting below.
jmasa@3732 2789 ssize_t deaths = gfl->split_deaths() +
ysr@1580 2790 fl_for_cur_sz.count();
jmasa@3732 2791 gfl->set_split_deaths(deaths);
ysr@1580 2792 }
ysr@1580 2793 }
ysr@1580 2794 }
ysr@1580 2795 // Now transfer fl_for_cur_sz to fl. Common case, we hope, is k = 1.
ysr@1580 2796 if (found) {
ysr@1580 2797 if (k == 1) {
ysr@1580 2798 fl->prepend(&fl_for_cur_sz);
ysr@1580 2799 } else {
ysr@1580 2800 // Divide each block on fl_for_cur_sz up k ways.
ysr@1580 2801 FreeChunk* fc;
jmasa@3732 2802 while ((fc = fl_for_cur_sz.get_chunk_at_head()) != NULL) {
ysr@1580 2803 // Must do this in reverse order, so that anybody attempting to
ysr@1580 2804 // access the main chunk sees it as a single free block until we
ysr@1580 2805 // change it.
ysr@1580 2806 size_t fc_size = fc->size();
jmasa@3732 2807 assert(fc->is_free(), "Error");
ysr@1580 2808 for (int i = k-1; i >= 0; i--) {
ysr@1580 2809 FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz);
ysr@2071 2810 assert((i != 0) ||
jmasa@3732 2811 ((fc == ffc) && ffc->is_free() &&
ysr@2071 2812 (ffc->size() == k*word_sz) && (fc_size == word_sz)),
ysr@2071 2813 "Counting error");
jmasa@3732 2814 ffc->set_size(word_sz);
jmasa@3732 2815 ffc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
jmasa@3732 2816 ffc->link_next(NULL);
ysr@1580 2817 // Above must occur before BOT is updated below.
ysr@2071 2818 OrderAccess::storestore();
ysr@2071 2819 // splitting from the right, fc_size == i * word_sz
ysr@2071 2820 _bt.mark_block((HeapWord*)ffc, word_sz, true /* reducing */);
ysr@1580 2821 fc_size -= word_sz;
ysr@2071 2822 assert(fc_size == i*word_sz, "Error");
ysr@2071 2823 _bt.verify_not_unallocated((HeapWord*)ffc, word_sz);
ysr@1580 2824 _bt.verify_single_block((HeapWord*)fc, fc_size);
ysr@2071 2825 _bt.verify_single_block((HeapWord*)ffc, word_sz);
ysr@1580 2826 // Push this on "fl".
jmasa@3732 2827 fl->return_chunk_at_head(ffc);
ysr@1580 2828 }
ysr@1580 2829 // TRAP
ysr@1580 2830 assert(fl->tail()->next() == NULL, "List invariant.");
ysr@1580 2831 }
ysr@1580 2832 }
ysr@1580 2833 // Update birth stats for this block size.
ysr@1580 2834 size_t num = fl->count();
ysr@1580 2835 MutexLockerEx x(_indexedFreeListParLocks[word_sz],
ysr@1580 2836 Mutex::_no_safepoint_check_flag);
jmasa@3732 2837 ssize_t births = _indexedFreeList[word_sz].split_births() + num;
jmasa@3732 2838 _indexedFreeList[word_sz].set_split_births(births);
ysr@1580 2839 return;
duke@435 2840 }
duke@435 2841 }
duke@435 2842 }
duke@435 2843 // Otherwise, we'll split a block from the dictionary.
duke@435 2844 FreeChunk* fc = NULL;
duke@435 2845 FreeChunk* rem_fc = NULL;
duke@435 2846 size_t rem;
duke@435 2847 {
duke@435 2848 MutexLockerEx x(parDictionaryAllocLock(),
duke@435 2849 Mutex::_no_safepoint_check_flag);
duke@435 2850 while (n > 0) {
jmasa@3732 2851 fc = dictionary()->get_chunk(MAX2(n * word_sz,
jmasa@3732 2852 _dictionary->min_size()),
jmasa@3730 2853 FreeBlockDictionary<FreeChunk>::atLeast);
duke@435 2854 if (fc != NULL) {
ysr@2071 2855 _bt.allocated((HeapWord*)fc, fc->size(), true /* reducing */); // update _unallocated_blk
jmasa@3732 2856 dictionary()->dict_census_udpate(fc->size(),
duke@435 2857 true /*split*/,
duke@435 2858 false /*birth*/);
duke@435 2859 break;
duke@435 2860 } else {
duke@435 2861 n--;
duke@435 2862 }
duke@435 2863 }
duke@435 2864 if (fc == NULL) return;
ysr@2071 2865 // Otherwise, split up that block.
ysr@1580 2866 assert((ssize_t)n >= 1, "Control point invariant");
jmasa@3732 2867 assert(fc->is_free(), "Error: should be a free block");
ysr@2071 2868 _bt.verify_single_block((HeapWord*)fc, fc->size());
ysr@1580 2869 const size_t nn = fc->size() / word_sz;
duke@435 2870 n = MIN2(nn, n);
ysr@1580 2871 assert((ssize_t)n >= 1, "Control point invariant");
duke@435 2872 rem = fc->size() - n * word_sz;
duke@435 2873 // If there is a remainder, and it's too small, allocate one fewer.
duke@435 2874 if (rem > 0 && rem < MinChunkSize) {
duke@435 2875 n--; rem += word_sz;
duke@435 2876 }
jmasa@1583 2877 // Note that at this point we may have n == 0.
jmasa@1583 2878 assert((ssize_t)n >= 0, "Control point invariant");
jmasa@1583 2879
jmasa@1583 2880 // If n is 0, the chunk fc that was found is not large
jmasa@1583 2881 // enough to leave a viable remainder. We are unable to
jmasa@1583 2882 // allocate even one block. Return fc to the
jmasa@1583 2883 // dictionary and return, leaving "fl" empty.
jmasa@1583 2884 if (n == 0) {
jmasa@1583 2885 returnChunkToDictionary(fc);
ysr@2071 2886 assert(fl->count() == 0, "We never allocated any blocks");
jmasa@1583 2887 return;
jmasa@1583 2888 }
jmasa@1583 2889
duke@435 2890 // First return the remainder, if any.
duke@435 2891 // Note that we hold the lock until we decide if we're going to give
ysr@1580 2892 // back the remainder to the dictionary, since a concurrent allocation
duke@435 2893 // may otherwise see the heap as empty. (We're willing to take that
duke@435 2894 // hit if the block is a small block.)
duke@435 2895 if (rem > 0) {
duke@435 2896 size_t prefix_size = n * word_sz;
duke@435 2897 rem_fc = (FreeChunk*)((HeapWord*)fc + prefix_size);
jmasa@3732 2898 rem_fc->set_size(rem);
jmasa@3732 2899 rem_fc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
jmasa@3732 2900 rem_fc->link_next(NULL);
duke@435 2901 // Above must occur before BOT is updated below.
ysr@1580 2902 assert((ssize_t)n > 0 && prefix_size > 0 && rem_fc > fc, "Error");
ysr@2071 2903 OrderAccess::storestore();
duke@435 2904 _bt.split_block((HeapWord*)fc, fc->size(), prefix_size);
jmasa@3732 2905 assert(fc->is_free(), "Error");
jmasa@3732 2906 fc->set_size(prefix_size);
duke@435 2907 if (rem >= IndexSetSize) {
duke@435 2908 returnChunkToDictionary(rem_fc);
jmasa@3732 2909 dictionary()->dict_census_udpate(rem, true /*split*/, true /*birth*/);
duke@435 2910 rem_fc = NULL;
duke@435 2911 }
duke@435 2912 // Otherwise, return it to the small list below.
duke@435 2913 }
duke@435 2914 }
duke@435 2915 if (rem_fc != NULL) {
duke@435 2916 MutexLockerEx x(_indexedFreeListParLocks[rem],
duke@435 2917 Mutex::_no_safepoint_check_flag);
duke@435 2918 _bt.verify_not_unallocated((HeapWord*)rem_fc, rem_fc->size());
jmasa@3732 2919 _indexedFreeList[rem].return_chunk_at_head(rem_fc);
duke@435 2920 smallSplitBirth(rem);
duke@435 2921 }
ysr@1580 2922 assert((ssize_t)n > 0 && fc != NULL, "Consistency");
duke@435 2923 // Now do the splitting up.
duke@435 2924 // Must do this in reverse order, so that anybody attempting to
duke@435 2925 // access the main chunk sees it as a single free block until we
duke@435 2926 // change it.
duke@435 2927 size_t fc_size = n * word_sz;
duke@435 2928 // All but first chunk in this loop
duke@435 2929 for (ssize_t i = n-1; i > 0; i--) {
duke@435 2930 FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz);
jmasa@3732 2931 ffc->set_size(word_sz);
jmasa@3732 2932 ffc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
jmasa@3732 2933 ffc->link_next(NULL);
duke@435 2934 // Above must occur before BOT is updated below.
ysr@2071 2935 OrderAccess::storestore();
duke@435 2936 // splitting from the right: here fc_size == (i + 1) * word_sz; it drops to i * word_sz after the decrement below
ysr@2071 2937 _bt.mark_block((HeapWord*)ffc, word_sz, true /* reducing */);
duke@435 2938 fc_size -= word_sz;
duke@435 2939 _bt.verify_not_unallocated((HeapWord*)ffc, ffc->size());
duke@435 2940 _bt.verify_single_block((HeapWord*)ffc, ffc->size());
duke@435 2941 _bt.verify_single_block((HeapWord*)fc, fc_size);
duke@435 2942 // Push this on "fl".
jmasa@3732 2943 fl->return_chunk_at_head(ffc);
duke@435 2944 }
duke@435 2945 // First chunk
jmasa@3732 2946 assert(fc->is_free() && fc->size() == n*word_sz, "Error: should still be a free block");
ysr@2071 2947 // The blocks above should show their new sizes before the first block below
jmasa@3732 2948 fc->set_size(word_sz);
jmasa@3732 2949 fc->link_prev(NULL); // idempotent wrt free-ness, see assert above
jmasa@3732 2950 fc->link_next(NULL);
duke@435 2951 _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
duke@435 2952 _bt.verify_single_block((HeapWord*)fc, fc->size());
jmasa@3732 2953 fl->return_chunk_at_head(fc);
duke@435 2954
ysr@1580 2955 assert((ssize_t)n > 0 && (ssize_t)n == fl->count(), "Incorrect number of blocks");
duke@435 2956 {
ysr@1580 2957 // Update the stats for this block size.
duke@435 2958 MutexLockerEx x(_indexedFreeListParLocks[word_sz],
duke@435 2959 Mutex::_no_safepoint_check_flag);
jmasa@3732 2960 const ssize_t births = _indexedFreeList[word_sz].split_births() + n;
jmasa@3732 2961 _indexedFreeList[word_sz].set_split_births(births);
ysr@1580 2962 // ssize_t new_surplus = _indexedFreeList[word_sz].surplus() + n;
ysr@1580 2963 // _indexedFreeList[word_sz].set_surplus(new_surplus);
duke@435 2964 }
duke@435 2965
duke@435 2966 // TRAP
duke@435 2967 assert(fl->tail()->next() == NULL, "List invariant.");
duke@435 2968 }
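// The per-chunk carve step used twice above always follows the same publication
// protocol; in sketch form, for one chunk ffc carved off the right-hand end of
// a larger free chunk fc:
//
//   ffc->set_size(word_sz);        // 1. make the new chunk self-describing and
//   ffc->link_prev(NULL);          //    marked free for other (parallel) GC threads
//   ffc->link_next(NULL);
//   OrderAccess::storestore();     // 2. publish step 1 before the BOT update below
//   _bt.mark_block((HeapWord*)ffc, word_sz, true /* reducing */);
//   fl->return_chunk_at_head(ffc); // 3. only now hand it to the caller's list
//
// Carving from the right-hand end means the original header fc keeps describing
// one single large free block until the final step, so concurrent readers never
// see a half-split chunk.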
duke@435 2969
duke@435 2970 // Set up the space's par_seq_tasks structure for work claiming
duke@435 2971 // for parallel rescan. See CMSParRemarkTask where this is currently used.
duke@435 2972 // XXX Need to suitably abstract and generalize this and the next
duke@435 2973 // method into one.
duke@435 2974 void
duke@435 2975 CompactibleFreeListSpace::
duke@435 2976 initialize_sequential_subtasks_for_rescan(int n_threads) {
duke@435 2977 // The "size" of each task is fixed according to rescan_task_size.
duke@435 2978 assert(n_threads > 0, "Unexpected n_threads argument");
duke@435 2979 const size_t task_size = rescan_task_size();
duke@435 2980 size_t n_tasks = (used_region().word_size() + task_size - 1)/task_size;
ysr@775 2981 assert((n_tasks == 0) == used_region().is_empty(), "n_tasks incorrect");
ysr@775 2982 assert(n_tasks == 0 ||
ysr@775 2983 ((used_region().start() + (n_tasks - 1)*task_size < used_region().end()) &&
ysr@775 2984 (used_region().start() + n_tasks*task_size >= used_region().end())),
ysr@775 2985 "n_tasks calculation incorrect");
duke@435 2986 SequentialSubTasksDone* pst = conc_par_seq_tasks();
duke@435 2987 assert(!pst->valid(), "Clobbering existing data?");
jmasa@2188 2988 // Sets the condition for completion of the subtask (how many threads
jmasa@2188 2989 // need to finish in order to be done).
jmasa@2188 2990 pst->set_n_threads(n_threads);
duke@435 2991 pst->set_n_tasks((int)n_tasks);
duke@435 2992 }
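// Example of the task-count arithmetic above (the task size is an illustrative
// value): with a used region of 1,000,000 words and rescan_task_size() == 4096
// words, n_tasks == (1000000 + 4096 - 1) / 4096 == 245 -- the usual round-up
// division -- and the asserts check that task 244 still starts inside the used
// region while 245 tasks are enough to cover it.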
duke@435 2993
duke@435 2994 // Set up the space's par_seq_tasks structure for work claiming
duke@435 2995 // for parallel concurrent marking. See CMSConcMarkTask where this is currently used.
duke@435 2996 void
duke@435 2997 CompactibleFreeListSpace::
duke@435 2998 initialize_sequential_subtasks_for_marking(int n_threads,
duke@435 2999 HeapWord* low) {
duke@435 3000 // The "size" of each task is fixed according to rescan_task_size.
duke@435 3001 assert(n_threads > 0, "Unexpected n_threads argument");
duke@435 3002 const size_t task_size = marking_task_size();
duke@435 3003 assert(task_size > CardTableModRefBS::card_size_in_words &&
duke@435 3004 (task_size % CardTableModRefBS::card_size_in_words == 0),
duke@435 3005 "Otherwise arithmetic below would be incorrect");
duke@435 3006 MemRegion span = _gen->reserved();
duke@435 3007 if (low != NULL) {
duke@435 3008 if (span.contains(low)) {
duke@435 3009 // Align low down to a card boundary so that
duke@435 3010 // we can use block_offset_careful() on span boundaries.
duke@435 3011 HeapWord* aligned_low = (HeapWord*)align_size_down((uintptr_t)low,
duke@435 3012 CardTableModRefBS::card_size);
duke@435 3013 // Clip span prefix at aligned_low
duke@435 3014 span = span.intersection(MemRegion(aligned_low, span.end()));
duke@435 3015 } else if (low > span.end()) {
duke@435 3016 span = MemRegion(low, low); // Null region
duke@435 3017 } // else use entire span
duke@435 3018 }
duke@435 3019 assert(span.is_empty() ||
duke@435 3020 ((uintptr_t)span.start() % CardTableModRefBS::card_size == 0),
duke@435 3021 "span should start at a card boundary");
duke@435 3022 size_t n_tasks = (span.word_size() + task_size - 1)/task_size;
duke@435 3023 assert((n_tasks == 0) == span.is_empty(), "Inconsistency");
duke@435 3024 assert(n_tasks == 0 ||
duke@435 3025 ((span.start() + (n_tasks - 1)*task_size < span.end()) &&
duke@435 3026 (span.start() + n_tasks*task_size >= span.end())),
ysr@775 3027 "n_tasks calculation incorrect");
duke@435 3028 SequentialSubTasksDone* pst = conc_par_seq_tasks();
duke@435 3029 assert(!pst->valid(), "Clobbering existing data?");
jmasa@2188 3030 // Sets the condition for completion of the subtask (how many threads
jmasa@2188 3031 // need to finish in order to be done).
jmasa@2188 3032 pst->set_n_threads(n_threads);
duke@435 3033 pst->set_n_tasks((int)n_tasks);
duke@435 3034 }
