src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp

author:      goetz
date:        Tue, 29 Apr 2014 15:17:27 +0200
changeset:   6911:ce8f6bb717c9
parent:      6680:78bbf4d43a14
child:       6912:c49dcaf78a65
permissions: -rw-r--r--

8042195: Introduce umbrella header orderAccess.inline.hpp.
Reviewed-by: dholmes, kvn, stefank, twisti

duke@435 1 /*
drchase@6680 2 * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
duke@435 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
duke@435 4 *
duke@435 5 * This code is free software; you can redistribute it and/or modify it
duke@435 6 * under the terms of the GNU General Public License version 2 only, as
duke@435 7 * published by the Free Software Foundation.
duke@435 8 *
duke@435 9 * This code is distributed in the hope that it will be useful, but WITHOUT
duke@435 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
duke@435 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
duke@435 12 * version 2 for more details (a copy is included in the LICENSE file that
duke@435 13 * accompanied this code).
duke@435 14 *
duke@435 15 * You should have received a copy of the GNU General Public License version
duke@435 16 * 2 along with this work; if not, write to the Free Software Foundation,
duke@435 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
duke@435 18 *
trims@1907 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
trims@1907 20 * or visit www.oracle.com if you need additional information or have any
trims@1907 21 * questions.
duke@435 22 *
duke@435 23 */
duke@435 24
stefank@2314 25 #include "precompiled.hpp"
stefank@2314 26 #include "gc_implementation/concurrentMarkSweep/cmsLockVerifier.hpp"
stefank@2314 27 #include "gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp"
stefank@2314 28 #include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.inline.hpp"
stefank@2314 29 #include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.hpp"
stefank@2314 30 #include "gc_implementation/shared/liveRange.hpp"
stefank@2314 31 #include "gc_implementation/shared/spaceDecorator.hpp"
coleenp@4037 32 #include "gc_interface/collectedHeap.inline.hpp"
stefank@2314 33 #include "memory/allocation.inline.hpp"
stefank@2314 34 #include "memory/blockOffsetTable.inline.hpp"
stefank@2314 35 #include "memory/resourceArea.hpp"
stefank@2314 36 #include "memory/universe.inline.hpp"
stefank@2314 37 #include "oops/oop.inline.hpp"
stefank@2314 38 #include "runtime/globals.hpp"
stefank@2314 39 #include "runtime/handles.inline.hpp"
stefank@2314 40 #include "runtime/init.hpp"
stefank@2314 41 #include "runtime/java.hpp"
goetz@6911 42 #include "runtime/orderAccess.inline.hpp"
stefank@2314 43 #include "runtime/vmThread.hpp"
stefank@2314 44 #include "utilities/copy.hpp"
duke@435 45
duke@435 46 /////////////////////////////////////////////////////////////////////////
duke@435 47 //// CompactibleFreeListSpace
duke@435 48 /////////////////////////////////////////////////////////////////////////
duke@435 49
duke@435 50 // highest ranked free list lock rank
duke@435 51 int CompactibleFreeListSpace::_lockRank = Mutex::leaf + 3;
duke@435 52
kvn@1926 53 // Defaults are 0 so things will break badly if incorrectly initialized.
ysr@3264 54 size_t CompactibleFreeListSpace::IndexSetStart = 0;
ysr@3264 55 size_t CompactibleFreeListSpace::IndexSetStride = 0;
kvn@1926 56
kvn@1926 57 size_t MinChunkSize = 0;
kvn@1926 58
kvn@1926 59 void CompactibleFreeListSpace::set_cms_values() {
kvn@1926 60 // Set CMS global values
kvn@1926 61 assert(MinChunkSize == 0, "already set");
brutisso@3807 62
brutisso@3807 63 // MinChunkSize should be a multiple of MinObjAlignment and be large enough
brutisso@3807 64 // for chunks to contain a FreeChunk.
brutisso@3807 65 size_t min_chunk_size_in_bytes = align_size_up(sizeof(FreeChunk), MinObjAlignmentInBytes);
brutisso@3807 66 MinChunkSize = min_chunk_size_in_bytes / BytesPerWord;
kvn@1926 67
kvn@1926 68 assert(IndexSetStart == 0 && IndexSetStride == 0, "already set");
ysr@3264 69 IndexSetStart = MinChunkSize;
kvn@1926 70 IndexSetStride = MinObjAlignment;
kvn@1926 71 }
kvn@1926 72
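// Illustrative worked example (not part of the original file): on a
// hypothetical LP64 build where sizeof(FreeChunk) is 24 bytes and
// MinObjAlignmentInBytes is 8, set_cms_values() above would compute
//   min_chunk_size_in_bytes = align_size_up(24, 8) = 24
//   MinChunkSize            = 24 / BytesPerWord    = 3 heap words
// and then IndexSetStart = 3 and IndexSetStride = MinObjAlignment = 1.
// The concrete numbers vary by platform and alignment settings; only the
// relationships between these quantities matter for the code below.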
duke@435 73 // Constructor
duke@435 74 CompactibleFreeListSpace::CompactibleFreeListSpace(BlockOffsetSharedArray* bs,
duke@435 75 MemRegion mr, bool use_adaptive_freelists,
jmasa@3730 76 FreeBlockDictionary<FreeChunk>::DictionaryChoice dictionaryChoice) :
duke@435 77 _dictionaryChoice(dictionaryChoice),
duke@435 78 _adaptive_freelists(use_adaptive_freelists),
duke@435 79 _bt(bs, mr),
duke@435 80 // free list locks are in the range of values taken by _lockRank
duke@435 81 // This range currently is [_leaf+2, _leaf+3]
duke@435 82 // Note: this requires that CFLspace c'tors
duke@435 83 // are called serially in the order in which the locks
duke@435 84 // are acquired in the program text. This is true today.
duke@435 85 _freelistLock(_lockRank--, "CompactibleFreeListSpace._lock", true),
duke@435 86 _parDictionaryAllocLock(Mutex::leaf - 1, // == rank(ExpandHeap_lock) - 1
duke@435 87 "CompactibleFreeListSpace._dict_par_lock", true),
duke@435 88 _rescan_task_size(CardTableModRefBS::card_size_in_words * BitsPerWord *
duke@435 89 CMSRescanMultiple),
duke@435 90 _marking_task_size(CardTableModRefBS::card_size_in_words * BitsPerWord *
duke@435 91 CMSConcMarkMultiple),
duke@435 92 _collector(NULL)
duke@435 93 {
jmasa@3730 94 assert(sizeof(FreeChunk) / BytesPerWord <= MinChunkSize,
jmasa@4196 95 "FreeChunk is larger than expected");
duke@435 96 _bt.set_space(this);
jmasa@698 97 initialize(mr, SpaceDecorator::Clear, SpaceDecorator::Mangle);
duke@435 98 // We have all of "mr", all of which we place in the dictionary
duke@435 99 // as one big chunk. We'll need to decide here which of several
duke@435 100 // possible alternative dictionary implementations to use. For
duke@435 101 // now the choice is easy, since we have only one working
duke@435 102 // implementation, namely, the simple binary tree (splaying
duke@435 103 // temporarily disabled).
duke@435 104 switch (dictionaryChoice) {
jmasa@4196 105 case FreeBlockDictionary<FreeChunk>::dictionaryBinaryTree:
jmasa@4488 106 _dictionary = new AFLBinaryTreeDictionary(mr);
jmasa@4196 107 break;
jmasa@3730 108 case FreeBlockDictionary<FreeChunk>::dictionarySplayTree:
jmasa@3730 109 case FreeBlockDictionary<FreeChunk>::dictionarySkipList:
duke@435 110 default:
duke@435 111 warning("dictionaryChoice: selected option not understood; using"
duke@435 112 " default BinaryTreeDictionary implementation instead.");
duke@435 113 }
duke@435 114 assert(_dictionary != NULL, "CMS dictionary initialization");
duke@435 115 // The indexed free lists are initially all empty and are lazily
duke@435 116 // filled in on demand. Initialize the array elements to NULL.
duke@435 117 initializeIndexedFreeListArray();
duke@435 118
duke@435 119 // Not using adaptive free lists assumes that allocation is first
duke@435 120 // from the linAB's. Also a cms perm gen which can be compacted
duke@435 121 // has to have the klass's klassKlass allocated at a lower
duke@435 122 // address in the heap than the klass so that the klassKlass is
duke@435 123 // moved to its new location before the klass is moved.
duke@435 124 // Set the _refillSize for the linear allocation blocks
duke@435 125 if (!use_adaptive_freelists) {
jmasa@4488 126 FreeChunk* fc = _dictionary->get_chunk(mr.word_size(),
jmasa@4488 127 FreeBlockDictionary<FreeChunk>::atLeast);
duke@435 128 // The small linAB initially has all the space and will allocate
duke@435 129 // a chunk of any size.
duke@435 130 HeapWord* addr = (HeapWord*) fc;
duke@435 131 _smallLinearAllocBlock.set(addr, fc->size() ,
duke@435 132 1024*SmallForLinearAlloc, fc->size());
duke@435 133 // Note that _unallocated_block is not updated here.
duke@435 134 // Allocations from the linear allocation block should
duke@435 135 // update it.
duke@435 136 } else {
duke@435 137 _smallLinearAllocBlock.set(0, 0, 1024*SmallForLinearAlloc,
duke@435 138 SmallForLinearAlloc);
duke@435 139 }
duke@435 140 // CMSIndexedFreeListReplenish should be at least 1
duke@435 141 CMSIndexedFreeListReplenish = MAX2((uintx)1, CMSIndexedFreeListReplenish);
duke@435 142 _promoInfo.setSpace(this);
duke@435 143 if (UseCMSBestFit) {
duke@435 144 _fitStrategy = FreeBlockBestFitFirst;
duke@435 145 } else {
duke@435 146 _fitStrategy = FreeBlockStrategyNone;
duke@435 147 }
ysr@3220 148 check_free_list_consistency();
duke@435 149
duke@435 150 // Initialize locks for parallel case.
jmasa@2188 151
jmasa@2188 152 if (CollectedHeap::use_parallel_gc_threads()) {
duke@435 153 for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
duke@435 154 _indexedFreeListParLocks[i] = new Mutex(Mutex::leaf - 1, // == ExpandHeap_lock - 1
duke@435 155 "a freelist par lock",
duke@435 156 true);
duke@435 157 DEBUG_ONLY(
duke@435 158 _indexedFreeList[i].set_protecting_lock(_indexedFreeListParLocks[i]);
duke@435 159 )
duke@435 160 }
duke@435 161 _dictionary->set_par_lock(&_parDictionaryAllocLock);
duke@435 162 }
duke@435 163 }
duke@435 164
duke@435 165 // Like CompactibleSpace forward() but always calls cross_threshold() to
duke@435 166 // update the block offset table. Removed initialize_threshold call because
duke@435 167 // CFLS does not use a block offset array for contiguous spaces.
duke@435 168 HeapWord* CompactibleFreeListSpace::forward(oop q, size_t size,
duke@435 169 CompactPoint* cp, HeapWord* compact_top) {
duke@435 170 // q is alive
duke@435 171 // First check if we should switch compaction space
duke@435 172 assert(this == cp->space, "'this' should be current compaction space.");
duke@435 173 size_t compaction_max_size = pointer_delta(end(), compact_top);
duke@435 174 assert(adjustObjectSize(size) == cp->space->adjust_object_size_v(size),
duke@435 175 "virtual adjustObjectSize_v() method is not correct");
duke@435 176 size_t adjusted_size = adjustObjectSize(size);
duke@435 177 assert(compaction_max_size >= MinChunkSize || compaction_max_size == 0,
duke@435 178 "no small fragments allowed");
duke@435 179 assert(minimum_free_block_size() == MinChunkSize,
duke@435 180 "for de-virtualized reference below");
duke@435 181 // Can't leave a nonzero size, residual fragment smaller than MinChunkSize
duke@435 182 if (adjusted_size + MinChunkSize > compaction_max_size &&
duke@435 183 adjusted_size != compaction_max_size) {
duke@435 184 do {
duke@435 185 // switch to next compaction space
duke@435 186 cp->space->set_compaction_top(compact_top);
duke@435 187 cp->space = cp->space->next_compaction_space();
duke@435 188 if (cp->space == NULL) {
duke@435 189 cp->gen = GenCollectedHeap::heap()->prev_gen(cp->gen);
duke@435 190 assert(cp->gen != NULL, "compaction must succeed");
duke@435 191 cp->space = cp->gen->first_compaction_space();
duke@435 192 assert(cp->space != NULL, "generation must have a first compaction space");
duke@435 193 }
duke@435 194 compact_top = cp->space->bottom();
duke@435 195 cp->space->set_compaction_top(compact_top);
duke@435 196 // The correct adjusted_size may not be the same as that for this method
duke@435 197 // (i.e., cp->space may no longer be "this"), so adjust the size again.
duke@435 198 // Use the virtual method here, which was not used above in order to
duke@435 199 // save the virtual dispatch.
duke@435 200 adjusted_size = cp->space->adjust_object_size_v(size);
duke@435 201 compaction_max_size = pointer_delta(cp->space->end(), compact_top);
duke@435 202 assert(cp->space->minimum_free_block_size() == 0, "just checking");
duke@435 203 } while (adjusted_size > compaction_max_size);
duke@435 204 }
duke@435 205
duke@435 206 // store the forwarding pointer into the mark word
duke@435 207 if ((HeapWord*)q != compact_top) {
duke@435 208 q->forward_to(oop(compact_top));
duke@435 209 assert(q->is_gc_marked(), "encoding the pointer should preserve the mark");
duke@435 210 } else {
duke@435 211 // if the object isn't moving we can just set the mark to the default
duke@435 212 // mark and handle it specially later on.
duke@435 213 q->init_mark();
duke@435 214 assert(q->forwardee() == NULL, "should be forwarded to NULL");
duke@435 215 }
duke@435 216
duke@435 217 compact_top += adjusted_size;
duke@435 218
duke@435 219 // we need to update the offset table so that the beginnings of objects can be
duke@435 220 // found during scavenge. Note that we are updating the offset table based on
duke@435 221 // where the object will be once the compaction phase finishes.
duke@435 222
duke@435 223 // Always call cross_threshold(). A contiguous space can only call it when
duke@435 224 // the compaction_top exceeds the current threshold but not for an
duke@435 225 // non-contiguous space.
duke@435 226 cp->threshold =
duke@435 227 cp->space->cross_threshold(compact_top - adjusted_size, compact_top);
duke@435 228 return compact_top;
duke@435 229 }
duke@435 230
duke@435 231 // A modified copy of OffsetTableContigSpace::cross_threshold() with _offsets -> _bt
duke@435 232 // and use of single_block instead of alloc_block. The name here is not really
duke@435 233 // appropriate - maybe a more general name could be invented for both the
duke@435 234 // contiguous and noncontiguous spaces.
duke@435 235
duke@435 236 HeapWord* CompactibleFreeListSpace::cross_threshold(HeapWord* start, HeapWord* the_end) {
duke@435 237 _bt.single_block(start, the_end);
duke@435 238 return end();
duke@435 239 }
duke@435 240
duke@435 241 // Initialize them to NULL.
duke@435 242 void CompactibleFreeListSpace::initializeIndexedFreeListArray() {
duke@435 243 for (size_t i = 0; i < IndexSetSize; i++) {
duke@435 244 // Note that on platforms where objects are double word aligned,
duke@435 245 // the odd array elements are not used. It is convenient, however,
duke@435 246 // to map directly from the object size to the array element.
duke@435 247 _indexedFreeList[i].reset(IndexSetSize);
duke@435 248 _indexedFreeList[i].set_size(i);
duke@435 249 assert(_indexedFreeList[i].count() == 0, "reset check failed");
duke@435 250 assert(_indexedFreeList[i].head() == NULL, "reset check failed");
duke@435 251 assert(_indexedFreeList[i].tail() == NULL, "reset check failed");
duke@435 252 assert(_indexedFreeList[i].hint() == IndexSetSize, "reset check failed");
duke@435 253 }
duke@435 254 }
duke@435 255
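// Illustrative note (not part of the original file): on a hypothetical
// 32-bit build with 8-byte object alignment, IndexSetStride would be 2, so
// only the even-indexed lists (e.g. 4, 6, 8, ...) are ever populated and the
// loops that step by IndexSetStride skip the unused odd slots. The
// initializer above still touches every element so that the mapping from a
// block size in words to its array index stays a direct one.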
duke@435 256 void CompactibleFreeListSpace::resetIndexedFreeListArray() {
ysr@3264 257 for (size_t i = 1; i < IndexSetSize; i++) {
duke@435 258 assert(_indexedFreeList[i].size() == (size_t) i,
duke@435 259 "Indexed free list sizes are incorrect");
duke@435 260 _indexedFreeList[i].reset(IndexSetSize);
duke@435 261 assert(_indexedFreeList[i].count() == 0, "reset check failed");
duke@435 262 assert(_indexedFreeList[i].head() == NULL, "reset check failed");
duke@435 263 assert(_indexedFreeList[i].tail() == NULL, "reset check failed");
duke@435 264 assert(_indexedFreeList[i].hint() == IndexSetSize, "reset check failed");
duke@435 265 }
duke@435 266 }
duke@435 267
duke@435 268 void CompactibleFreeListSpace::reset(MemRegion mr) {
duke@435 269 resetIndexedFreeListArray();
duke@435 270 dictionary()->reset();
duke@435 271 if (BlockOffsetArrayUseUnallocatedBlock) {
duke@435 272 assert(end() == mr.end(), "We are compacting to the bottom of CMS gen");
duke@435 273 // Everything's allocated until proven otherwise.
duke@435 274 _bt.set_unallocated_block(end());
duke@435 275 }
duke@435 276 if (!mr.is_empty()) {
duke@435 277 assert(mr.word_size() >= MinChunkSize, "Chunk size is too small");
duke@435 278 _bt.single_block(mr.start(), mr.word_size());
duke@435 279 FreeChunk* fc = (FreeChunk*) mr.start();
jmasa@3732 280 fc->set_size(mr.word_size());
duke@435 281 if (mr.word_size() >= IndexSetSize ) {
duke@435 282 returnChunkToDictionary(fc);
duke@435 283 } else {
duke@435 284 _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
jmasa@3732 285 _indexedFreeList[mr.word_size()].return_chunk_at_head(fc);
duke@435 286 }
brutisso@5163 287 coalBirth(mr.word_size());
duke@435 288 }
duke@435 289 _promoInfo.reset();
duke@435 290 _smallLinearAllocBlock._ptr = NULL;
duke@435 291 _smallLinearAllocBlock._word_size = 0;
duke@435 292 }
duke@435 293
duke@435 294 void CompactibleFreeListSpace::reset_after_compaction() {
duke@435 295 // Reset the space to the new reality - one free chunk.
duke@435 296 MemRegion mr(compaction_top(), end());
duke@435 297 reset(mr);
duke@435 298 // Now refill the linear allocation block(s) if possible.
duke@435 299 if (_adaptive_freelists) {
duke@435 300 refillLinearAllocBlocksIfNeeded();
duke@435 301 } else {
duke@435 302 // Place as much of mr in the linAB as we can get,
duke@435 303 // provided it was big enough to go into the dictionary.
jmasa@3732 304 FreeChunk* fc = dictionary()->find_largest_dict();
duke@435 305 if (fc != NULL) {
duke@435 306 assert(fc->size() == mr.word_size(),
duke@435 307 "Why was the chunk broken up?");
duke@435 308 removeChunkFromDictionary(fc);
duke@435 309 HeapWord* addr = (HeapWord*) fc;
duke@435 310 _smallLinearAllocBlock.set(addr, fc->size() ,
duke@435 311 1024*SmallForLinearAlloc, fc->size());
duke@435 312 // Note that _unallocated_block is not updated here.
duke@435 313 }
duke@435 314 }
duke@435 315 }
duke@435 316
duke@435 317 // Walks the entire dictionary, returning a coterminal
duke@435 318 // chunk, if it exists. Use with caution since it involves
duke@435 319 // a potentially complete walk of a potentially large tree.
duke@435 320 FreeChunk* CompactibleFreeListSpace::find_chunk_at_end() {
duke@435 321
duke@435 322 assert_lock_strong(&_freelistLock);
duke@435 323
duke@435 324 return dictionary()->find_chunk_ends_at(end());
duke@435 325 }
duke@435 326
duke@435 327
duke@435 328 #ifndef PRODUCT
duke@435 329 void CompactibleFreeListSpace::initializeIndexedFreeListArrayReturnedBytes() {
duke@435 330 for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
jmasa@3732 331 _indexedFreeList[i].allocation_stats()->set_returned_bytes(0);
duke@435 332 }
duke@435 333 }
duke@435 334
duke@435 335 size_t CompactibleFreeListSpace::sumIndexedFreeListArrayReturnedBytes() {
duke@435 336 size_t sum = 0;
duke@435 337 for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
jmasa@3732 338 sum += _indexedFreeList[i].allocation_stats()->returned_bytes();
duke@435 339 }
duke@435 340 return sum;
duke@435 341 }
duke@435 342
duke@435 343 size_t CompactibleFreeListSpace::totalCountInIndexedFreeLists() const {
duke@435 344 size_t count = 0;
ysr@3264 345 for (size_t i = IndexSetStart; i < IndexSetSize; i++) {
duke@435 346 debug_only(
duke@435 347 ssize_t total_list_count = 0;
duke@435 348 for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
duke@435 349 fc = fc->next()) {
duke@435 350 total_list_count++;
duke@435 351 }
duke@435 352 assert(total_list_count == _indexedFreeList[i].count(),
duke@435 353 "Count in list is incorrect");
duke@435 354 )
duke@435 355 count += _indexedFreeList[i].count();
duke@435 356 }
duke@435 357 return count;
duke@435 358 }
duke@435 359
duke@435 360 size_t CompactibleFreeListSpace::totalCount() {
duke@435 361 size_t num = totalCountInIndexedFreeLists();
jmasa@3732 362 num += dictionary()->total_count();
duke@435 363 if (_smallLinearAllocBlock._word_size != 0) {
duke@435 364 num++;
duke@435 365 }
duke@435 366 return num;
duke@435 367 }
duke@435 368 #endif
duke@435 369
duke@435 370 bool CompactibleFreeListSpace::is_free_block(const HeapWord* p) const {
duke@435 371 FreeChunk* fc = (FreeChunk*) p;
jmasa@3732 372 return fc->is_free();
duke@435 373 }
duke@435 374
duke@435 375 size_t CompactibleFreeListSpace::used() const {
duke@435 376 return capacity() - free();
duke@435 377 }
duke@435 378
duke@435 379 size_t CompactibleFreeListSpace::free() const {
duke@435 380 // "MT-safe, but not MT-precise"(TM), if you will: i.e.
duke@435 381 // if you do this while the structures are in flux you
duke@435 382 // may get an approximate answer only; for instance
duke@435 383 // because there is concurrent allocation either
duke@435 384 // directly by mutators or for promotion during a GC.
duke@435 385 // It's "MT-safe", however, in the sense that you are guaranteed
duke@435 386 // not to crash and burn, for instance, because of walking
duke@435 387 // pointers that could disappear as you were walking them.
duke@435 388 // The approximation is because the various components
duke@435 389 // that are read below are not read atomically (and
duke@435 390 // further the computation of totalSizeInIndexedFreeLists()
duke@435 391 // is itself a non-atomic computation). The normal use of
duke@435 392 // this is during a resize operation at the end of GC
duke@435 393 // and at that time you are guaranteed to get the
duke@435 394 // correct actual value. However, for instance, this is
duke@435 395 // also read completely asynchronously by the "perf-sampler"
duke@435 396 // that supports jvmstat, and you are apt to see the values
duke@435 397 // flicker in such cases.
duke@435 398 assert(_dictionary != NULL, "No _dictionary?");
jmasa@3732 399 return (_dictionary->total_chunk_size(DEBUG_ONLY(freelistLock())) +
duke@435 400 totalSizeInIndexedFreeLists() +
duke@435 401 _smallLinearAllocBlock._word_size) * HeapWordSize;
duke@435 402 }
duke@435 403
duke@435 404 size_t CompactibleFreeListSpace::max_alloc_in_words() const {
duke@435 405 assert(_dictionary != NULL, "No _dictionary?");
duke@435 406 assert_locked();
jmasa@3732 407 size_t res = _dictionary->max_chunk_size();
duke@435 408 res = MAX2(res, MIN2(_smallLinearAllocBlock._word_size,
duke@435 409 (size_t) SmallForLinearAlloc - 1));
duke@435 410 // XXX the following could potentially be pretty slow;
duke@435 411 // should one, pessimistically for the rare cases when res
duke@435 412 // calculated above is less than IndexSetSize,
duke@435 413 // just return res calculated above? My reasoning was that
duke@435 414 // those cases will be so rare that the extra time spent doesn't
duke@435 415 // really matter....
duke@435 416 // Note: do not change the loop test i >= res + IndexSetStride
duke@435 417 // to i > res below, because i is unsigned and res may be zero.
duke@435 418 for (size_t i = IndexSetSize - 1; i >= res + IndexSetStride;
duke@435 419 i -= IndexSetStride) {
duke@435 420 if (_indexedFreeList[i].head() != NULL) {
duke@435 421 assert(_indexedFreeList[i].count() != 0, "Inconsistent FreeList");
duke@435 422 return i;
duke@435 423 }
duke@435 424 }
duke@435 425 return res;
duke@435 426 }
duke@435 427
ysr@2071 428 void LinearAllocBlock::print_on(outputStream* st) const {
ysr@2071 429 st->print_cr(" LinearAllocBlock: ptr = " PTR_FORMAT ", word_size = " SIZE_FORMAT
ysr@2071 430 ", refillsize = " SIZE_FORMAT ", allocation_size_limit = " SIZE_FORMAT,
drchase@6680 431 p2i(_ptr), _word_size, _refillSize, _allocation_size_limit);
ysr@2071 432 }
ysr@2071 433
ysr@2071 434 void CompactibleFreeListSpace::print_on(outputStream* st) const {
ysr@2071 435 st->print_cr("COMPACTIBLE FREELIST SPACE");
ysr@2071 436 st->print_cr(" Space:");
ysr@2071 437 Space::print_on(st);
ysr@2071 438
ysr@2071 439 st->print_cr("promoInfo:");
ysr@2071 440 _promoInfo.print_on(st);
ysr@2071 441
ysr@2071 442 st->print_cr("_smallLinearAllocBlock");
ysr@2071 443 _smallLinearAllocBlock.print_on(st);
ysr@2071 444
ysr@2071 445 // dump_memory_block(_smallLinearAllocBlock->_ptr, 128);
ysr@2071 446
ysr@2071 447 st->print_cr(" _fitStrategy = %s, _adaptive_freelists = %s",
ysr@2071 448 _fitStrategy?"true":"false", _adaptive_freelists?"true":"false");
ysr@2071 449 }
ysr@2071 450
ysr@1580 451 void CompactibleFreeListSpace::print_indexed_free_lists(outputStream* st)
ysr@1580 452 const {
ysr@1580 453 reportIndexedFreeListStatistics();
ysr@1580 454 gclog_or_tty->print_cr("Layout of Indexed Freelists");
ysr@1580 455 gclog_or_tty->print_cr("---------------------------");
jmasa@4196 456 AdaptiveFreeList<FreeChunk>::print_labels_on(st, "size");
ysr@1580 457 for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
ysr@1580 458 _indexedFreeList[i].print_on(gclog_or_tty);
ysr@1580 459 for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
ysr@1580 460 fc = fc->next()) {
ysr@1580 461 gclog_or_tty->print_cr("\t[" PTR_FORMAT "," PTR_FORMAT ") %s",
drchase@6680 462 p2i(fc), p2i((HeapWord*)fc + i),
ysr@1580 463 fc->cantCoalesce() ? "\t CC" : "");
ysr@1580 464 }
ysr@1580 465 }
ysr@1580 466 }
ysr@1580 467
ysr@1580 468 void CompactibleFreeListSpace::print_promo_info_blocks(outputStream* st)
ysr@1580 469 const {
ysr@1580 470 _promoInfo.print_on(st);
ysr@1580 471 }
ysr@1580 472
ysr@1580 473 void CompactibleFreeListSpace::print_dictionary_free_lists(outputStream* st)
ysr@1580 474 const {
jmasa@3732 475 _dictionary->report_statistics();
ysr@1580 476 st->print_cr("Layout of Freelists in Tree");
ysr@1580 477 st->print_cr("---------------------------");
ysr@1580 478 _dictionary->print_free_lists(st);
ysr@1580 479 }
ysr@1580 480
ysr@1580 481 class BlkPrintingClosure: public BlkClosure {
ysr@1580 482 const CMSCollector* _collector;
ysr@1580 483 const CompactibleFreeListSpace* _sp;
ysr@1580 484 const CMSBitMap* _live_bit_map;
ysr@1580 485 const bool _post_remark;
ysr@1580 486 outputStream* _st;
ysr@1580 487 public:
ysr@1580 488 BlkPrintingClosure(const CMSCollector* collector,
ysr@1580 489 const CompactibleFreeListSpace* sp,
ysr@1580 490 const CMSBitMap* live_bit_map,
ysr@1580 491 outputStream* st):
ysr@1580 492 _collector(collector),
ysr@1580 493 _sp(sp),
ysr@1580 494 _live_bit_map(live_bit_map),
ysr@1580 495 _post_remark(collector->abstract_state() > CMSCollector::FinalMarking),
ysr@1580 496 _st(st) { }
ysr@1580 497 size_t do_blk(HeapWord* addr);
ysr@1580 498 };
ysr@1580 499
ysr@1580 500 size_t BlkPrintingClosure::do_blk(HeapWord* addr) {
ysr@1580 501 size_t sz = _sp->block_size_no_stall(addr, _collector);
ysr@1580 502 assert(sz != 0, "Should always be able to compute a size");
ysr@1580 503 if (_sp->block_is_obj(addr)) {
ysr@1580 504 const bool dead = _post_remark && !_live_bit_map->isMarked(addr);
ysr@1580 505 _st->print_cr(PTR_FORMAT ": %s object of size " SIZE_FORMAT "%s",
drchase@6680 506 p2i(addr),
ysr@1580 507 dead ? "dead" : "live",
ysr@1580 508 sz,
ysr@1580 509 (!dead && CMSPrintObjectsInDump) ? ":" : ".");
ysr@1580 510 if (CMSPrintObjectsInDump && !dead) {
ysr@1580 511 oop(addr)->print_on(_st);
ysr@1580 512 _st->print_cr("--------------------------------------");
ysr@1580 513 }
ysr@1580 514 } else { // free block
ysr@1580 515 _st->print_cr(PTR_FORMAT ": free block of size " SIZE_FORMAT "%s",
drchase@6680 516 p2i(addr), sz, CMSPrintChunksInDump ? ":" : ".");
ysr@1580 517 if (CMSPrintChunksInDump) {
ysr@1580 518 ((FreeChunk*)addr)->print_on(_st);
ysr@1580 519 _st->print_cr("--------------------------------------");
ysr@1580 520 }
ysr@1580 521 }
ysr@1580 522 return sz;
ysr@1580 523 }
ysr@1580 524
ysr@1580 525 void CompactibleFreeListSpace::dump_at_safepoint_with_locks(CMSCollector* c,
ysr@1580 526 outputStream* st) {
ysr@1580 527 st->print_cr("\n=========================");
ysr@1580 528 st->print_cr("Block layout in CMS Heap:");
ysr@1580 529 st->print_cr("=========================");
ysr@1580 530 BlkPrintingClosure bpcl(c, this, c->markBitMap(), st);
ysr@1580 531 blk_iterate(&bpcl);
ysr@1580 532
ysr@1580 533 st->print_cr("\n=======================================");
ysr@1580 534 st->print_cr("Order & Layout of Promotion Info Blocks");
ysr@1580 535 st->print_cr("=======================================");
ysr@1580 536 print_promo_info_blocks(st);
ysr@1580 537
ysr@1580 538 st->print_cr("\n===========================");
ysr@1580 539 st->print_cr("Order of Indexed Free Lists");
ysr@1580 540 st->print_cr("===========================");
ysr@1580 541 print_indexed_free_lists(st);
ysr@1580 542
ysr@1580 543 st->print_cr("\n=================================");
ysr@1580 544 st->print_cr("Order of Free Lists in Dictionary");
ysr@1580 545 st->print_cr("=================================");
ysr@1580 546 print_dictionary_free_lists(st);
ysr@1580 547 }
ysr@1580 548
ysr@1580 549
duke@435 550 void CompactibleFreeListSpace::reportFreeListStatistics() const {
duke@435 551 assert_lock_strong(&_freelistLock);
duke@435 552 assert(PrintFLSStatistics != 0, "Reporting error");
jmasa@3732 553 _dictionary->report_statistics();
duke@435 554 if (PrintFLSStatistics > 1) {
duke@435 555 reportIndexedFreeListStatistics();
jmasa@3732 556 size_t total_size = totalSizeInIndexedFreeLists() +
jmasa@3732 557 _dictionary->total_chunk_size(DEBUG_ONLY(freelistLock()));
hseigel@4465 558 gclog_or_tty->print(" free=" SIZE_FORMAT " frag=%1.4f\n", total_size, flsFrag());
duke@435 559 }
duke@435 560 }
duke@435 561
duke@435 562 void CompactibleFreeListSpace::reportIndexedFreeListStatistics() const {
duke@435 563 assert_lock_strong(&_freelistLock);
duke@435 564 gclog_or_tty->print("Statistics for IndexedFreeLists:\n"
duke@435 565 "--------------------------------\n");
jmasa@3732 566 size_t total_size = totalSizeInIndexedFreeLists();
jmasa@3732 567 size_t free_blocks = numFreeBlocksInIndexedFreeLists();
drchase@6680 568 gclog_or_tty->print("Total Free Space: " SIZE_FORMAT "\n", total_size);
drchase@6680 569 gclog_or_tty->print("Max Chunk Size: " SIZE_FORMAT "\n", maxChunkSizeInIndexedFreeLists());
drchase@6680 570 gclog_or_tty->print("Number of Blocks: " SIZE_FORMAT "\n", free_blocks);
jmasa@3732 571 if (free_blocks != 0) {
drchase@6680 572 gclog_or_tty->print("Av. Block Size: " SIZE_FORMAT "\n", total_size/free_blocks);
duke@435 573 }
duke@435 574 }
duke@435 575
duke@435 576 size_t CompactibleFreeListSpace::numFreeBlocksInIndexedFreeLists() const {
duke@435 577 size_t res = 0;
duke@435 578 for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
duke@435 579 debug_only(
duke@435 580 ssize_t recount = 0;
duke@435 581 for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
duke@435 582 fc = fc->next()) {
duke@435 583 recount += 1;
duke@435 584 }
duke@435 585 assert(recount == _indexedFreeList[i].count(),
duke@435 586 "Incorrect count in list");
duke@435 587 )
duke@435 588 res += _indexedFreeList[i].count();
duke@435 589 }
duke@435 590 return res;
duke@435 591 }
duke@435 592
duke@435 593 size_t CompactibleFreeListSpace::maxChunkSizeInIndexedFreeLists() const {
duke@435 594 for (size_t i = IndexSetSize - 1; i != 0; i -= IndexSetStride) {
duke@435 595 if (_indexedFreeList[i].head() != NULL) {
duke@435 596 assert(_indexedFreeList[i].count() != 0, "Inconsistent FreeList");
duke@435 597 return (size_t)i;
duke@435 598 }
duke@435 599 }
duke@435 600 return 0;
duke@435 601 }
duke@435 602
duke@435 603 void CompactibleFreeListSpace::set_end(HeapWord* value) {
duke@435 604 HeapWord* prevEnd = end();
duke@435 605 assert(prevEnd != value, "unnecessary set_end call");
ysr@2071 606 assert(prevEnd == NULL || !BlockOffsetArrayUseUnallocatedBlock || value >= unallocated_block(),
ysr@2071 607 "New end is below unallocated block");
duke@435 608 _end = value;
duke@435 609 if (prevEnd != NULL) {
duke@435 610 // Resize the underlying block offset table.
duke@435 611 _bt.resize(pointer_delta(value, bottom()));
ysr@1580 612 if (value <= prevEnd) {
ysr@2071 613 assert(!BlockOffsetArrayUseUnallocatedBlock || value >= unallocated_block(),
ysr@2071 614 "New end is below unallocated block");
ysr@1580 615 } else {
ysr@1580 616 // Now, take this new chunk and add it to the free blocks.
ysr@1580 617 // Note that the BOT has not yet been updated for this block.
ysr@1580 618 size_t newFcSize = pointer_delta(value, prevEnd);
ysr@1580 619 // XXX This is REALLY UGLY and should be fixed up. XXX
ysr@1580 620 if (!_adaptive_freelists && _smallLinearAllocBlock._ptr == NULL) {
ysr@1580 621 // Mark the boundary of the new block in BOT
ysr@1580 622 _bt.mark_block(prevEnd, value);
ysr@1580 623 // put it all in the linAB
ysr@1580 624 if (ParallelGCThreads == 0) {
ysr@1580 625 _smallLinearAllocBlock._ptr = prevEnd;
ysr@1580 626 _smallLinearAllocBlock._word_size = newFcSize;
ysr@1580 627 repairLinearAllocBlock(&_smallLinearAllocBlock);
ysr@1580 628 } else { // ParallelGCThreads > 0
ysr@1580 629 MutexLockerEx x(parDictionaryAllocLock(),
ysr@1580 630 Mutex::_no_safepoint_check_flag);
ysr@1580 631 _smallLinearAllocBlock._ptr = prevEnd;
ysr@1580 632 _smallLinearAllocBlock._word_size = newFcSize;
ysr@1580 633 repairLinearAllocBlock(&_smallLinearAllocBlock);
ysr@1580 634 }
ysr@1580 635 // Births of chunks put into a LinAB are not recorded. Births
ysr@1580 636 // of chunks as they are allocated out of a LinAB are.
ysr@1580 637 } else {
ysr@1580 638 // Add the block to the free lists, if possible coalescing it
ysr@1580 639 // with the last free block, and update the BOT and census data.
ysr@1580 640 addChunkToFreeListsAtEndRecordingStats(prevEnd, newFcSize);
duke@435 641 }
duke@435 642 }
duke@435 643 }
duke@435 644 }
duke@435 645
duke@435 646 class FreeListSpace_DCTOC : public Filtering_DCTOC {
duke@435 647 CompactibleFreeListSpace* _cfls;
duke@435 648 CMSCollector* _collector;
duke@435 649 protected:
duke@435 650 // Override.
duke@435 651 #define walk_mem_region_with_cl_DECL(ClosureType) \
duke@435 652 virtual void walk_mem_region_with_cl(MemRegion mr, \
duke@435 653 HeapWord* bottom, HeapWord* top, \
duke@435 654 ClosureType* cl); \
duke@435 655 void walk_mem_region_with_cl_par(MemRegion mr, \
duke@435 656 HeapWord* bottom, HeapWord* top, \
duke@435 657 ClosureType* cl); \
duke@435 658 void walk_mem_region_with_cl_nopar(MemRegion mr, \
duke@435 659 HeapWord* bottom, HeapWord* top, \
duke@435 660 ClosureType* cl)
coleenp@4037 661 walk_mem_region_with_cl_DECL(ExtendedOopClosure);
duke@435 662 walk_mem_region_with_cl_DECL(FilteringClosure);
duke@435 663
duke@435 664 public:
duke@435 665 FreeListSpace_DCTOC(CompactibleFreeListSpace* sp,
duke@435 666 CMSCollector* collector,
coleenp@4037 667 ExtendedOopClosure* cl,
duke@435 668 CardTableModRefBS::PrecisionStyle precision,
duke@435 669 HeapWord* boundary) :
duke@435 670 Filtering_DCTOC(sp, cl, precision, boundary),
duke@435 671 _cfls(sp), _collector(collector) {}
duke@435 672 };
duke@435 673
duke@435 674 // We de-virtualize the block-related calls below, since we know that our
duke@435 675 // space is a CompactibleFreeListSpace.
jmasa@3294 676
duke@435 677 #define FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(ClosureType) \
duke@435 678 void FreeListSpace_DCTOC::walk_mem_region_with_cl(MemRegion mr, \
duke@435 679 HeapWord* bottom, \
duke@435 680 HeapWord* top, \
duke@435 681 ClosureType* cl) { \
jmasa@3294 682 bool is_par = SharedHeap::heap()->n_par_threads() > 0; \
jmasa@3294 683 if (is_par) { \
jmasa@3294 684 assert(SharedHeap::heap()->n_par_threads() == \
jmasa@3294 685 SharedHeap::heap()->workers()->active_workers(), "Mismatch"); \
duke@435 686 walk_mem_region_with_cl_par(mr, bottom, top, cl); \
duke@435 687 } else { \
duke@435 688 walk_mem_region_with_cl_nopar(mr, bottom, top, cl); \
duke@435 689 } \
duke@435 690 } \
duke@435 691 void FreeListSpace_DCTOC::walk_mem_region_with_cl_par(MemRegion mr, \
duke@435 692 HeapWord* bottom, \
duke@435 693 HeapWord* top, \
duke@435 694 ClosureType* cl) { \
duke@435 695 /* Skip parts that are before "mr", in case "block_start" sent us \
duke@435 696 back too far. */ \
duke@435 697 HeapWord* mr_start = mr.start(); \
duke@435 698 size_t bot_size = _cfls->CompactibleFreeListSpace::block_size(bottom); \
duke@435 699 HeapWord* next = bottom + bot_size; \
duke@435 700 while (next < mr_start) { \
duke@435 701 bottom = next; \
duke@435 702 bot_size = _cfls->CompactibleFreeListSpace::block_size(bottom); \
duke@435 703 next = bottom + bot_size; \
duke@435 704 } \
duke@435 705 \
duke@435 706 while (bottom < top) { \
duke@435 707 if (_cfls->CompactibleFreeListSpace::block_is_obj(bottom) && \
duke@435 708 !_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks( \
duke@435 709 oop(bottom)) && \
duke@435 710 !_collector->CMSCollector::is_dead_obj(oop(bottom))) { \
duke@435 711 size_t word_sz = oop(bottom)->oop_iterate(cl, mr); \
duke@435 712 bottom += _cfls->adjustObjectSize(word_sz); \
duke@435 713 } else { \
duke@435 714 bottom += _cfls->CompactibleFreeListSpace::block_size(bottom); \
duke@435 715 } \
duke@435 716 } \
duke@435 717 } \
duke@435 718 void FreeListSpace_DCTOC::walk_mem_region_with_cl_nopar(MemRegion mr, \
duke@435 719 HeapWord* bottom, \
duke@435 720 HeapWord* top, \
duke@435 721 ClosureType* cl) { \
duke@435 722 /* Skip parts that are before "mr", in case "block_start" sent us \
duke@435 723 back too far. */ \
duke@435 724 HeapWord* mr_start = mr.start(); \
duke@435 725 size_t bot_size = _cfls->CompactibleFreeListSpace::block_size_nopar(bottom); \
duke@435 726 HeapWord* next = bottom + bot_size; \
duke@435 727 while (next < mr_start) { \
duke@435 728 bottom = next; \
duke@435 729 bot_size = _cfls->CompactibleFreeListSpace::block_size_nopar(bottom); \
duke@435 730 next = bottom + bot_size; \
duke@435 731 } \
duke@435 732 \
duke@435 733 while (bottom < top) { \
duke@435 734 if (_cfls->CompactibleFreeListSpace::block_is_obj_nopar(bottom) && \
duke@435 735 !_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks( \
duke@435 736 oop(bottom)) && \
duke@435 737 !_collector->CMSCollector::is_dead_obj(oop(bottom))) { \
duke@435 738 size_t word_sz = oop(bottom)->oop_iterate(cl, mr); \
duke@435 739 bottom += _cfls->adjustObjectSize(word_sz); \
duke@435 740 } else { \
duke@435 741 bottom += _cfls->CompactibleFreeListSpace::block_size_nopar(bottom); \
duke@435 742 } \
duke@435 743 } \
duke@435 744 }
duke@435 745
duke@435 746 // (There are only two of these, rather than N, because the split is due
duke@435 747 // only to the introduction of the FilteringClosure, a local part of the
duke@435 748 // impl of this abstraction.)
coleenp@4037 749 FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(ExtendedOopClosure)
duke@435 750 FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(FilteringClosure)
duke@435 751
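// Illustrative, self-contained sketch (not part of this file) showing the
// de-virtualization idiom the macro above relies on: explicitly qualifying a
// member-function call with a class name binds it at compile time, skipping
// the vtable. The names DemoSpace, DemoCFLS and demo_sum are hypothetical
// stand-ins for Space/CompactibleFreeListSpace.
#include <cstddef>

struct DemoSpace {
  virtual ~DemoSpace() {}
  virtual std::size_t block_size(const void* p) const = 0;
};

struct DemoCFLS : public DemoSpace {
  virtual std::size_t block_size(const void* /*p*/) const { return 2; }
};

std::size_t demo_sum(DemoCFLS* sp, const void* p) {
  std::size_t a = sp->block_size(p);            // virtual dispatch through the vtable
  std::size_t b = sp->DemoCFLS::block_size(p);  // statically bound call, analogous to
                                                // _cfls->CompactibleFreeListSpace::block_size(bottom)
  return a + b;
}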
duke@435 752 DirtyCardToOopClosure*
coleenp@4037 753 CompactibleFreeListSpace::new_dcto_cl(ExtendedOopClosure* cl,
duke@435 754 CardTableModRefBS::PrecisionStyle precision,
duke@435 755 HeapWord* boundary) {
duke@435 756 return new FreeListSpace_DCTOC(this, _collector, cl, precision, boundary);
duke@435 757 }
duke@435 758
duke@435 759
duke@435 760 // Note on locking for the space iteration functions:
duke@435 761 // since the collector's iteration activities are concurrent with
duke@435 762 // allocation activities by mutators, absent a suitable mutual exclusion
duke@435 763 // mechanism the iterators may go awry. For instance a block being iterated
duke@435 764 // may suddenly be allocated or divided up and part of it allocated and
duke@435 765 // so on.
duke@435 766
duke@435 767 // Apply the given closure to each block in the space.
duke@435 768 void CompactibleFreeListSpace::blk_iterate_careful(BlkClosureCareful* cl) {
duke@435 769 assert_lock_strong(freelistLock());
duke@435 770 HeapWord *cur, *limit;
duke@435 771 for (cur = bottom(), limit = end(); cur < limit;
duke@435 772 cur += cl->do_blk_careful(cur));
duke@435 773 }
duke@435 774
duke@435 775 // Apply the given closure to each block in the space.
duke@435 776 void CompactibleFreeListSpace::blk_iterate(BlkClosure* cl) {
duke@435 777 assert_lock_strong(freelistLock());
duke@435 778 HeapWord *cur, *limit;
duke@435 779 for (cur = bottom(), limit = end(); cur < limit;
duke@435 780 cur += cl->do_blk(cur));
duke@435 781 }
duke@435 782
duke@435 783 // Apply the given closure to each oop in the space.
coleenp@4037 784 void CompactibleFreeListSpace::oop_iterate(ExtendedOopClosure* cl) {
duke@435 785 assert_lock_strong(freelistLock());
duke@435 786 HeapWord *cur, *limit;
duke@435 787 size_t curSize;
duke@435 788 for (cur = bottom(), limit = end(); cur < limit;
duke@435 789 cur += curSize) {
duke@435 790 curSize = block_size(cur);
duke@435 791 if (block_is_obj(cur)) {
duke@435 792 oop(cur)->oop_iterate(cl);
duke@435 793 }
duke@435 794 }
duke@435 795 }
duke@435 796
duke@435 797 // Apply the given closure to each oop in the space \intersect memory region.
coleenp@4037 798 void CompactibleFreeListSpace::oop_iterate(MemRegion mr, ExtendedOopClosure* cl) {
duke@435 799 assert_lock_strong(freelistLock());
duke@435 800 if (is_empty()) {
duke@435 801 return;
duke@435 802 }
duke@435 803 MemRegion cur = MemRegion(bottom(), end());
duke@435 804 mr = mr.intersection(cur);
duke@435 805 if (mr.is_empty()) {
duke@435 806 return;
duke@435 807 }
duke@435 808 if (mr.equals(cur)) {
duke@435 809 oop_iterate(cl);
duke@435 810 return;
duke@435 811 }
duke@435 812 assert(mr.end() <= end(), "just took an intersection above");
duke@435 813 HeapWord* obj_addr = block_start(mr.start());
duke@435 814 HeapWord* t = mr.end();
duke@435 815
duke@435 816 SpaceMemRegionOopsIterClosure smr_blk(cl, mr);
duke@435 817 if (block_is_obj(obj_addr)) {
duke@435 818 // Handle first object specially.
duke@435 819 oop obj = oop(obj_addr);
duke@435 820 obj_addr += adjustObjectSize(obj->oop_iterate(&smr_blk));
duke@435 821 } else {
duke@435 822 FreeChunk* fc = (FreeChunk*)obj_addr;
duke@435 823 obj_addr += fc->size();
duke@435 824 }
duke@435 825 while (obj_addr < t) {
duke@435 826 HeapWord* obj = obj_addr;
duke@435 827 obj_addr += block_size(obj_addr);
duke@435 828 // If "obj_addr" is not greater than top, then the
duke@435 829 // entire object "obj" is within the region.
duke@435 830 if (obj_addr <= t) {
duke@435 831 if (block_is_obj(obj)) {
duke@435 832 oop(obj)->oop_iterate(cl);
duke@435 833 }
duke@435 834 } else {
duke@435 835 // "obj" extends beyond end of region
duke@435 836 if (block_is_obj(obj)) {
duke@435 837 oop(obj)->oop_iterate(&smr_blk);
duke@435 838 }
duke@435 839 break;
duke@435 840 }
duke@435 841 }
duke@435 842 }
duke@435 843
duke@435 844 // NOTE: In the following methods, in order to safely be able to
duke@435 845 // apply the closure to an object, we need to be sure that the
duke@435 846 // object has been initialized. We are guaranteed that an object
duke@435 847 // is initialized if we are holding the Heap_lock with the
duke@435 848 // world stopped.
duke@435 849 void CompactibleFreeListSpace::verify_objects_initialized() const {
duke@435 850 if (is_init_completed()) {
duke@435 851 assert_locked_or_safepoint(Heap_lock);
duke@435 852 if (Universe::is_fully_initialized()) {
duke@435 853 guarantee(SafepointSynchronize::is_at_safepoint(),
duke@435 854 "Required for objects to be initialized");
duke@435 855 }
duke@435 856 } // else make a concession at vm start-up
duke@435 857 }
duke@435 858
duke@435 859 // Apply the given closure to each object in the space
duke@435 860 void CompactibleFreeListSpace::object_iterate(ObjectClosure* blk) {
duke@435 861 assert_lock_strong(freelistLock());
duke@435 862 NOT_PRODUCT(verify_objects_initialized());
duke@435 863 HeapWord *cur, *limit;
duke@435 864 size_t curSize;
duke@435 865 for (cur = bottom(), limit = end(); cur < limit;
duke@435 866 cur += curSize) {
duke@435 867 curSize = block_size(cur);
duke@435 868 if (block_is_obj(cur)) {
duke@435 869 blk->do_object(oop(cur));
duke@435 870 }
duke@435 871 }
duke@435 872 }
duke@435 873
jmasa@952 874 // Apply the given closure to each live object in the space
jmasa@952 875 // The usage of CompactibleFreeListSpace
jmasa@952 876 // by the ConcurrentMarkSweepGeneration for concurrent GCs allows
jmasa@952 877 // objects in the space to hold references to objects that are no longer
jmasa@952 878 // valid. For example, an object may reference another object
jmasa@952 879 // that has already been swept up (collected). This method uses
jmasa@952 880 // obj_is_alive() to determine whether it is safe to apply the closure to
jmasa@952 881 // an object. See obj_is_alive() for details on how liveness of an
jmasa@952 882 // object is decided.
jmasa@952 883
jmasa@952 884 void CompactibleFreeListSpace::safe_object_iterate(ObjectClosure* blk) {
jmasa@952 885 assert_lock_strong(freelistLock());
jmasa@952 886 NOT_PRODUCT(verify_objects_initialized());
jmasa@952 887 HeapWord *cur, *limit;
jmasa@952 888 size_t curSize;
jmasa@952 889 for (cur = bottom(), limit = end(); cur < limit;
jmasa@952 890 cur += curSize) {
jmasa@952 891 curSize = block_size(cur);
jmasa@952 892 if (block_is_obj(cur) && obj_is_alive(cur)) {
jmasa@952 893 blk->do_object(oop(cur));
jmasa@952 894 }
jmasa@952 895 }
jmasa@952 896 }
jmasa@952 897
duke@435 898 void CompactibleFreeListSpace::object_iterate_mem(MemRegion mr,
duke@435 899 UpwardsObjectClosure* cl) {
ysr@1580 900 assert_locked(freelistLock());
duke@435 901 NOT_PRODUCT(verify_objects_initialized());
duke@435 902 Space::object_iterate_mem(mr, cl);
duke@435 903 }
duke@435 904
duke@435 905 // Callers of this iterator beware: The closure application should
duke@435 906 // be robust in the face of uninitialized objects and should (always)
duke@435 907 // return a correct size so that the next addr + size below gives us a
duke@435 908 // valid block boundary. [See for instance,
duke@435 909 // ScanMarkedObjectsAgainCarefullyClosure::do_object_careful()
duke@435 910 // in ConcurrentMarkSweepGeneration.cpp.]
duke@435 911 HeapWord*
duke@435 912 CompactibleFreeListSpace::object_iterate_careful(ObjectClosureCareful* cl) {
duke@435 913 assert_lock_strong(freelistLock());
duke@435 914 HeapWord *addr, *last;
duke@435 915 size_t size;
duke@435 916 for (addr = bottom(), last = end();
duke@435 917 addr < last; addr += size) {
duke@435 918 FreeChunk* fc = (FreeChunk*)addr;
jmasa@3732 919 if (fc->is_free()) {
duke@435 920 // Since we hold the free list lock, which protects direct
duke@435 921 // allocation in this generation by mutators, a free object
duke@435 922 // will remain free throughout this iteration code.
duke@435 923 size = fc->size();
duke@435 924 } else {
duke@435 925 // Note that the object need not necessarily be initialized,
duke@435 926 // because (for instance) the free list lock does NOT protect
duke@435 927 // object initialization. The closure application below must
duke@435 928 // therefore be correct in the face of uninitialized objects.
duke@435 929 size = cl->do_object_careful(oop(addr));
duke@435 930 if (size == 0) {
duke@435 931 // An unparsable object found. Signal early termination.
duke@435 932 return addr;
duke@435 933 }
duke@435 934 }
duke@435 935 }
duke@435 936 return NULL;
duke@435 937 }
duke@435 938
duke@435 939 // Callers of this iterator beware: The closure application should
duke@435 940 // be robust in the face of uninitialized objects and should (always)
duke@435 941 // return a correct size so that the next addr + size below gives us a
duke@435 942 // valid block boundary. [See for instance,
duke@435 943 // ScanMarkedObjectsAgainCarefullyClosure::do_object_careful()
duke@435 944 // in ConcurrentMarkSweepGeneration.cpp.]
duke@435 945 HeapWord*
duke@435 946 CompactibleFreeListSpace::object_iterate_careful_m(MemRegion mr,
duke@435 947 ObjectClosureCareful* cl) {
duke@435 948 assert_lock_strong(freelistLock());
duke@435 949 // Can't use used_region() below because it may not necessarily
duke@435 950 // be the same as [bottom(),end()); although we could
duke@435 951 // use [used_region().start(),round_to(used_region().end(),CardSize)),
duke@435 952 // that appears too cumbersome, so we just do the simpler check
duke@435 953 // in the assertion below.
duke@435 954 assert(!mr.is_empty() && MemRegion(bottom(),end()).contains(mr),
duke@435 955 "mr should be non-empty and within used space");
duke@435 956 HeapWord *addr, *end;
duke@435 957 size_t size;
duke@435 958 for (addr = block_start_careful(mr.start()), end = mr.end();
duke@435 959 addr < end; addr += size) {
duke@435 960 FreeChunk* fc = (FreeChunk*)addr;
jmasa@3732 961 if (fc->is_free()) {
duke@435 962 // Since we hold the free list lock, which protects direct
duke@435 963 // allocation in this generation by mutators, a free object
duke@435 964 // will remain free throughout this iteration code.
duke@435 965 size = fc->size();
duke@435 966 } else {
duke@435 967 // Note that the object need not necessarily be initialized,
duke@435 968 // because (for instance) the free list lock does NOT protect
duke@435 969 // object initialization. The closure application below must
duke@435 970 // therefore be correct in the face of uninitialized objects.
duke@435 971 size = cl->do_object_careful_m(oop(addr), mr);
duke@435 972 if (size == 0) {
duke@435 973 // An unparsable object found. Signal early termination.
duke@435 974 return addr;
duke@435 975 }
duke@435 976 }
duke@435 977 }
duke@435 978 return NULL;
duke@435 979 }
duke@435 980
duke@435 981
ysr@777 982 HeapWord* CompactibleFreeListSpace::block_start_const(const void* p) const {
duke@435 983 NOT_PRODUCT(verify_objects_initialized());
duke@435 984 return _bt.block_start(p);
duke@435 985 }
duke@435 986
duke@435 987 HeapWord* CompactibleFreeListSpace::block_start_careful(const void* p) const {
duke@435 988 return _bt.block_start_careful(p);
duke@435 989 }
duke@435 990
duke@435 991 size_t CompactibleFreeListSpace::block_size(const HeapWord* p) const {
duke@435 992 NOT_PRODUCT(verify_objects_initialized());
duke@435 993 // This must be volatile, or else there is a danger that the compiler
duke@435 994 // will compile the code below into a sometimes-infinite loop, by keeping
duke@435 995 // the value read the first time in a register.
duke@435 996 while (true) {
duke@435 997 // We must do this until we get a consistent view of the object.
coleenp@622 998 if (FreeChunk::indicatesFreeChunk(p)) {
coleenp@622 999 volatile FreeChunk* fc = (volatile FreeChunk*)p;
coleenp@622 1000 size_t res = fc->size();
goetz@6493 1001
goetz@6493 1002 // Bugfix for systems with weak memory model (PPC64/IA64). The
goetz@6493 1003 // block's free bit was set and we have read the size of the
goetz@6493 1004 // block. Acquire and check the free bit again. If the block is
goetz@6493 1005 // still free, the read size is correct.
goetz@6493 1006 OrderAccess::acquire();
goetz@6493 1007
coleenp@622 1008 // If the object is still a free chunk, return the size, else it
coleenp@622 1009 // has been allocated so try again.
coleenp@622 1010 if (FreeChunk::indicatesFreeChunk(p)) {
duke@435 1011 assert(res != 0, "Block size should not be 0");
duke@435 1012 return res;
duke@435 1013 }
coleenp@622 1014 } else {
coleenp@622 1015 // must read from what 'p' points to in each loop.
coleenp@4037 1016 Klass* k = ((volatile oopDesc*)p)->klass_or_null();
coleenp@622 1017 if (k != NULL) {
coleenp@4037 1018 assert(k->is_klass(), "Should really be klass oop.");
coleenp@622 1019 oop o = (oop)p;
coleenp@622 1020 assert(o->is_oop(true /* ignore mark word */), "Should be an oop.");
goetz@6493 1021
goetz@6493 1022 // Bugfix for systems with weak memory model (PPC64/IA64).
goetz@6493 1023 // The object o may be an array. Acquire to make sure that the array
goetz@6493 1024 // size (third word) is consistent.
goetz@6493 1025 OrderAccess::acquire();
goetz@6493 1026
coleenp@4037 1027 size_t res = o->size_given_klass(k);
coleenp@622 1028 res = adjustObjectSize(res);
coleenp@622 1029 assert(res != 0, "Block size should not be 0");
coleenp@622 1030 return res;
coleenp@622 1031 }
duke@435 1032 }
duke@435 1033 }
duke@435 1034 }
duke@435 1035
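// Illustrative, self-contained C++11 sketch (not part of this file) of the
// check / read-size / acquire / re-check idiom used in block_size() above.
// The names FreeHeaderDemo, is_free_bit, payload_size and
// read_free_block_size are hypothetical; the real code reads FreeChunk
// fields through volatile pointers and uses OrderAccess::acquire() rather
// than a std::atomic fence.
#include <atomic>
#include <cstddef>

struct FreeHeaderDemo {
  std::atomic<bool>        is_free_bit;   // analogous to FreeChunk::indicatesFreeChunk(p)
  std::atomic<std::size_t> payload_size;  // analogous to fc->size()
};

// Returns the size if the block is observed free both before and after the
// size read, 0 otherwise (the caller must then re-derive the size).
inline std::size_t read_free_block_size(const FreeHeaderDemo* h) {
  if (h->is_free_bit.load(std::memory_order_relaxed)) {
    std::size_t sz = h->payload_size.load(std::memory_order_relaxed);
    // The acquire fence keeps the size read above ordered before the
    // re-check below, mirroring the OrderAccess::acquire() call in block_size().
    std::atomic_thread_fence(std::memory_order_acquire);
    if (h->is_free_bit.load(std::memory_order_relaxed)) {
      return sz;  // still marked free, so the size read is valid
    }
  }
  return 0;  // block was (or became) allocated concurrently
}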
coleenp@4037 1036 // TODO: Now that is_parsable is gone, we should combine these two functions.
duke@435 1037 // A variant of the above that uses the Printezis bits for
duke@435 1038 // unparsable but allocated objects. This avoids any possible
duke@435 1039 // stalls waiting for mutators to initialize objects, and is
duke@435 1040 // thus potentially faster than the variant above. However,
duke@435 1041 // this variant may return a zero size for a block that is
duke@435 1042 // under mutation and for which a consistent size cannot be
duke@435 1043 // inferred without stalling; see CMSCollector::block_size_if_printezis_bits().
duke@435 1044 size_t CompactibleFreeListSpace::block_size_no_stall(HeapWord* p,
duke@435 1045 const CMSCollector* c)
duke@435 1046 const {
duke@435 1047 assert(MemRegion(bottom(), end()).contains(p), "p not in space");
duke@435 1048 // This must be volatile, or else there is a danger that the compiler
duke@435 1049 // will compile the code below into a sometimes-infinite loop, by keeping
duke@435 1050 // the value read the first time in a register.
duke@435 1051 DEBUG_ONLY(uint loops = 0;)
duke@435 1052 while (true) {
duke@435 1053 // We must do this until we get a consistent view of the object.
coleenp@622 1054 if (FreeChunk::indicatesFreeChunk(p)) {
coleenp@622 1055 volatile FreeChunk* fc = (volatile FreeChunk*)p;
coleenp@622 1056 size_t res = fc->size();
goetz@6493 1057
goetz@6493 1058 // Bugfix for systems with weak memory model (PPC64/IA64). The
goetz@6493 1059 // free bit of the block was set and we have read the size of
goetz@6493 1060 // the block. Acquire and check the free bit again. If the
goetz@6493 1061 // block is still free, the read size is correct.
goetz@6493 1062 OrderAccess::acquire();
goetz@6493 1063
coleenp@622 1064 if (FreeChunk::indicatesFreeChunk(p)) {
duke@435 1065 assert(res != 0, "Block size should not be 0");
duke@435 1066 assert(loops == 0, "Should be 0");
duke@435 1067 return res;
duke@435 1068 }
duke@435 1069 } else {
coleenp@622 1070 // must read from what 'p' points to in each loop.
coleenp@4037 1071 Klass* k = ((volatile oopDesc*)p)->klass_or_null();
ysr@2533 1072 // We trust the size of any object that has a non-NULL
ysr@2533 1073 // klass and (for those in the perm gen) is parsable
ysr@2533 1074 // -- irrespective of its conc_safe-ty.
coleenp@4037 1075 if (k != NULL) {
coleenp@4037 1076 assert(k->is_klass(), "Should really be klass oop.");
coleenp@622 1077 oop o = (oop)p;
coleenp@622 1078 assert(o->is_oop(), "Should be an oop");
goetz@6493 1079
goetz@6493 1080 // Bugfix for systems with weak memory model (PPC64/IA64).
goetz@6493 1081 // The object o may be an array. Acquire to make sure that the array
goetz@6493 1082 // size (third word) is consistent.
goetz@6493 1083 OrderAccess::acquire();
goetz@6493 1084
coleenp@4037 1085 size_t res = o->size_given_klass(k);
coleenp@622 1086 res = adjustObjectSize(res);
coleenp@622 1087 assert(res != 0, "Block size should not be 0");
coleenp@622 1088 return res;
coleenp@622 1089 } else {
ysr@2533 1090 // May return 0 if P-bits not present.
coleenp@622 1091 return c->block_size_if_printezis_bits(p);
coleenp@622 1092 }
duke@435 1093 }
duke@435 1094 assert(loops == 0, "Can loop at most once");
duke@435 1095 DEBUG_ONLY(loops++;)
duke@435 1096 }
duke@435 1097 }
duke@435 1098
duke@435 1099 size_t CompactibleFreeListSpace::block_size_nopar(const HeapWord* p) const {
duke@435 1100 NOT_PRODUCT(verify_objects_initialized());
duke@435 1101 assert(MemRegion(bottom(), end()).contains(p), "p not in space");
duke@435 1102 FreeChunk* fc = (FreeChunk*)p;
jmasa@3732 1103 if (fc->is_free()) {
duke@435 1104 return fc->size();
duke@435 1105 } else {
duke@435 1106 // Ignore mark word because this may be a recently promoted
duke@435 1107 // object whose mark word is used to chain together grey
duke@435 1108 // objects (the last one would have a null value).
duke@435 1109 assert(oop(p)->is_oop(true), "Should be an oop");
duke@435 1110 return adjustObjectSize(oop(p)->size());
duke@435 1111 }
duke@435 1112 }
duke@435 1113
duke@435 1114 // This implementation assumes that the property of "being an object" is
duke@435 1115 // stable. But being a free chunk may not be (because of parallel
duke@435 1116 // promotion.)
duke@435 1117 bool CompactibleFreeListSpace::block_is_obj(const HeapWord* p) const {
duke@435 1118 FreeChunk* fc = (FreeChunk*)p;
duke@435 1119 assert(is_in_reserved(p), "Should be in space");
duke@435 1120 // When doing a mark-sweep-compact of the CMS generation, this
duke@435 1121 // assertion may fail because prepare_for_compaction() uses
duke@435 1122 // space that is garbage to maintain information on ranges of
duke@435 1123 // live objects so that these live ranges can be moved as a whole.
duke@435 1124 // Comment out this assertion until that problem can be solved
duke@435 1125 // (i.e., that the block start calculation may look at objects
duke@435 1126 // at addresses below "p" in finding the object that contains "p",
duke@435 1127 // and those objects (if garbage) may have been modified to hold
duke@435 1128 // live range information).
jmasa@2188 1129 // assert(CollectedHeap::use_parallel_gc_threads() || _bt.block_start(p) == p,
jmasa@2188 1130 // "Should be a block boundary");
coleenp@622 1131 if (FreeChunk::indicatesFreeChunk(p)) return false;
coleenp@4037 1132 Klass* k = oop(p)->klass_or_null();
duke@435 1133 if (k != NULL) {
duke@435 1134 // Ignore mark word because it may have been used to
duke@435 1135 // chain together promoted objects (the last one
duke@435 1136 // would have a null value).
duke@435 1137 assert(oop(p)->is_oop(true), "Should be an oop");
duke@435 1138 return true;
duke@435 1139 } else {
duke@435 1140 return false; // Was not an object at the start of collection.
duke@435 1141 }
duke@435 1142 }
duke@435 1143
duke@435 1144 // Check if the object is alive. This fact is checked either by consulting
duke@435 1145 // the main marking bitmap in the sweeping phase or, if it's a permanent
duke@435 1146 // generation and we're not in the sweeping phase, by checking the
duke@435 1147 // perm_gen_verify_bit_map where we store the "deadness" information if
duke@435 1148 // we did not sweep the perm gen in the most recent previous GC cycle.
duke@435 1149 bool CompactibleFreeListSpace::obj_is_alive(const HeapWord* p) const {
ysr@2301 1150 assert(SafepointSynchronize::is_at_safepoint() || !is_init_completed(),
ysr@2301 1151 "Else races are possible");
ysr@2293 1152 assert(block_is_obj(p), "The address should point to an object");
duke@435 1153
duke@435 1154 // If we're sweeping, we use object liveness information from the main bit map
duke@435 1155 // for both perm gen and old gen.
duke@435 1156 // We don't need to lock the bitmap (live_map or dead_map below), because
duke@435 1157 // EITHER we are in the middle of the sweeping phase, and the
duke@435 1158 // main marking bit map (live_map below) is locked,
duke@435 1159 // OR we're in other phases and perm_gen_verify_bit_map (dead_map below)
duke@435 1160 // is stable, because it's mutated only in the sweeping phase.
ysr@2293 1161 // NOTE: This method is also used by jmap where, if class unloading is
ysr@2293 1162 // off, the results can return "false" for legitimate perm objects,
ysr@2293 1163 // when we are not in the midst of a sweeping phase, which can result
ysr@2293 1164 // in jmap not reporting certain perm gen objects. This will be moot
ysr@2293 1165 // if/when the perm gen goes away in the future.
duke@435 1166 if (_collector->abstract_state() == CMSCollector::Sweeping) {
duke@435 1167 CMSBitMap* live_map = _collector->markBitMap();
ysr@2293 1168 return live_map->par_isMarked((HeapWord*) p);
duke@435 1169 }
duke@435 1170 return true;
duke@435 1171 }
duke@435 1172
duke@435 1173 bool CompactibleFreeListSpace::block_is_obj_nopar(const HeapWord* p) const {
duke@435 1174 FreeChunk* fc = (FreeChunk*)p;
duke@435 1175 assert(is_in_reserved(p), "Should be in space");
duke@435 1176 assert(_bt.block_start(p) == p, "Should be a block boundary");
jmasa@3732 1177 if (!fc->is_free()) {
duke@435 1178 // Ignore mark word because it may have been used to
duke@435 1179 // chain together promoted objects (the last one
duke@435 1180 // would have a null value).
duke@435 1181 assert(oop(p)->is_oop(true), "Should be an oop");
duke@435 1182 return true;
duke@435 1183 }
duke@435 1184 return false;
duke@435 1185 }
duke@435 1186
duke@435 1187 // "MT-safe but not guaranteed MT-precise" (TM); you may get an
duke@435 1188 // approximate answer if you don't hold the freelistlock when you call this.
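// For illustration (hypothetical list populations): with 4 chunks on the
// 4-word list and 2 chunks on the 6-word list, the sum computed below is
// 4*4 + 6*2 = 28 heap words. The debug-only walk inside the loop simply
// re-derives each per-list total from the chunks when the lock is held.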
duke@435 1189 size_t CompactibleFreeListSpace::totalSizeInIndexedFreeLists() const {
duke@435 1190 size_t size = 0;
duke@435 1191 for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
duke@435 1192 debug_only(
duke@435 1193 // We may be calling here without the lock in which case we
duke@435 1194 // won't do this modest sanity check.
duke@435 1195 if (freelistLock()->owned_by_self()) {
duke@435 1196 size_t total_list_size = 0;
duke@435 1197 for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
duke@435 1198 fc = fc->next()) {
duke@435 1199 total_list_size += i;
duke@435 1200 }
duke@435 1201 assert(total_list_size == i * _indexedFreeList[i].count(),
duke@435 1202 "Count in list is incorrect");
duke@435 1203 }
duke@435 1204 )
duke@435 1205 size += i * _indexedFreeList[i].count();
duke@435 1206 }
duke@435 1207 return size;
duke@435 1208 }
duke@435 1209
duke@435 1210 HeapWord* CompactibleFreeListSpace::par_allocate(size_t size) {
duke@435 1211 MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
duke@435 1212 return allocate(size);
duke@435 1213 }
duke@435 1214
duke@435 1215 HeapWord*
duke@435 1216 CompactibleFreeListSpace::getChunkFromSmallLinearAllocBlockRemainder(size_t size) {
duke@435 1217 return getChunkFromLinearAllocBlockRemainder(&_smallLinearAllocBlock, size);
duke@435 1218 }
duke@435 1219
duke@435 1220 HeapWord* CompactibleFreeListSpace::allocate(size_t size) {
duke@435 1221 assert_lock_strong(freelistLock());
duke@435 1222 HeapWord* res = NULL;
duke@435 1223 assert(size == adjustObjectSize(size),
duke@435 1224 "use adjustObjectSize() before calling into allocate()");
duke@435 1225
duke@435 1226 if (_adaptive_freelists) {
duke@435 1227 res = allocate_adaptive_freelists(size);
duke@435 1228 } else { // non-adaptive free lists
duke@435 1229 res = allocate_non_adaptive_freelists(size);
duke@435 1230 }
duke@435 1231
duke@435 1232 if (res != NULL) {
duke@435 1233 // check that res does lie in this space!
duke@435 1234 assert(is_in_reserved(res), "Not in this space!");
duke@435 1235 assert(is_aligned((void*)res), "alignment check");
duke@435 1236
duke@435 1237 FreeChunk* fc = (FreeChunk*)res;
duke@435 1238 fc->markNotFree();
jmasa@3732 1239 assert(!fc->is_free(), "shouldn't be marked free");
coleenp@622 1240 assert(oop(fc)->klass_or_null() == NULL, "should look uninitialized");
duke@435 1241 // Verify that the block offset table shows this to
duke@435 1242 // be a single block, but not one which is unallocated.
duke@435 1243 _bt.verify_single_block(res, size);
duke@435 1244 _bt.verify_not_unallocated(res, size);
duke@435 1245 // mangle a just allocated object with a distinct pattern.
duke@435 1246 debug_only(fc->mangleAllocated(size));
duke@435 1247 }
duke@435 1248
duke@435 1249 return res;
duke@435 1250 }
duke@435 1251
duke@435 1252 HeapWord* CompactibleFreeListSpace::allocate_non_adaptive_freelists(size_t size) {
duke@435 1253 HeapWord* res = NULL;
duke@435 1254 // try and use linear allocation for smaller blocks
duke@435 1255 if (size < _smallLinearAllocBlock._allocation_size_limit) {
duke@435 1256 // if successful, the following also adjusts block offset table
duke@435 1257 res = getChunkFromSmallLinearAllocBlock(size);
duke@435 1258 }
duke@435 1259 // Else triage to indexed lists for smaller sizes
duke@435 1260 if (res == NULL) {
duke@435 1261 if (size < SmallForDictionary) {
duke@435 1262 res = (HeapWord*) getChunkFromIndexedFreeList(size);
duke@435 1263 } else {
duke@435 1264 // else get it from the big dictionary; if even this doesn't
duke@435 1265 // work we are out of luck.
duke@435 1266 res = (HeapWord*)getChunkFromDictionaryExact(size);
duke@435 1267 }
duke@435 1268 }
duke@435 1269
duke@435 1270 return res;
duke@435 1271 }
duke@435 1272
duke@435 1273 HeapWord* CompactibleFreeListSpace::allocate_adaptive_freelists(size_t size) {
duke@435 1274 assert_lock_strong(freelistLock());
duke@435 1275 HeapWord* res = NULL;
duke@435 1276 assert(size == adjustObjectSize(size),
duke@435 1277 "use adjustObjectSize() before calling into allocate()");
duke@435 1278
duke@435 1279 // Strategy
duke@435 1280 // if small
duke@435 1281 // exact size from small object indexed list
duke@435 1282 // small or large linear allocation block (linAB) as appropriate
duke@435 1283 // take from lists of greater sized chunks
duke@435 1284 // else
duke@435 1285 // dictionary
duke@435 1286 // small or large linear allocation block if it has the space
duke@435 1287 // Try allocating exact size from indexTable first
duke@435 1288 if (size < IndexSetSize) {
duke@435 1289 res = (HeapWord*) getChunkFromIndexedFreeList(size);
duke@435 1290 if (res != NULL) {
duke@435 1291 assert(res != (HeapWord*)_indexedFreeList[size].head(),
duke@435 1292 "Not removed from free list");
duke@435 1293 // no block offset table adjustment is necessary on blocks in
duke@435 1294 // the indexed lists.
duke@435 1295
duke@435 1296 // Try allocating from the small LinAB
duke@435 1297 } else if (size < _smallLinearAllocBlock._allocation_size_limit &&
duke@435 1298 (res = getChunkFromSmallLinearAllocBlock(size)) != NULL) {
duke@435 1299 // if successful, the above also adjusts block offset table
duke@435 1300 // Note that this call will refill the LinAB to
duke@435 1301 // satisfy the request. This is different than
duke@435 1302 // evm.
duke@435 1303 // Don't record chunk off a LinAB? smallSplitBirth(size);
duke@435 1304 } else {
duke@435 1305 // Raid the exact free lists larger than size, even if they are not
duke@435 1306 // overpopulated.
duke@435 1307 res = (HeapWord*) getChunkFromGreater(size);
duke@435 1308 }
duke@435 1309 } else {
duke@435 1310 // Big objects get allocated directly from the dictionary.
duke@435 1311 res = (HeapWord*) getChunkFromDictionaryExact(size);
duke@435 1312 if (res == NULL) {
duke@435 1313 // Try hard not to fail since an allocation failure will likely
duke@435 1314 // trigger a synchronous GC. Try to get the space from the
duke@435 1315 // allocation blocks.
duke@435 1316 res = getChunkFromSmallLinearAllocBlockRemainder(size);
duke@435 1317 }
duke@435 1318 }
duke@435 1319
duke@435 1320 return res;
duke@435 1321 }
duke@435 1322
duke@435 1323 // A worst-case estimate of the space required (in HeapWords) to expand the heap
duke@435 1324 // when promoting obj.
duke@435 1325 size_t CompactibleFreeListSpace::expansionSpaceRequired(size_t obj_size) const {
duke@435 1326 // Depending on the object size, expansion may require refilling either a
duke@435 1327 // bigLAB or a smallLAB plus refilling a PromotionInfo object. MinChunkSize
duke@435 1328 // is added because the dictionary may over-allocate to avoid fragmentation.
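// For example (hypothetical values): with obj_size == 10 words, a
// non-adaptive LinAB refill size of 256 words and _promoInfo.refillSize()
// of 64 words, the estimate below is MAX2(10, 256) + 64 + 2 * MinChunkSize.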
duke@435 1329 size_t space = obj_size;
duke@435 1330 if (!_adaptive_freelists) {
duke@435 1331 space = MAX2(space, _smallLinearAllocBlock._refillSize);
duke@435 1332 }
duke@435 1333 space += _promoInfo.refillSize() + 2 * MinChunkSize;
duke@435 1334 return space;
duke@435 1335 }
duke@435 1336
duke@435 1337 FreeChunk* CompactibleFreeListSpace::getChunkFromGreater(size_t numWords) {
duke@435 1338 FreeChunk* ret;
duke@435 1339
duke@435 1340 assert(numWords >= MinChunkSize, "Size is less than minimum");
duke@435 1341 assert(linearAllocationWouldFail() || bestFitFirst(),
duke@435 1342 "Should not be here");
duke@435 1343
duke@435 1344 size_t i;
duke@435 1345 size_t currSize = numWords + MinChunkSize;
duke@435 1346 assert(currSize % MinObjAlignment == 0, "currSize should be aligned");
duke@435 1347 for (i = currSize; i < IndexSetSize; i += IndexSetStride) {
jmasa@4196 1348 AdaptiveFreeList<FreeChunk>* fl = &_indexedFreeList[i];
duke@435 1349 if (fl->head()) {
duke@435 1350 ret = getFromListGreater(fl, numWords);
jmasa@3732 1351 assert(ret == NULL || ret->is_free(), "Should be returning a free chunk");
duke@435 1352 return ret;
duke@435 1353 }
duke@435 1354 }
duke@435 1355
duke@435 1356 currSize = MAX2((size_t)SmallForDictionary,
duke@435 1357 (size_t)(numWords + MinChunkSize));
duke@435 1358
duke@435 1359 /* Try to get a chunk that satisfies request, while avoiding
duke@435 1360 fragmentation that can't be handled. */
duke@435 1361 {
jmasa@3732 1362 ret = dictionary()->get_chunk(currSize);
duke@435 1363 if (ret != NULL) {
duke@435 1364 assert(ret->size() - numWords >= MinChunkSize,
duke@435 1365 "Chunk is too small");
duke@435 1366 _bt.allocated((HeapWord*)ret, ret->size());
duke@435 1367 /* Carve returned chunk. */
duke@435 1368 (void) splitChunkAndReturnRemainder(ret, numWords);
duke@435 1369 /* Label this as no longer a free chunk. */
jmasa@3732 1370 assert(ret->is_free(), "This chunk should be free");
jmasa@3732 1371 ret->link_prev(NULL);
duke@435 1372 }
jmasa@3732 1373 assert(ret == NULL || ret->is_free(), "Should be returning a free chunk");
duke@435 1374 return ret;
duke@435 1375 }
duke@435 1376 ShouldNotReachHere();
duke@435 1377 }
duke@435 1378
ysr@3220 1379 bool CompactibleFreeListSpace::verifyChunkInIndexedFreeLists(FreeChunk* fc) const {
duke@435 1380 assert(fc->size() < IndexSetSize, "Size of chunk is too large");
jmasa@3732 1381 return _indexedFreeList[fc->size()].verify_chunk_in_free_list(fc);
duke@435 1382 }
duke@435 1383
ysr@3220 1384 bool CompactibleFreeListSpace::verify_chunk_is_linear_alloc_block(FreeChunk* fc) const {
ysr@3220 1385 assert((_smallLinearAllocBlock._ptr != (HeapWord*)fc) ||
ysr@3220 1386 (_smallLinearAllocBlock._word_size == fc->size()),
ysr@3220 1387 "Linear allocation block shows incorrect size");
ysr@3220 1388 return ((_smallLinearAllocBlock._ptr == (HeapWord*)fc) &&
ysr@3220 1389 (_smallLinearAllocBlock._word_size == fc->size()));
ysr@3220 1390 }
ysr@3220 1391
ysr@3220 1392 // Check if the purported free chunk is present either as a linear
ysr@3220 1393 // allocation block, the size-indexed table of (smaller) free blocks,
ysr@3220 1394 // or the larger free blocks kept in the binary tree dictionary.
jmasa@3732 1395 bool CompactibleFreeListSpace::verify_chunk_in_free_list(FreeChunk* fc) const {
ysr@3220 1396 if (verify_chunk_is_linear_alloc_block(fc)) {
ysr@3220 1397 return true;
ysr@3220 1398 } else if (fc->size() < IndexSetSize) {
ysr@3220 1399 return verifyChunkInIndexedFreeLists(fc);
ysr@3220 1400 } else {
jmasa@3732 1401 return dictionary()->verify_chunk_in_free_list(fc);
duke@435 1402 }
duke@435 1403 }
duke@435 1404
duke@435 1405 #ifndef PRODUCT
duke@435 1406 void CompactibleFreeListSpace::assert_locked() const {
duke@435 1407 CMSLockVerifier::assert_locked(freelistLock(), parDictionaryAllocLock());
duke@435 1408 }
ysr@1580 1409
ysr@1580 1410 void CompactibleFreeListSpace::assert_locked(const Mutex* lock) const {
ysr@1580 1411 CMSLockVerifier::assert_locked(lock);
ysr@1580 1412 }
duke@435 1413 #endif
duke@435 1414
duke@435 1415 FreeChunk* CompactibleFreeListSpace::allocateScratch(size_t size) {
duke@435 1416 // In the parallel case, the main thread holds the free list lock
duke@435 1417 // on behalf of the parallel threads.
duke@435 1418 FreeChunk* fc;
duke@435 1419 {
duke@435 1420 // If GC is parallel, this might be called by several threads.
duke@435 1421 // This should be rare enough that the locking overhead won't affect
duke@435 1422 // the sequential code.
duke@435 1423 MutexLockerEx x(parDictionaryAllocLock(),
duke@435 1424 Mutex::_no_safepoint_check_flag);
duke@435 1425 fc = getChunkFromDictionary(size);
duke@435 1426 }
duke@435 1427 if (fc != NULL) {
duke@435 1428 fc->dontCoalesce();
jmasa@3732 1429 assert(fc->is_free(), "Should be free, but not coalescable");
duke@435 1430 // Verify that the block offset table shows this to
duke@435 1431 // be a single block, but not one which is unallocated.
duke@435 1432 _bt.verify_single_block((HeapWord*)fc, fc->size());
duke@435 1433 _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
duke@435 1434 }
duke@435 1435 return fc;
duke@435 1436 }
duke@435 1437
coleenp@548 1438 oop CompactibleFreeListSpace::promote(oop obj, size_t obj_size) {
duke@435 1439 assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
duke@435 1440 assert_locked();
duke@435 1441
duke@435 1442 // if we are tracking promotions, then first ensure space for
duke@435 1443 // promotion (including spooling space for saving header if necessary).
duke@435 1444 // then allocate and copy, then track promoted info if needed.
duke@435 1445 // When tracking (see PromotionInfo::track()), the mark word may
duke@435 1446 // be displaced and in this case restoration of the mark word
duke@435 1447 // occurs in the (oop_since_save_marks_)iterate phase.
duke@435 1448 if (_promoInfo.tracking() && !_promoInfo.ensure_spooling_space()) {
duke@435 1449 return NULL;
duke@435 1450 }
duke@435 1451 // Call the allocate(size_t, bool) form directly to avoid the
duke@435 1452 // additional call through the allocate(size_t) form. Having
duke@435 1453 // the compiler inline the call is problematic because allocate(size_t)
duke@435 1454 // is a virtual method.
duke@435 1455 HeapWord* res = allocate(adjustObjectSize(obj_size));
duke@435 1456 if (res != NULL) {
duke@435 1457 Copy::aligned_disjoint_words((HeapWord*)obj, res, obj_size);
duke@435 1458 // if we should be tracking promotions, do so.
duke@435 1459 if (_promoInfo.tracking()) {
duke@435 1460 _promoInfo.track((PromotedObject*)res);
duke@435 1461 }
duke@435 1462 }
duke@435 1463 return oop(res);
duke@435 1464 }
duke@435 1465
duke@435 1466 HeapWord*
duke@435 1467 CompactibleFreeListSpace::getChunkFromSmallLinearAllocBlock(size_t size) {
duke@435 1468 assert_locked();
duke@435 1469 assert(size >= MinChunkSize, "minimum chunk size");
duke@435 1470 assert(size < _smallLinearAllocBlock._allocation_size_limit,
duke@435 1471 "maximum from smallLinearAllocBlock");
duke@435 1472 return getChunkFromLinearAllocBlock(&_smallLinearAllocBlock, size);
duke@435 1473 }
duke@435 1474
duke@435 1475 HeapWord*
duke@435 1476 CompactibleFreeListSpace::getChunkFromLinearAllocBlock(LinearAllocBlock *blk,
duke@435 1477 size_t size) {
duke@435 1478 assert_locked();
duke@435 1479 assert(size >= MinChunkSize, "too small");
duke@435 1480 HeapWord* res = NULL;
duke@435 1481 // Try to do linear allocation from blk, making sure first that the block has not been exhausted.
duke@435 1482 if (blk->_word_size == 0) {
duke@435 1483 // We have probably been unable to fill this either in the prologue or
duke@435 1484 // when it was exhausted at the last linear allocation. Bail out until
duke@435 1485 // next time.
duke@435 1486 assert(blk->_ptr == NULL, "consistency check");
duke@435 1487 return NULL;
duke@435 1488 }
duke@435 1489 assert(blk->_word_size != 0 && blk->_ptr != NULL, "consistency check");
duke@435 1490 res = getChunkFromLinearAllocBlockRemainder(blk, size);
duke@435 1491 if (res != NULL) return res;
duke@435 1492
duke@435 1493 // about to exhaust this linear allocation block
duke@435 1494 if (blk->_word_size == size) { // exactly satisfied
duke@435 1495 res = blk->_ptr;
duke@435 1496 _bt.allocated(res, blk->_word_size);
duke@435 1497 } else if (size + MinChunkSize <= blk->_refillSize) {
ysr@1580 1498 size_t sz = blk->_word_size;
duke@435 1499 // Update _unallocated_block if the size is such that chunk would be
duke@435 1500 // returned to the indexed free list. All other chunks in the indexed
duke@435 1501 // free lists are allocated from the dictionary so that _unallocated_block
duke@435 1502 // has already been adjusted for them. Do it here so that the cost
duke@435 1503 // is paid in one place for all chunks added back to the indexed free lists.
ysr@1580 1504 if (sz < SmallForDictionary) {
ysr@1580 1505 _bt.allocated(blk->_ptr, sz);
duke@435 1506 }
duke@435 1507 // Return the chunk that isn't big enough, and then refill below.
ysr@1580 1508 addChunkToFreeLists(blk->_ptr, sz);
jmasa@3732 1509 split_birth(sz);
duke@435 1510 // Don't keep statistics on adding back a chunk from a LinAB.
duke@435 1511 } else {
duke@435 1512 // A refilled block would not satisfy the request.
duke@435 1513 return NULL;
duke@435 1514 }
duke@435 1515
duke@435 1516 blk->_ptr = NULL; blk->_word_size = 0;
duke@435 1517 refillLinearAllocBlock(blk);
duke@435 1518 assert(blk->_ptr == NULL || blk->_word_size >= size + MinChunkSize,
duke@435 1519 "block was replenished");
duke@435 1520 if (res != NULL) {
jmasa@3732 1521 split_birth(size);
duke@435 1522 repairLinearAllocBlock(blk);
duke@435 1523 } else if (blk->_ptr != NULL) {
duke@435 1524 res = blk->_ptr;
duke@435 1525 size_t blk_size = blk->_word_size;
duke@435 1526 blk->_word_size -= size;
duke@435 1527 blk->_ptr += size;
jmasa@3732 1528 split_birth(size);
duke@435 1529 repairLinearAllocBlock(blk);
duke@435 1530 // Update BOT last so that other (parallel) GC threads see a consistent
duke@435 1531 // view of the BOT and free blocks.
duke@435 1532 // Above must occur before BOT is updated below.
ysr@2071 1533 OrderAccess::storestore();
duke@435 1534 _bt.split_block(res, blk_size, size); // adjust block offset table
duke@435 1535 }
duke@435 1536 return res;
duke@435 1537 }
duke@435 1538
duke@435 1539 HeapWord* CompactibleFreeListSpace::getChunkFromLinearAllocBlockRemainder(
duke@435 1540 LinearAllocBlock* blk,
duke@435 1541 size_t size) {
duke@435 1542 assert_locked();
duke@435 1543 assert(size >= MinChunkSize, "too small");
duke@435 1544
duke@435 1545 HeapWord* res = NULL;
duke@435 1546 // This is the common case. Keep it simple.
duke@435 1547 if (blk->_word_size >= size + MinChunkSize) {
duke@435 1548 assert(blk->_ptr != NULL, "consistency check");
duke@435 1549 res = blk->_ptr;
duke@435 1550 // Note that the BOT is up-to-date for the linAB before allocation. It
duke@435 1551 // indicates the start of the linAB. The split_block() updates the
duke@435 1552 // BOT for the linAB after the allocation (indicates the start of the
duke@435 1553 // next chunk to be allocated).
duke@435 1554 size_t blk_size = blk->_word_size;
duke@435 1555 blk->_word_size -= size;
duke@435 1556 blk->_ptr += size;
jmasa@3732 1557 split_birth(size);
duke@435 1558 repairLinearAllocBlock(blk);
duke@435 1559 // Update BOT last so that other (parallel) GC threads see a consistent
duke@435 1560 // view of the BOT and free blocks.
duke@435 1561 // Above must occur before BOT is updated below.
ysr@2071 1562 OrderAccess::storestore();
duke@435 1563 _bt.split_block(res, blk_size, size); // adjust block offset table
duke@435 1564 _bt.allocated(res, size);
duke@435 1565 }
duke@435 1566 return res;
duke@435 1567 }
duke@435 1568
duke@435 1569 FreeChunk*
duke@435 1570 CompactibleFreeListSpace::getChunkFromIndexedFreeList(size_t size) {
duke@435 1571 assert_locked();
duke@435 1572 assert(size < SmallForDictionary, "just checking");
duke@435 1573 FreeChunk* res;
jmasa@3732 1574 res = _indexedFreeList[size].get_chunk_at_head();
duke@435 1575 if (res == NULL) {
duke@435 1576 res = getChunkFromIndexedFreeListHelper(size);
duke@435 1577 }
duke@435 1578 _bt.verify_not_unallocated((HeapWord*) res, size);
ysr@1580 1579 assert(res == NULL || res->size() == size, "Incorrect block size");
duke@435 1580 return res;
duke@435 1581 }
duke@435 1582
duke@435 1583 FreeChunk*
ysr@1580 1584 CompactibleFreeListSpace::getChunkFromIndexedFreeListHelper(size_t size,
ysr@1580 1585 bool replenish) {
duke@435 1586 assert_locked();
duke@435 1587 FreeChunk* fc = NULL;
duke@435 1588 if (size < SmallForDictionary) {
duke@435 1589 assert(_indexedFreeList[size].head() == NULL ||
duke@435 1590 _indexedFreeList[size].surplus() <= 0,
duke@435 1591 "List for this size should be empty or under populated");
duke@435 1592 // Try best fit in exact lists before replenishing the list
duke@435 1593 if (!bestFitFirst() || (fc = bestFitSmall(size)) == NULL) {
duke@435 1594 // Replenish list.
duke@435 1595 //
duke@435 1596 // Things tried that failed.
duke@435 1597 // Tried allocating out of the two LinAB's first before
duke@435 1598 // replenishing lists.
duke@435 1599 // Tried small linAB of size 256 (size in indexed list)
duke@435 1600 // and replenishing indexed lists from the small linAB.
duke@435 1601 //
duke@435 1602 FreeChunk* newFc = NULL;
ysr@1580 1603 const size_t replenish_size = CMSIndexedFreeListReplenish * size;
duke@435 1604 if (replenish_size < SmallForDictionary) {
duke@435 1605 // Do not replenish from an underpopulated size.
duke@435 1606 if (_indexedFreeList[replenish_size].surplus() > 0 &&
duke@435 1607 _indexedFreeList[replenish_size].head() != NULL) {
jmasa@3732 1608 newFc = _indexedFreeList[replenish_size].get_chunk_at_head();
ysr@1580 1609 } else if (bestFitFirst()) {
duke@435 1610 newFc = bestFitSmall(replenish_size);
duke@435 1611 }
duke@435 1612 }
ysr@1580 1613 if (newFc == NULL && replenish_size > size) {
ysr@1580 1614 assert(CMSIndexedFreeListReplenish > 1, "ctl pt invariant");
ysr@1580 1615 newFc = getChunkFromIndexedFreeListHelper(replenish_size, false);
ysr@1580 1616 }
ysr@1580 1617 // Note: The stats update re split-death of block obtained above
ysr@1580 1618 // will be recorded below precisely when we know we are going to
ysr@1580 1619 // be actually splitting it into more than one piece.
duke@435 1620 if (newFc != NULL) {
ysr@1580 1621 if (replenish || CMSReplenishIntermediate) {
ysr@1580 1622 // Replenish this list and return one block to caller.
ysr@1580 1623 size_t i;
ysr@1580 1624 FreeChunk *curFc, *nextFc;
ysr@1580 1625 size_t num_blk = newFc->size() / size;
ysr@1580 1626 assert(num_blk >= 1, "Smaller than requested?");
ysr@1580 1627 assert(newFc->size() % size == 0, "Should be integral multiple of request");
ysr@1580 1628 if (num_blk > 1) {
ysr@1580 1629 // we are sure we will be splitting the block just obtained
ysr@1580 1630 // into multiple pieces; record the split-death of the original
ysr@1580 1631 splitDeath(replenish_size);
ysr@1580 1632 }
ysr@1580 1633 // carve up and link blocks 0, ..., num_blk - 2
ysr@1580 1634 // The last chunk is not added to the lists but is returned as the
ysr@1580 1635 // free chunk.
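// For example, if newFc->size() == 5 * size then num_blk == 5: the loop
// below puts blocks 0..3 at the tail of _indexedFreeList[size], and
// block 4 (the final curFc) is the chunk handed back to the caller.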
ysr@1580 1636 for (curFc = newFc, nextFc = (FreeChunk*)((HeapWord*)curFc + size),
ysr@1580 1637 i = 0;
ysr@1580 1638 i < (num_blk - 1);
ysr@1580 1639 curFc = nextFc, nextFc = (FreeChunk*)((HeapWord*)nextFc + size),
ysr@1580 1640 i++) {
jmasa@3732 1641 curFc->set_size(size);
ysr@1580 1642 // Don't record this as a return in order to try and
ysr@1580 1643 // determine the "returns" from a GC.
ysr@1580 1644 _bt.verify_not_unallocated((HeapWord*) fc, size);
jmasa@3732 1645 _indexedFreeList[size].return_chunk_at_tail(curFc, false);
ysr@1580 1646 _bt.mark_block((HeapWord*)curFc, size);
jmasa@3732 1647 split_birth(size);
ysr@1580 1648 // Don't record the initial population of the indexed list
ysr@1580 1649 // as a split birth.
ysr@1580 1650 }
ysr@1580 1651
ysr@1580 1652 // check that the arithmetic was OK above
ysr@1580 1653 assert((HeapWord*)nextFc == (HeapWord*)newFc + num_blk*size,
ysr@1580 1654 "inconsistency in carving newFc");
jmasa@3732 1655 curFc->set_size(size);
duke@435 1656 _bt.mark_block((HeapWord*)curFc, size);
jmasa@3732 1657 split_birth(size);
ysr@1580 1658 fc = curFc;
ysr@1580 1659 } else {
ysr@1580 1660 // Return entire block to caller
ysr@1580 1661 fc = newFc;
duke@435 1662 }
duke@435 1663 }
duke@435 1664 }
duke@435 1665 } else {
duke@435 1666 // Get a free chunk from the free chunk dictionary to be returned to
duke@435 1667 // replenish the indexed free list.
duke@435 1668 fc = getChunkFromDictionaryExact(size);
duke@435 1669 }
jmasa@3732 1670 // assert(fc == NULL || fc->is_free(), "Should be returning a free chunk");
duke@435 1671 return fc;
duke@435 1672 }
duke@435 1673
duke@435 1674 FreeChunk*
duke@435 1675 CompactibleFreeListSpace::getChunkFromDictionary(size_t size) {
duke@435 1676 assert_locked();
jmasa@4488 1677 FreeChunk* fc = _dictionary->get_chunk(size,
jmasa@4488 1678 FreeBlockDictionary<FreeChunk>::atLeast);
duke@435 1679 if (fc == NULL) {
duke@435 1680 return NULL;
duke@435 1681 }
duke@435 1682 _bt.allocated((HeapWord*)fc, fc->size());
duke@435 1683 if (fc->size() >= size + MinChunkSize) {
duke@435 1684 fc = splitChunkAndReturnRemainder(fc, size);
duke@435 1685 }
duke@435 1686 assert(fc->size() >= size, "chunk too small");
duke@435 1687 assert(fc->size() < size + MinChunkSize, "chunk too big");
duke@435 1688 _bt.verify_single_block((HeapWord*)fc, fc->size());
duke@435 1689 return fc;
duke@435 1690 }
duke@435 1691
duke@435 1692 FreeChunk*
duke@435 1693 CompactibleFreeListSpace::getChunkFromDictionaryExact(size_t size) {
duke@435 1694 assert_locked();
jmasa@4488 1695 FreeChunk* fc = _dictionary->get_chunk(size,
jmasa@4488 1696 FreeBlockDictionary<FreeChunk>::atLeast);
duke@435 1697 if (fc == NULL) {
duke@435 1698 return fc;
duke@435 1699 }
duke@435 1700 _bt.allocated((HeapWord*)fc, fc->size());
duke@435 1701 if (fc->size() == size) {
duke@435 1702 _bt.verify_single_block((HeapWord*)fc, size);
duke@435 1703 return fc;
duke@435 1704 }
jmasa@3732 1705 assert(fc->size() > size, "get_chunk() guarantee");
duke@435 1706 if (fc->size() < size + MinChunkSize) {
duke@435 1707 // Return the chunk to the dictionary and go get a bigger one.
duke@435 1708 returnChunkToDictionary(fc);
jmasa@4488 1709 fc = _dictionary->get_chunk(size + MinChunkSize,
jmasa@4488 1710 FreeBlockDictionary<FreeChunk>::atLeast);
duke@435 1711 if (fc == NULL) {
duke@435 1712 return NULL;
duke@435 1713 }
duke@435 1714 _bt.allocated((HeapWord*)fc, fc->size());
duke@435 1715 }
duke@435 1716 assert(fc->size() >= size + MinChunkSize, "tautology");
duke@435 1717 fc = splitChunkAndReturnRemainder(fc, size);
duke@435 1718 assert(fc->size() == size, "chunk is wrong size");
duke@435 1719 _bt.verify_single_block((HeapWord*)fc, size);
duke@435 1720 return fc;
duke@435 1721 }
duke@435 1722
duke@435 1723 void
duke@435 1724 CompactibleFreeListSpace::returnChunkToDictionary(FreeChunk* chunk) {
duke@435 1725 assert_locked();
duke@435 1726
duke@435 1727 size_t size = chunk->size();
duke@435 1728 _bt.verify_single_block((HeapWord*)chunk, size);
duke@435 1729 // adjust _unallocated_block downward, as necessary
duke@435 1730 _bt.freed((HeapWord*)chunk, size);
jmasa@3732 1731 _dictionary->return_chunk(chunk);
ysr@1580 1732 #ifndef PRODUCT
ysr@1580 1733 if (CMSCollector::abstract_state() != CMSCollector::Sweeping) {
goetz@6337 1734 TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >* tc = TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >::as_TreeChunk(chunk);
goetz@6337 1735 TreeList<FreeChunk, AdaptiveFreeList<FreeChunk> >* tl = tc->list();
jmasa@4196 1736 tl->verify_stats();
ysr@1580 1737 }
ysr@1580 1738 #endif // PRODUCT
duke@435 1739 }
duke@435 1740
duke@435 1741 void
duke@435 1742 CompactibleFreeListSpace::returnChunkToFreeList(FreeChunk* fc) {
duke@435 1743 assert_locked();
duke@435 1744 size_t size = fc->size();
duke@435 1745 _bt.verify_single_block((HeapWord*) fc, size);
duke@435 1746 _bt.verify_not_unallocated((HeapWord*) fc, size);
duke@435 1747 if (_adaptive_freelists) {
jmasa@3732 1748 _indexedFreeList[size].return_chunk_at_tail(fc);
duke@435 1749 } else {
jmasa@3732 1750 _indexedFreeList[size].return_chunk_at_head(fc);
duke@435 1751 }
ysr@1580 1752 #ifndef PRODUCT
ysr@1580 1753 if (CMSCollector::abstract_state() != CMSCollector::Sweeping) {
ysr@1580 1754 _indexedFreeList[size].verify_stats();
ysr@1580 1755 }
ysr@1580 1756 #endif // PRODUCT
duke@435 1757 }
duke@435 1758
duke@435 1759 // Add chunk to end of last block -- if it's the largest
duke@435 1760 // block -- and update BOT and census data. We would
duke@435 1761 // of course have preferred to coalesce it with the
duke@435 1762 // last block, but it's currently less expensive to find the
duke@435 1763 // largest block than it is to find the last.
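// For example (hypothetical addresses): if the largest dictionary block
// occupies [0x1000, 0x1200) and "chunk" starts at 0x1200, ec->end()
// equals chunk, so the two are coterminal: the old block is removed from
// the dictionary, its size is folded into "size", and the combined block
// is added back (with the census updated) under the appropriate lock.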
duke@435 1764 void
duke@435 1765 CompactibleFreeListSpace::addChunkToFreeListsAtEndRecordingStats(
duke@435 1766 HeapWord* chunk, size_t size) {
duke@435 1767 // check that the chunk does lie in this space!
duke@435 1768 assert(chunk != NULL && is_in_reserved(chunk), "Not in this space!");
duke@435 1769 // One of the parallel gc task threads may be here
duke@435 1770 // whilst others are allocating.
duke@435 1771 Mutex* lock = NULL;
duke@435 1772 if (ParallelGCThreads != 0) {
duke@435 1773 lock = &_parDictionaryAllocLock;
duke@435 1774 }
duke@435 1775 FreeChunk* ec;
duke@435 1776 {
duke@435 1777 MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
jmasa@3732 1778 ec = dictionary()->find_largest_dict(); // get largest block
jmasa@4196 1779 if (ec != NULL && ec->end() == (uintptr_t*) chunk) {
duke@435 1780 // It's a coterminal block - we can coalesce.
duke@435 1781 size_t old_size = ec->size();
duke@435 1782 coalDeath(old_size);
duke@435 1783 removeChunkFromDictionary(ec);
duke@435 1784 size += old_size;
duke@435 1785 } else {
duke@435 1786 ec = (FreeChunk*)chunk;
duke@435 1787 }
duke@435 1788 }
jmasa@3732 1789 ec->set_size(size);
duke@435 1790 debug_only(ec->mangleFreed(size));
brutisso@5166 1791 if (size < SmallForDictionary && ParallelGCThreads != 0) {
duke@435 1792 lock = _indexedFreeListParLocks[size];
duke@435 1793 }
duke@435 1794 MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
duke@435 1795 addChunkAndRepairOffsetTable((HeapWord*)ec, size, true);
duke@435 1796 // record the birth under the lock since the recording involves
duke@435 1797 // manipulation of the list on which the chunk lives; moreover,
duke@435 1798 // if the chunk is allocated and is the last one on the list,
duke@435 1799 // the list can go away.
duke@435 1800 coalBirth(size);
duke@435 1801 }
duke@435 1802
duke@435 1803 void
duke@435 1804 CompactibleFreeListSpace::addChunkToFreeLists(HeapWord* chunk,
duke@435 1805 size_t size) {
duke@435 1806 // check that the chunk does lie in this space!
duke@435 1807 assert(chunk != NULL && is_in_reserved(chunk), "Not in this space!");
duke@435 1808 assert_locked();
duke@435 1809 _bt.verify_single_block(chunk, size);
duke@435 1810
duke@435 1811 FreeChunk* fc = (FreeChunk*) chunk;
jmasa@3732 1812 fc->set_size(size);
duke@435 1813 debug_only(fc->mangleFreed(size));
duke@435 1814 if (size < SmallForDictionary) {
duke@435 1815 returnChunkToFreeList(fc);
duke@435 1816 } else {
duke@435 1817 returnChunkToDictionary(fc);
duke@435 1818 }
duke@435 1819 }
duke@435 1820
duke@435 1821 void
duke@435 1822 CompactibleFreeListSpace::addChunkAndRepairOffsetTable(HeapWord* chunk,
duke@435 1823 size_t size, bool coalesced) {
duke@435 1824 assert_locked();
duke@435 1825 assert(chunk != NULL, "null chunk");
duke@435 1826 if (coalesced) {
duke@435 1827 // repair BOT
duke@435 1828 _bt.single_block(chunk, size);
duke@435 1829 }
duke@435 1830 addChunkToFreeLists(chunk, size);
duke@435 1831 }
duke@435 1832
duke@435 1833 // We _must_ find the purported chunk on our free lists;
duke@435 1834 // we assert if we don't.
duke@435 1835 void
duke@435 1836 CompactibleFreeListSpace::removeFreeChunkFromFreeLists(FreeChunk* fc) {
duke@435 1837 size_t size = fc->size();
duke@435 1838 assert_locked();
duke@435 1839 debug_only(verifyFreeLists());
duke@435 1840 if (size < SmallForDictionary) {
duke@435 1841 removeChunkFromIndexedFreeList(fc);
duke@435 1842 } else {
duke@435 1843 removeChunkFromDictionary(fc);
duke@435 1844 }
duke@435 1845 _bt.verify_single_block((HeapWord*)fc, size);
duke@435 1846 debug_only(verifyFreeLists());
duke@435 1847 }
duke@435 1848
duke@435 1849 void
duke@435 1850 CompactibleFreeListSpace::removeChunkFromDictionary(FreeChunk* fc) {
duke@435 1851 size_t size = fc->size();
duke@435 1852 assert_locked();
duke@435 1853 assert(fc != NULL, "null chunk");
duke@435 1854 _bt.verify_single_block((HeapWord*)fc, size);
jmasa@3732 1855 _dictionary->remove_chunk(fc);
duke@435 1856 // adjust _unallocated_block upward, as necessary
duke@435 1857 _bt.allocated((HeapWord*)fc, size);
duke@435 1858 }
duke@435 1859
duke@435 1860 void
duke@435 1861 CompactibleFreeListSpace::removeChunkFromIndexedFreeList(FreeChunk* fc) {
duke@435 1862 assert_locked();
duke@435 1863 size_t size = fc->size();
duke@435 1864 _bt.verify_single_block((HeapWord*)fc, size);
duke@435 1865 NOT_PRODUCT(
duke@435 1866 if (FLSVerifyIndexTable) {
duke@435 1867 verifyIndexedFreeList(size);
duke@435 1868 }
duke@435 1869 )
jmasa@3732 1870 _indexedFreeList[size].remove_chunk(fc);
duke@435 1871 NOT_PRODUCT(
duke@435 1872 if (FLSVerifyIndexTable) {
duke@435 1873 verifyIndexedFreeList(size);
duke@435 1874 }
duke@435 1875 )
duke@435 1876 }
duke@435 1877
duke@435 1878 FreeChunk* CompactibleFreeListSpace::bestFitSmall(size_t numWords) {
duke@435 1879 /* A hint is the next larger size that has a surplus.
duke@435 1880 Start search at a size large enough to guarantee that
duke@435 1881 the excess is >= MinChunkSize. */
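/* For illustration (hypothetical values): with numWords == 16 and
   MinChunkSize == 4, the search starts at the 20-word list and follows
   the hint() chain upward until it reaches a list with a surplus and a
   non-empty head, from which getFromListGreater() splits off a chunk;
   if no such list is found, the hint for "start" is reset to IndexSetSize. */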
duke@435 1882 size_t start = align_object_size(numWords + MinChunkSize);
duke@435 1883 if (start < IndexSetSize) {
jmasa@4196 1884 AdaptiveFreeList<FreeChunk>* it = _indexedFreeList;
duke@435 1885 size_t hint = _indexedFreeList[start].hint();
duke@435 1886 while (hint < IndexSetSize) {
duke@435 1887 assert(hint % MinObjAlignment == 0, "hint should be aligned");
jmasa@4196 1888 AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[hint];
duke@435 1889 if (fl->surplus() > 0 && fl->head() != NULL) {
duke@435 1890 // Found a list with surplus, reset original hint
duke@435 1891 // and split out a free chunk which is returned.
duke@435 1892 _indexedFreeList[start].set_hint(hint);
duke@435 1893 FreeChunk* res = getFromListGreater(fl, numWords);
jmasa@3732 1894 assert(res == NULL || res->is_free(),
duke@435 1895 "Should be returning a free chunk");
duke@435 1896 return res;
duke@435 1897 }
duke@435 1898 hint = fl->hint(); /* keep looking */
duke@435 1899 }
duke@435 1900 /* None found. */
duke@435 1901 it[start].set_hint(IndexSetSize);
duke@435 1902 }
duke@435 1903 return NULL;
duke@435 1904 }
duke@435 1905
duke@435 1906 /* Requires fl->size >= numWords + MinChunkSize */
jmasa@4196 1907 FreeChunk* CompactibleFreeListSpace::getFromListGreater(AdaptiveFreeList<FreeChunk>* fl,
duke@435 1908 size_t numWords) {
duke@435 1909 FreeChunk *curr = fl->head();
duke@435 1910 size_t oldNumWords = curr->size();
duke@435 1911 assert(numWords >= MinChunkSize, "Word size is too small");
duke@435 1912 assert(curr != NULL, "List is empty");
duke@435 1913 assert(oldNumWords >= numWords + MinChunkSize,
duke@435 1914 "Size of chunks in the list is too small");
duke@435 1915
jmasa@3732 1916 fl->remove_chunk(curr);
duke@435 1917 // recorded indirectly by splitChunkAndReturnRemainder -
duke@435 1918 // smallSplit(oldNumWords, numWords);
duke@435 1919 FreeChunk* new_chunk = splitChunkAndReturnRemainder(curr, numWords);
duke@435 1920 // Does anything have to be done for the remainder in terms of
duke@435 1921 // fixing the card table?
jmasa@3732 1922 assert(new_chunk == NULL || new_chunk->is_free(),
duke@435 1923 "Should be returning a free chunk");
duke@435 1924 return new_chunk;
duke@435 1925 }
duke@435 1926
duke@435 1927 FreeChunk*
duke@435 1928 CompactibleFreeListSpace::splitChunkAndReturnRemainder(FreeChunk* chunk,
duke@435 1929 size_t new_size) {
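// Layout sketch: the original chunk of "size" words is split in place --
// words [0, new_size) remain with "chunk" (resized and returned), while
// words [new_size, size) become the remainder "ffc", which is linked as a
// free block and returned to the indexed lists or the dictionary depending
// on whether rem_size is below SmallForDictionary.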
duke@435 1930 assert_locked();
duke@435 1931 size_t size = chunk->size();
duke@435 1932 assert(size > new_size, "Split from a smaller block?");
duke@435 1933 assert(is_aligned(chunk), "alignment problem");
duke@435 1934 assert(size == adjustObjectSize(size), "alignment problem");
duke@435 1935 size_t rem_size = size - new_size;
duke@435 1936 assert(rem_size == adjustObjectSize(rem_size), "alignment problem");
duke@435 1937 assert(rem_size >= MinChunkSize, "Free chunk smaller than minimum");
duke@435 1938 FreeChunk* ffc = (FreeChunk*)((HeapWord*)chunk + new_size);
duke@435 1939 assert(is_aligned(ffc), "alignment problem");
jmasa@3732 1940 ffc->set_size(rem_size);
jmasa@3732 1941 ffc->link_next(NULL);
jmasa@3732 1942 ffc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
duke@435 1943 // Above must occur before BOT is updated below.
duke@435 1944 // adjust block offset table
ysr@2071 1945 OrderAccess::storestore();
jmasa@3732 1946 assert(chunk->is_free() && ffc->is_free(), "Error");
duke@435 1947 _bt.split_block((HeapWord*)chunk, chunk->size(), new_size);
duke@435 1948 if (rem_size < SmallForDictionary) {
duke@435 1949 bool is_par = (SharedHeap::heap()->n_par_threads() > 0);
duke@435 1950 if (is_par) _indexedFreeListParLocks[rem_size]->lock();
jmasa@3294 1951 assert(!is_par ||
jmasa@3294 1952 (SharedHeap::heap()->n_par_threads() ==
jmasa@3294 1953 SharedHeap::heap()->workers()->active_workers()), "Mismatch");
duke@435 1954 returnChunkToFreeList(ffc);
duke@435 1955 split(size, rem_size);
duke@435 1956 if (is_par) _indexedFreeListParLocks[rem_size]->unlock();
duke@435 1957 } else {
duke@435 1958 returnChunkToDictionary(ffc);
duke@435 1959 split(size, rem_size);
duke@435 1960 }
jmasa@3732 1961 chunk->set_size(new_size);
duke@435 1962 return chunk;
duke@435 1963 }
duke@435 1964
duke@435 1965 void
duke@435 1966 CompactibleFreeListSpace::sweep_completed() {
duke@435 1967 // Now that space is probably plentiful, refill linear
duke@435 1968 // allocation blocks as needed.
duke@435 1969 refillLinearAllocBlocksIfNeeded();
duke@435 1970 }
duke@435 1971
duke@435 1972 void
duke@435 1973 CompactibleFreeListSpace::gc_prologue() {
duke@435 1974 assert_locked();
duke@435 1975 if (PrintFLSStatistics != 0) {
duke@435 1976 gclog_or_tty->print("Before GC:\n");
duke@435 1977 reportFreeListStatistics();
duke@435 1978 }
duke@435 1979 refillLinearAllocBlocksIfNeeded();
duke@435 1980 }
duke@435 1981
duke@435 1982 void
duke@435 1983 CompactibleFreeListSpace::gc_epilogue() {
duke@435 1984 assert_locked();
duke@435 1985 if (PrintGCDetails && Verbose && !_adaptive_freelists) {
duke@435 1986 if (_smallLinearAllocBlock._word_size == 0)
duke@435 1987 warning("CompactibleFreeListSpace(epilogue):: Linear allocation failure");
duke@435 1988 }
duke@435 1989 assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");
duke@435 1990 _promoInfo.stopTrackingPromotions();
duke@435 1991 repairLinearAllocationBlocks();
duke@435 1992 // Print Space's stats
duke@435 1993 if (PrintFLSStatistics != 0) {
duke@435 1994 gclog_or_tty->print("After GC:\n");
duke@435 1995 reportFreeListStatistics();
duke@435 1996 }
duke@435 1997 }
duke@435 1998
duke@435 1999 // Iteration support, mostly delegated from a CMS generation
duke@435 2000
duke@435 2001 void CompactibleFreeListSpace::save_marks() {
ysr@2825 2002 assert(Thread::current()->is_VM_thread(),
ysr@2825 2003 "Global variable should only be set when single-threaded");
ysr@2825 2004 // Mark the "end" of the used space at the time of this call;
duke@435 2005 // note, however, that promoted objects from this point
duke@435 2006 // on are tracked in the _promoInfo below.
ysr@2071 2007 set_saved_mark_word(unallocated_block());
ysr@2825 2008 #ifdef ASSERT
ysr@2825 2009 // Check the sanity of save_marks() etc.
ysr@2825 2010 MemRegion ur = used_region();
ysr@2825 2011 MemRegion urasm = used_region_at_save_marks();
ysr@2825 2012 assert(ur.contains(urasm),
ysr@2825 2013 err_msg(" Error at save_marks(): [" PTR_FORMAT "," PTR_FORMAT ")"
ysr@2825 2014 " should contain [" PTR_FORMAT "," PTR_FORMAT ")",
drchase@6680 2015 p2i(ur.start()), p2i(ur.end()), p2i(urasm.start()), p2i(urasm.end())));
ysr@2825 2016 #endif
duke@435 2017 // inform allocator that promotions should be tracked.
duke@435 2018 assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");
duke@435 2019 _promoInfo.startTrackingPromotions();
duke@435 2020 }
duke@435 2021
duke@435 2022 bool CompactibleFreeListSpace::no_allocs_since_save_marks() {
duke@435 2023 assert(_promoInfo.tracking(), "No preceding save_marks?");
ysr@2132 2024 assert(SharedHeap::heap()->n_par_threads() == 0,
ysr@2132 2025 "Shouldn't be called if using parallel gc.");
duke@435 2026 return _promoInfo.noPromotions();
duke@435 2027 }
duke@435 2028
duke@435 2029 #define CFLS_OOP_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix) \
duke@435 2030 \
duke@435 2031 void CompactibleFreeListSpace:: \
duke@435 2032 oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk) { \
duke@435 2033 assert(SharedHeap::heap()->n_par_threads() == 0, \
duke@435 2034 "Shouldn't be called (yet) during parallel part of gc."); \
duke@435 2035 _promoInfo.promoted_oops_iterate##nv_suffix(blk); \
duke@435 2036 /* \
duke@435 2037 * This also restores any displaced headers and removes the elements from \
duke@435 2038 * the iteration set as they are processed, so that we have a clean slate \
duke@435 2039 * at the end of the iteration. Note, thus, that if new objects are \
duke@435 2040 * promoted as a result of the iteration they are iterated over as well. \
duke@435 2041 */ \
duke@435 2042 assert(_promoInfo.noPromotions(), "_promoInfo inconsistency"); \
duke@435 2043 }
duke@435 2044
duke@435 2045 ALL_SINCE_SAVE_MARKS_CLOSURES(CFLS_OOP_SINCE_SAVE_MARKS_DEFN)
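// The macro application above stamps out one such definition per closure
// type; conceptually, for a (hypothetical) closure type FooClosure with
// suffix _nv it yields
//   CompactibleFreeListSpace::oop_since_save_marks_iterate_nv(FooClosure* blk)
// which forwards to _promoInfo.promoted_oops_iterate_nv(blk).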
duke@435 2046
ysr@447 2047 bool CompactibleFreeListSpace::linearAllocationWouldFail() const {
duke@435 2048 return _smallLinearAllocBlock._word_size == 0;
duke@435 2049 }
duke@435 2050
duke@435 2051 void CompactibleFreeListSpace::repairLinearAllocationBlocks() {
duke@435 2052 // Fix up linear allocation blocks to look like free blocks
duke@435 2053 repairLinearAllocBlock(&_smallLinearAllocBlock);
duke@435 2054 }
duke@435 2055
duke@435 2056 void CompactibleFreeListSpace::repairLinearAllocBlock(LinearAllocBlock* blk) {
duke@435 2057 assert_locked();
duke@435 2058 if (blk->_ptr != NULL) {
duke@435 2059 assert(blk->_word_size != 0 && blk->_word_size >= MinChunkSize,
duke@435 2060 "Minimum block size requirement");
duke@435 2061 FreeChunk* fc = (FreeChunk*)(blk->_ptr);
jmasa@3732 2062 fc->set_size(blk->_word_size);
jmasa@3732 2063 fc->link_prev(NULL); // mark as free
duke@435 2064 fc->dontCoalesce();
jmasa@3732 2065 assert(fc->is_free(), "just marked it free");
duke@435 2066 assert(fc->cantCoalesce(), "just marked it uncoalescable");
duke@435 2067 }
duke@435 2068 }
duke@435 2069
duke@435 2070 void CompactibleFreeListSpace::refillLinearAllocBlocksIfNeeded() {
duke@435 2071 assert_locked();
duke@435 2072 if (_smallLinearAllocBlock._ptr == NULL) {
duke@435 2073 assert(_smallLinearAllocBlock._word_size == 0,
duke@435 2074 "Size of linAB should be zero if the ptr is NULL");
duke@435 2075 // Reset the linAB refill and allocation size limit.
duke@435 2076 _smallLinearAllocBlock.set(0, 0, 1024*SmallForLinearAlloc, SmallForLinearAlloc);
duke@435 2077 }
duke@435 2078 refillLinearAllocBlockIfNeeded(&_smallLinearAllocBlock);
duke@435 2079 }
duke@435 2080
duke@435 2081 void
duke@435 2082 CompactibleFreeListSpace::refillLinearAllocBlockIfNeeded(LinearAllocBlock* blk) {
duke@435 2083 assert_locked();
duke@435 2084 assert((blk->_ptr == NULL && blk->_word_size == 0) ||
duke@435 2085 (blk->_ptr != NULL && blk->_word_size >= MinChunkSize),
duke@435 2086 "blk invariant");
duke@435 2087 if (blk->_ptr == NULL) {
duke@435 2088 refillLinearAllocBlock(blk);
duke@435 2089 }
duke@435 2090 if (PrintMiscellaneous && Verbose) {
duke@435 2091 if (blk->_word_size == 0) {
duke@435 2092 warning("CompactibleFreeListSpace(prologue):: Linear allocation failure");
duke@435 2093 }
duke@435 2094 }
duke@435 2095 }
duke@435 2096
duke@435 2097 void
duke@435 2098 CompactibleFreeListSpace::refillLinearAllocBlock(LinearAllocBlock* blk) {
duke@435 2099 assert_locked();
duke@435 2100 assert(blk->_word_size == 0 && blk->_ptr == NULL,
duke@435 2101 "linear allocation block should be empty");
duke@435 2102 FreeChunk* fc;
duke@435 2103 if (blk->_refillSize < SmallForDictionary &&
duke@435 2104 (fc = getChunkFromIndexedFreeList(blk->_refillSize)) != NULL) {
duke@435 2105 // A linAB's strategy might be to use small sizes to reduce
duke@435 2106 // fragmentation but still get the benefits of allocation from a
duke@435 2107 // linAB.
duke@435 2108 } else {
duke@435 2109 fc = getChunkFromDictionary(blk->_refillSize);
duke@435 2110 }
duke@435 2111 if (fc != NULL) {
duke@435 2112 blk->_ptr = (HeapWord*)fc;
duke@435 2113 blk->_word_size = fc->size();
duke@435 2114 fc->dontCoalesce(); // to prevent sweeper from sweeping us up
duke@435 2115 }
duke@435 2116 }
duke@435 2117
ysr@447 2118 // Support for concurrent collection policy decisions.
ysr@447 2119 bool CompactibleFreeListSpace::should_concurrent_collect() const {
ysr@447 2120 // In the future we might want to add in fragmentation stats --
ysr@447 2121 // including erosion of the "mountain" into this decision as well.
ysr@447 2122 return !adaptive_freelists() && linearAllocationWouldFail();
ysr@447 2123 }
ysr@447 2124
duke@435 2125 // Support for compaction
duke@435 2126
duke@435 2127 void CompactibleFreeListSpace::prepare_for_compaction(CompactPoint* cp) {
duke@435 2128 SCAN_AND_FORWARD(cp,end,block_is_obj,block_size);
duke@435 2129 // prepare_for_compaction() uses the space between live objects
duke@435 2130 // so that a later phase can skip dead space quickly. As a result,
duke@435 2131 // verification of the free lists doesn't work after this point.
duke@435 2132 }
duke@435 2133
duke@435 2134 #define obj_size(q) adjustObjectSize(oop(q)->size())
duke@435 2135 #define adjust_obj_size(s) adjustObjectSize(s)
duke@435 2136
duke@435 2137 void CompactibleFreeListSpace::adjust_pointers() {
duke@435 2138 // In other versions of adjust_pointers(), a bail out
duke@435 2139 // based on the amount of live data in the generation
duke@435 2140 // (i.e., if 0, bail out) may be used.
duke@435 2141 // Cannot test used() == 0 here because the free lists have already
duke@435 2142 // been mangled by the compaction.
duke@435 2143
duke@435 2144 SCAN_AND_ADJUST_POINTERS(adjust_obj_size);
duke@435 2145 // See note about verification in prepare_for_compaction().
duke@435 2146 }
duke@435 2147
duke@435 2148 void CompactibleFreeListSpace::compact() {
duke@435 2149 SCAN_AND_COMPACT(obj_size);
duke@435 2150 }
duke@435 2151
duke@435 2152 // fragmentation_metric = 1 - [sum of (fbs**2) / (sum of fbs)**2]
duke@435 2153 // where fbs is free block sizes
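// For example: a single free block of F words gives 1 - F*F/(F*F) = 0 (no
// fragmentation), while n equal blocks of F/n words give 1 - 1/n, which
// approaches 1 as the free space is chopped into ever smaller pieces.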
duke@435 2154 double CompactibleFreeListSpace::flsFrag() const {
duke@435 2155 size_t itabFree = totalSizeInIndexedFreeLists();
duke@435 2156 double frag = 0.0;
duke@435 2157 size_t i;
duke@435 2158
duke@435 2159 for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
duke@435 2160 double sz = i;
duke@435 2161 frag += _indexedFreeList[i].count() * (sz * sz);
duke@435 2162 }
duke@435 2163
duke@435 2164 double totFree = itabFree +
jmasa@3732 2165 _dictionary->total_chunk_size(DEBUG_ONLY(freelistLock()));
duke@435 2166 if (totFree > 0) {
duke@435 2167 frag = ((frag + _dictionary->sum_of_squared_block_sizes()) /
duke@435 2168 (totFree * totFree));
duke@435 2169 frag = (double)1.0 - frag;
duke@435 2170 } else {
duke@435 2171 assert(frag == 0.0, "Follows from totFree == 0");
duke@435 2172 }
duke@435 2173 return frag;
duke@435 2174 }
duke@435 2175
duke@435 2176 void CompactibleFreeListSpace::beginSweepFLCensus(
duke@435 2177 float inter_sweep_current,
ysr@1580 2178 float inter_sweep_estimate,
ysr@1580 2179 float intra_sweep_estimate) {
duke@435 2180 assert_locked();
duke@435 2181 size_t i;
duke@435 2182 for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
jmasa@4196 2183 AdaptiveFreeList<FreeChunk>* fl = &_indexedFreeList[i];
ysr@1580 2184 if (PrintFLSStatistics > 1) {
drchase@6680 2185 gclog_or_tty->print("size[" SIZE_FORMAT "] : ", i);
ysr@1580 2186 }
ysr@1580 2187 fl->compute_desired(inter_sweep_current, inter_sweep_estimate, intra_sweep_estimate);
jmasa@3732 2188 fl->set_coal_desired((ssize_t)((double)fl->desired() * CMSSmallCoalSurplusPercent));
jmasa@3732 2189 fl->set_before_sweep(fl->count());
jmasa@3732 2190 fl->set_bfr_surp(fl->surplus());
duke@435 2191 }
jmasa@3732 2192 _dictionary->begin_sweep_dict_census(CMSLargeCoalSurplusPercent,
duke@435 2193 inter_sweep_current,
ysr@1580 2194 inter_sweep_estimate,
ysr@1580 2195 intra_sweep_estimate);
duke@435 2196 }
duke@435 2197
duke@435 2198 void CompactibleFreeListSpace::setFLSurplus() {
duke@435 2199 assert_locked();
duke@435 2200 size_t i;
duke@435 2201 for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
jmasa@4196 2202 AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
duke@435 2203 fl->set_surplus(fl->count() -
ysr@1580 2204 (ssize_t)((double)fl->desired() * CMSSmallSplitSurplusPercent));
duke@435 2205 }
duke@435 2206 }
duke@435 2207
duke@435 2208 void CompactibleFreeListSpace::setFLHints() {
duke@435 2209 assert_locked();
duke@435 2210 size_t i;
duke@435 2211 size_t h = IndexSetSize;
duke@435 2212 for (i = IndexSetSize - 1; i != 0; i -= IndexSetStride) {
jmasa@4196 2213 AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
duke@435 2214 fl->set_hint(h);
duke@435 2215 if (fl->surplus() > 0) {
duke@435 2216 h = i;
duke@435 2217 }
duke@435 2218 }
duke@435 2219 }
duke@435 2220
duke@435 2221 void CompactibleFreeListSpace::clearFLCensus() {
duke@435 2222 assert_locked();
ysr@3264 2223 size_t i;
duke@435 2224 for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
jmasa@4196 2225 AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
jmasa@3732 2226 fl->set_prev_sweep(fl->count());
jmasa@3732 2227 fl->set_coal_births(0);
jmasa@3732 2228 fl->set_coal_deaths(0);
jmasa@3732 2229 fl->set_split_births(0);
jmasa@3732 2230 fl->set_split_deaths(0);
duke@435 2231 }
duke@435 2232 }
duke@435 2233
ysr@447 2234 void CompactibleFreeListSpace::endSweepFLCensus(size_t sweep_count) {
ysr@1580 2235 if (PrintFLSStatistics > 0) {
jmasa@3732 2236 HeapWord* largestAddr = (HeapWord*) dictionary()->find_largest_dict();
ysr@1580 2237 gclog_or_tty->print_cr("CMS: Large block " PTR_FORMAT,
drchase@6680 2238 p2i(largestAddr));
ysr@1580 2239 }
duke@435 2240 setFLSurplus();
duke@435 2241 setFLHints();
duke@435 2242 if (PrintGC && PrintFLSCensus > 0) {
ysr@447 2243 printFLCensus(sweep_count);
duke@435 2244 }
duke@435 2245 clearFLCensus();
duke@435 2246 assert_locked();
jmasa@3732 2247 _dictionary->end_sweep_dict_census(CMSLargeSplitSurplusPercent);
duke@435 2248 }
duke@435 2249
duke@435 2250 bool CompactibleFreeListSpace::coalOverPopulated(size_t size) {
duke@435 2251 if (size < SmallForDictionary) {
jmasa@4196 2252 AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
jmasa@3732 2253 return (fl->coal_desired() < 0) ||
jmasa@3732 2254 ((int)fl->count() > fl->coal_desired());
duke@435 2255 } else {
jmasa@3732 2256 return dictionary()->coal_dict_over_populated(size);
duke@435 2257 }
duke@435 2258 }
duke@435 2259
duke@435 2260 void CompactibleFreeListSpace::smallCoalBirth(size_t size) {
duke@435 2261 assert(size < SmallForDictionary, "Size too large for indexed list");
jmasa@4196 2262 AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
jmasa@3732 2263 fl->increment_coal_births();
duke@435 2264 fl->increment_surplus();
duke@435 2265 }
duke@435 2266
duke@435 2267 void CompactibleFreeListSpace::smallCoalDeath(size_t size) {
duke@435 2268 assert(size < SmallForDictionary, "Size too large for indexed list");
jmasa@4196 2269 AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
jmasa@3732 2270 fl->increment_coal_deaths();
duke@435 2271 fl->decrement_surplus();
duke@435 2272 }
duke@435 2273
duke@435 2274 void CompactibleFreeListSpace::coalBirth(size_t size) {
duke@435 2275 if (size < SmallForDictionary) {
duke@435 2276 smallCoalBirth(size);
duke@435 2277 } else {
jmasa@4196 2278 dictionary()->dict_census_update(size,
duke@435 2279 false /* split */,
duke@435 2280 true /* birth */);
duke@435 2281 }
duke@435 2282 }
duke@435 2283
duke@435 2284 void CompactibleFreeListSpace::coalDeath(size_t size) {
duke@435 2285 if (size < SmallForDictionary) {
duke@435 2286 smallCoalDeath(size);
duke@435 2287 } else {
jmasa@4196 2288 dictionary()->dict_census_update(size,
duke@435 2289 false /* split */,
duke@435 2290 false /* birth */);
duke@435 2291 }
duke@435 2292 }
duke@435 2293
duke@435 2294 void CompactibleFreeListSpace::smallSplitBirth(size_t size) {
duke@435 2295 assert(size < SmallForDictionary, "Size too large for indexed list");
jmasa@4196 2296 AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
jmasa@3732 2297 fl->increment_split_births();
duke@435 2298 fl->increment_surplus();
duke@435 2299 }
duke@435 2300
duke@435 2301 void CompactibleFreeListSpace::smallSplitDeath(size_t size) {
duke@435 2302 assert(size < SmallForDictionary, "Size too large for indexed list");
jmasa@4196 2303 AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
jmasa@3732 2304 fl->increment_split_deaths();
duke@435 2305 fl->decrement_surplus();
duke@435 2306 }
duke@435 2307
jmasa@3732 2308 void CompactibleFreeListSpace::split_birth(size_t size) {
duke@435 2309 if (size < SmallForDictionary) {
duke@435 2310 smallSplitBirth(size);
duke@435 2311 } else {
jmasa@4196 2312 dictionary()->dict_census_update(size,
duke@435 2313 true /* split */,
duke@435 2314 true /* birth */);
duke@435 2315 }
duke@435 2316 }
duke@435 2317
duke@435 2318 void CompactibleFreeListSpace::splitDeath(size_t size) {
duke@435 2319 if (size < SmallForDictionary) {
duke@435 2320 smallSplitDeath(size);
duke@435 2321 } else {
jmasa@4196 2322 dictionary()->dict_census_update(size,
duke@435 2323 true /* split */,
duke@435 2324 false /* birth */);
duke@435 2325 }
duke@435 2326 }
duke@435 2327
duke@435 2328 void CompactibleFreeListSpace::split(size_t from, size_t to1) {
duke@435 2329 size_t to2 = from - to1;
duke@435 2330 splitDeath(from);
jmasa@3732 2331 split_birth(to1);
jmasa@3732 2332 split_birth(to2);
duke@435 2333 }
duke@435 2334
duke@435 2335 void CompactibleFreeListSpace::print() const {
ysr@2294 2336 print_on(tty);
duke@435 2337 }
duke@435 2338
duke@435 2339 void CompactibleFreeListSpace::prepare_for_verify() {
duke@435 2340 assert_locked();
duke@435 2341 repairLinearAllocationBlocks();
duke@435 2342 // Verify that the SpoolBlocks look like free blocks of
duke@435 2343 // appropriate sizes... To be done ...
duke@435 2344 }
duke@435 2345
duke@435 2346 class VerifyAllBlksClosure: public BlkClosure {
coleenp@548 2347 private:
duke@435 2348 const CompactibleFreeListSpace* _sp;
duke@435 2349 const MemRegion _span;
ysr@2071 2350 HeapWord* _last_addr;
ysr@2071 2351 size_t _last_size;
ysr@2071 2352 bool _last_was_obj;
ysr@2071 2353 bool _last_was_live;
duke@435 2354
duke@435 2355 public:
duke@435 2356 VerifyAllBlksClosure(const CompactibleFreeListSpace* sp,
ysr@2071 2357 MemRegion span) : _sp(sp), _span(span),
ysr@2071 2358 _last_addr(NULL), _last_size(0),
ysr@2071 2359 _last_was_obj(false), _last_was_live(false) { }
duke@435 2360
coleenp@548 2361 virtual size_t do_blk(HeapWord* addr) {
duke@435 2362 size_t res;
ysr@2071 2363 bool was_obj = false;
ysr@2071 2364 bool was_live = false;
duke@435 2365 if (_sp->block_is_obj(addr)) {
ysr@2071 2366 was_obj = true;
duke@435 2367 oop p = oop(addr);
duke@435 2368 guarantee(p->is_oop(), "Should be an oop");
duke@435 2369 res = _sp->adjustObjectSize(p->size());
duke@435 2370 if (_sp->obj_is_alive(addr)) {
ysr@2071 2371 was_live = true;
duke@435 2372 p->verify();
duke@435 2373 }
duke@435 2374 } else {
duke@435 2375 FreeChunk* fc = (FreeChunk*)addr;
duke@435 2376 res = fc->size();
duke@435 2377 if (FLSVerifyLists && !fc->cantCoalesce()) {
jmasa@3732 2378 guarantee(_sp->verify_chunk_in_free_list(fc),
duke@435 2379 "Chunk should be on a free list");
duke@435 2380 }
duke@435 2381 }
ysr@2071 2382 if (res == 0) {
ysr@2071 2383 gclog_or_tty->print_cr("Livelock: no rank reduction!");
ysr@2071 2384 gclog_or_tty->print_cr(
ysr@2071 2385 " Current: addr = " PTR_FORMAT ", size = " SIZE_FORMAT ", obj = %s, live = %s \n"
ysr@2071 2386 " Previous: addr = " PTR_FORMAT ", size = " SIZE_FORMAT ", obj = %s, live = %s \n",
drchase@6680 2387 p2i(addr), res, was_obj ?"true":"false", was_live ?"true":"false",
drchase@6680 2388 p2i(_last_addr), _last_size, _last_was_obj?"true":"false", _last_was_live?"true":"false");
ysr@2071 2389 _sp->print_on(gclog_or_tty);
ysr@2071 2390 guarantee(false, "Seppuku!");
ysr@2071 2391 }
ysr@2071 2392 _last_addr = addr;
ysr@2071 2393 _last_size = res;
ysr@2071 2394 _last_was_obj = was_obj;
ysr@2071 2395 _last_was_live = was_live;
duke@435 2396 return res;
duke@435 2397 }
duke@435 2398 };
duke@435 2399
duke@435 2400 class VerifyAllOopsClosure: public OopClosure {
coleenp@548 2401 private:
duke@435 2402 const CMSCollector* _collector;
duke@435 2403 const CompactibleFreeListSpace* _sp;
duke@435 2404 const MemRegion _span;
duke@435 2405 const bool _past_remark;
duke@435 2406 const CMSBitMap* _bit_map;
duke@435 2407
coleenp@548 2408 protected:
coleenp@548 2409 void do_oop(void* p, oop obj) {
coleenp@548 2410 if (_span.contains(obj)) { // the interior oop points into CMS heap
coleenp@548 2411 if (!_span.contains(p)) { // reference from outside CMS heap
coleenp@548 2412 // Should be a valid object; the first disjunct below allows
coleenp@548 2413 // us to sidestep an assertion in block_is_obj() that insists
coleenp@548 2414 // that p be in _sp. Note that several generations (and spaces)
coleenp@548 2415 // are spanned by _span (CMS heap) above.
coleenp@548 2416 guarantee(!_sp->is_in_reserved(obj) ||
coleenp@548 2417 _sp->block_is_obj((HeapWord*)obj),
coleenp@548 2418 "Should be an object");
coleenp@548 2419 guarantee(obj->is_oop(), "Should be an oop");
coleenp@548 2420 obj->verify();
coleenp@548 2421 if (_past_remark) {
coleenp@548 2422 // Remark has been completed, the object should be marked
coleenp@548 2423           guarantee(_bit_map->isMarked((HeapWord*)obj), "Should be marked");
coleenp@548 2424 }
coleenp@548 2425 } else { // reference within CMS heap
coleenp@548 2426 if (_past_remark) {
coleenp@548 2427 // Remark has been completed -- so the referent should have
coleenp@548 2428 // been marked, if referring object is.
coleenp@548 2429 if (_bit_map->isMarked(_collector->block_start(p))) {
coleenp@548 2430 guarantee(_bit_map->isMarked((HeapWord*)obj), "Marking error?");
coleenp@548 2431 }
coleenp@548 2432 }
coleenp@548 2433 }
coleenp@548 2434 } else if (_sp->is_in_reserved(p)) {
coleenp@548 2435 // the reference is from FLS, and points out of FLS
coleenp@548 2436 guarantee(obj->is_oop(), "Should be an oop");
coleenp@548 2437 obj->verify();
coleenp@548 2438 }
coleenp@548 2439 }
coleenp@548 2440
coleenp@548 2441 template <class T> void do_oop_work(T* p) {
coleenp@548 2442 T heap_oop = oopDesc::load_heap_oop(p);
coleenp@548 2443 if (!oopDesc::is_null(heap_oop)) {
coleenp@548 2444 oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
coleenp@548 2445 do_oop(p, obj);
coleenp@548 2446 }
coleenp@548 2447 }
coleenp@548 2448
duke@435 2449 public:
duke@435 2450 VerifyAllOopsClosure(const CMSCollector* collector,
duke@435 2451 const CompactibleFreeListSpace* sp, MemRegion span,
duke@435 2452 bool past_remark, CMSBitMap* bit_map) :
coleenp@4037 2453 _collector(collector), _sp(sp), _span(span),
duke@435 2454 _past_remark(past_remark), _bit_map(bit_map) { }
duke@435 2455
coleenp@548 2456 virtual void do_oop(oop* p) { VerifyAllOopsClosure::do_oop_work(p); }
coleenp@548 2457 virtual void do_oop(narrowOop* p) { VerifyAllOopsClosure::do_oop_work(p); }
duke@435 2458 };
duke@435 2459
brutisso@3711 2460 void CompactibleFreeListSpace::verify() const {
duke@435 2461 assert_lock_strong(&_freelistLock);
duke@435 2462 verify_objects_initialized();
duke@435 2463 MemRegion span = _collector->_span;
duke@435 2464 bool past_remark = (_collector->abstract_state() ==
duke@435 2465 CMSCollector::Sweeping);
duke@435 2466
duke@435 2467 ResourceMark rm;
duke@435 2468 HandleMark hm;
duke@435 2469
duke@435 2470 // Check integrity of CFL data structures
duke@435 2471 _promoInfo.verify();
duke@435 2472 _dictionary->verify();
duke@435 2473 if (FLSVerifyIndexTable) {
duke@435 2474 verifyIndexedFreeLists();
duke@435 2475 }
duke@435 2476 // Check integrity of all objects and free blocks in space
duke@435 2477 {
duke@435 2478 VerifyAllBlksClosure cl(this, span);
duke@435 2479 ((CompactibleFreeListSpace*)this)->blk_iterate(&cl); // cast off const
duke@435 2480 }
duke@435 2481 // Check that all references in the heap to FLS
duke@435 2482 // are to valid objects in FLS or that references in
duke@435 2483 // FLS are to valid objects elsewhere in the heap
duke@435 2484 if (FLSVerifyAllHeapReferences)
duke@435 2485 {
duke@435 2486 VerifyAllOopsClosure cl(_collector, this, span, past_remark,
duke@435 2487 _collector->markBitMap());
duke@435 2488 CollectedHeap* ch = Universe::heap();
coleenp@4037 2489
coleenp@4037 2490 // Iterate over all oops in the heap. Uses the _no_header version
coleenp@4037 2491 // since we are not interested in following the klass pointers.
coleenp@4037 2492 ch->oop_iterate_no_header(&cl);
duke@435 2493 }
duke@435 2494
duke@435 2495 if (VerifyObjectStartArray) {
duke@435 2496 // Verify the block offset table
duke@435 2497 _bt.verify();
duke@435 2498 }
duke@435 2499 }
duke@435 2500
duke@435 2501 #ifndef PRODUCT
duke@435 2502 void CompactibleFreeListSpace::verifyFreeLists() const {
duke@435 2503 if (FLSVerifyLists) {
duke@435 2504 _dictionary->verify();
duke@435 2505 verifyIndexedFreeLists();
duke@435 2506 } else {
duke@435 2507 if (FLSVerifyDictionary) {
duke@435 2508 _dictionary->verify();
duke@435 2509 }
duke@435 2510 if (FLSVerifyIndexTable) {
duke@435 2511 verifyIndexedFreeLists();
duke@435 2512 }
duke@435 2513 }
duke@435 2514 }
duke@435 2515 #endif
duke@435 2516
duke@435 2517 void CompactibleFreeListSpace::verifyIndexedFreeLists() const {
duke@435 2518 size_t i = 0;
ysr@3264 2519 for (; i < IndexSetStart; i++) {
duke@435 2520 guarantee(_indexedFreeList[i].head() == NULL, "should be NULL");
duke@435 2521 }
duke@435 2522 for (; i < IndexSetSize; i++) {
duke@435 2523 verifyIndexedFreeList(i);
duke@435 2524 }
duke@435 2525 }
duke@435 2526
duke@435 2527 void CompactibleFreeListSpace::verifyIndexedFreeList(size_t size) const {
ysr@1580 2528 FreeChunk* fc = _indexedFreeList[size].head();
ysr@1580 2529 FreeChunk* tail = _indexedFreeList[size].tail();
ysr@1580 2530 size_t num = _indexedFreeList[size].count();
ysr@1580 2531 size_t n = 0;
ysr@3264 2532 guarantee(((size >= IndexSetStart) && (size % IndexSetStride == 0)) || fc == NULL,
ysr@3220 2533 "Slot should have been empty");
ysr@1580 2534 for (; fc != NULL; fc = fc->next(), n++) {
duke@435 2535 guarantee(fc->size() == size, "Size inconsistency");
jmasa@3732 2536 guarantee(fc->is_free(), "!free?");
duke@435 2537 guarantee(fc->next() == NULL || fc->next()->prev() == fc, "Broken list");
ysr@1580 2538 guarantee((fc->next() == NULL) == (fc == tail), "Incorrect tail");
duke@435 2539 }
ysr@1580 2540 guarantee(n == num, "Incorrect count");
duke@435 2541 }
duke@435 2542
duke@435 2543 #ifndef PRODUCT
ysr@3220 2544 void CompactibleFreeListSpace::check_free_list_consistency() const {
goetz@6337 2545 assert((TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >::min_size() <= IndexSetSize),
duke@435 2546 "Some sizes can't be allocated without recourse to"
duke@435 2547 " linear allocation buffers");
goetz@6337 2548 assert((TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >::min_size()*HeapWordSize == sizeof(TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >)),
duke@435 2549 "else MIN_TREE_CHUNK_SIZE is wrong");
brutisso@3807 2550 assert(IndexSetStart != 0, "IndexSetStart not initialized");
brutisso@3807 2551 assert(IndexSetStride != 0, "IndexSetStride not initialized");
duke@435 2552 }
duke@435 2553 #endif
duke@435 2554
ysr@447 2555 void CompactibleFreeListSpace::printFLCensus(size_t sweep_count) const {
duke@435 2556 assert_lock_strong(&_freelistLock);
jmasa@4196 2557 AdaptiveFreeList<FreeChunk> total;
ysr@447 2558 gclog_or_tty->print("end sweep# " SIZE_FORMAT "\n", sweep_count);
jmasa@4196 2559 AdaptiveFreeList<FreeChunk>::print_labels_on(gclog_or_tty, "size");
jmasa@3732 2560 size_t total_free = 0;
duke@435 2561 for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
jmasa@4196 2562 const AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
jmasa@3732 2563 total_free += fl->count() * fl->size();
ysr@447 2564 if (i % (40*IndexSetStride) == 0) {
jmasa@4196 2565 AdaptiveFreeList<FreeChunk>::print_labels_on(gclog_or_tty, "size");
ysr@447 2566 }
ysr@447 2567 fl->print_on(gclog_or_tty);
jmasa@3732 2568 total.set_bfr_surp( total.bfr_surp() + fl->bfr_surp() );
ysr@447 2569 total.set_surplus( total.surplus() + fl->surplus() );
ysr@447 2570 total.set_desired( total.desired() + fl->desired() );
jmasa@3732 2571 total.set_prev_sweep( total.prev_sweep() + fl->prev_sweep() );
jmasa@3732 2572 total.set_before_sweep(total.before_sweep() + fl->before_sweep());
ysr@447 2573 total.set_count( total.count() + fl->count() );
jmasa@3732 2574 total.set_coal_births( total.coal_births() + fl->coal_births() );
jmasa@3732 2575 total.set_coal_deaths( total.coal_deaths() + fl->coal_deaths() );
jmasa@3732 2576 total.set_split_births(total.split_births() + fl->split_births());
jmasa@3732 2577 total.set_split_deaths(total.split_deaths() + fl->split_deaths());
duke@435 2578 }
ysr@447 2579 total.print_on(gclog_or_tty, "TOTAL");
ysr@447 2580 gclog_or_tty->print_cr("Total free in indexed lists "
jmasa@3732 2581 SIZE_FORMAT " words", total_free);
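  // "growth" below is the net change in chunk population over the sweep
  // ((split + coal births) - (split + coal deaths)) divided by the
  // previous-sweep count; "deficit" is the shortfall of the current count
  // from the desired count, as a fraction of desired.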
duke@435 2582 gclog_or_tty->print("growth: %8.5f deficit: %8.5f\n",
jmasa@3732 2583 (double)(total.split_births()+total.coal_births()-total.split_deaths()-total.coal_deaths())/
jmasa@3732 2584 (total.prev_sweep() != 0 ? (double)total.prev_sweep() : 1.0),
ysr@447 2585 (double)(total.desired() - total.count())/(total.desired() != 0 ? (double)total.desired() : 1.0));
jmasa@3732 2586 _dictionary->print_dict_census();
duke@435 2587 }
duke@435 2588
ysr@1580 2589 ///////////////////////////////////////////////////////////////////////////
ysr@1580 2590 // CFLS_LAB
ysr@1580 2591 ///////////////////////////////////////////////////////////////////////////
ysr@1580 2592
ysr@1580 2593 #define VECTOR_257(x) \
ysr@1580 2594 /* 1 2 3 4 5 6 7 8 9 1x 11 12 13 14 15 16 17 18 19 2x 21 22 23 24 25 26 27 28 29 3x 31 32 */ \
ysr@1580 2595 { x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, \
ysr@1580 2596 x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, \
ysr@1580 2597 x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, \
ysr@1580 2598 x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, \
ysr@1580 2599 x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, \
ysr@1580 2600 x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, \
ysr@1580 2601 x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, \
ysr@1580 2602 x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, \
ysr@1580 2603 x }
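// VECTOR_257 expands to a 257-element initializer (8 rows of 32 entries plus
// one final entry), matching CompactibleFreeListSpace::IndexSetSize == 257;
// the assert in the CFLS_LAB constructor below checks that correspondence.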
ysr@1580 2604
ysr@1580 2605 // Initialize with default setting of CMSParPromoteBlocksToClaim, _not_
ysr@1580 2606 // OldPLABSize, whose static default is different; if overridden at the
ysr@1580 2607 // command-line, this will get reinitialized via a call to
ysr@1580 2608 // modify_initialization() below.
ysr@1580 2609 AdaptiveWeightedAverage CFLS_LAB::_blocks_to_claim[] =
ysr@1580 2610 VECTOR_257(AdaptiveWeightedAverage(OldPLABWeight, (float)CMSParPromoteBlocksToClaim));
ysr@1580 2611 size_t CFLS_LAB::_global_num_blocks[] = VECTOR_257(0);
jmasa@3357 2612 uint CFLS_LAB::_global_num_workers[] = VECTOR_257(0);
duke@435 2613
duke@435 2614 CFLS_LAB::CFLS_LAB(CompactibleFreeListSpace* cfls) :
duke@435 2615 _cfls(cfls)
duke@435 2616 {
ysr@1580 2617 assert(CompactibleFreeListSpace::IndexSetSize == 257, "Modify VECTOR_257() macro above");
duke@435 2618 for (size_t i = CompactibleFreeListSpace::IndexSetStart;
duke@435 2619 i < CompactibleFreeListSpace::IndexSetSize;
duke@435 2620 i += CompactibleFreeListSpace::IndexSetStride) {
duke@435 2621 _indexedFreeList[i].set_size(i);
ysr@1580 2622 _num_blocks[i] = 0;
ysr@1580 2623 }
ysr@1580 2624 }
ysr@1580 2625
ysr@1580 2626 static bool _CFLS_LAB_modified = false;
ysr@1580 2627
ysr@1580 2628 void CFLS_LAB::modify_initialization(size_t n, unsigned wt) {
ysr@1580 2629 assert(!_CFLS_LAB_modified, "Call only once");
ysr@1580 2630 _CFLS_LAB_modified = true;
ysr@1580 2631 for (size_t i = CompactibleFreeListSpace::IndexSetStart;
ysr@1580 2632 i < CompactibleFreeListSpace::IndexSetSize;
ysr@1580 2633 i += CompactibleFreeListSpace::IndexSetStride) {
ysr@1580 2634 _blocks_to_claim[i].modify(n, wt, true /* force */);
duke@435 2635 }
duke@435 2636 }
duke@435 2637
duke@435 2638 HeapWord* CFLS_LAB::alloc(size_t word_sz) {
duke@435 2639 FreeChunk* res;
ysr@2132 2640 assert(word_sz == _cfls->adjustObjectSize(word_sz), "Error");
duke@435 2641 if (word_sz >= CompactibleFreeListSpace::IndexSetSize) {
duke@435 2642 // This locking manages sync with other large object allocations.
duke@435 2643 MutexLockerEx x(_cfls->parDictionaryAllocLock(),
duke@435 2644 Mutex::_no_safepoint_check_flag);
duke@435 2645 res = _cfls->getChunkFromDictionaryExact(word_sz);
duke@435 2646 if (res == NULL) return NULL;
duke@435 2647 } else {
jmasa@4196 2648 AdaptiveFreeList<FreeChunk>* fl = &_indexedFreeList[word_sz];
duke@435 2649 if (fl->count() == 0) {
duke@435 2650 // Attempt to refill this local free list.
ysr@1580 2651 get_from_global_pool(word_sz, fl);
duke@435 2652 // If it didn't work, give up.
duke@435 2653 if (fl->count() == 0) return NULL;
duke@435 2654 }
jmasa@3732 2655 res = fl->get_chunk_at_head();
duke@435 2656 assert(res != NULL, "Why was count non-zero?");
duke@435 2657 }
duke@435 2658 res->markNotFree();
jmasa@3732 2659 assert(!res->is_free(), "shouldn't be marked free");
coleenp@622 2660 assert(oop(res)->klass_or_null() == NULL, "should look uninitialized");
duke@435 2661 // mangle a just allocated object with a distinct pattern.
duke@435 2662 debug_only(res->mangleAllocated(word_sz));
duke@435 2663 return (HeapWord*)res;
duke@435 2664 }
duke@435 2665
ysr@1580 2666 // Get a chunk of blocks of the right size and update related
ysr@1580 2667 // book-keeping stats
jmasa@4196 2668 void CFLS_LAB::get_from_global_pool(size_t word_sz, AdaptiveFreeList<FreeChunk>* fl) {
ysr@1580 2669 // Get the #blocks we want to claim
ysr@1580 2670 size_t n_blks = (size_t)_blocks_to_claim[word_sz].average();
ysr@1580 2671 assert(n_blks > 0, "Error");
ysr@1580 2672   assert(ResizeOldPLAB || n_blks == OldPLABSize, "Error");
ysr@1580 2673 // In some cases, when the application has a phase change,
ysr@1580 2674 // there may be a sudden and sharp shift in the object survival
ysr@1580 2675 // profile, and updating the counts at the end of a scavenge
ysr@1580 2676 // may not be quick enough, giving rise to large scavenge pauses
ysr@1580 2677 // during these phase changes. It is beneficial to detect such
ysr@1580 2678 // changes on-the-fly during a scavenge and avoid such a phase-change
ysr@1580 2679 // pothole. The following code is a heuristic attempt to do that.
ysr@1580 2680 // It is protected by a product flag until we have gained
ysr@1580 2681 // enough experience with this heuristic and fine-tuned its behaviour.
ysr@1580 2682 // WARNING: This might increase fragmentation if we overreact to
ysr@1580 2683 // small spikes, so some kind of historical smoothing based on
ysr@1580 2684 // previous experience with the greater reactivity might be useful.
ysr@1580 2685 // Lacking sufficient experience, CMSOldPLABResizeQuicker is disabled by
ysr@1580 2686 // default.
ysr@1580 2687 if (ResizeOldPLAB && CMSOldPLABResizeQuicker) {
ysr@1580 2688 size_t multiple = _num_blocks[word_sz]/(CMSOldPLABToleranceFactor*CMSOldPLABNumRefills*n_blks);
ysr@1580 2689 n_blks += CMSOldPLABReactivityFactor*multiple*n_blks;
ysr@1580 2690 n_blks = MIN2(n_blks, CMSOldPLABMax);
ysr@1580 2691 }
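  // In effect: when this thread's demand so far in the scavenge
  // (_num_blocks[word_sz]) exceeds CMSOldPLABToleranceFactor times the
  // expected demand (CMSOldPLABNumRefills * n_blks), the refill size is
  // scaled up by CMSOldPLABReactivityFactor for each such multiple,
  // capped at CMSOldPLABMax.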
ysr@1580 2692 assert(n_blks > 0, "Error");
ysr@1580 2693 _cfls->par_get_chunk_of_blocks(word_sz, n_blks, fl);
ysr@1580 2694 // Update stats table entry for this block size
ysr@1580 2695 _num_blocks[word_sz] += fl->count();
ysr@1580 2696 }
ysr@1580 2697
ysr@1580 2698 void CFLS_LAB::compute_desired_plab_size() {
ysr@1580 2699 for (size_t i = CompactibleFreeListSpace::IndexSetStart;
duke@435 2700 i < CompactibleFreeListSpace::IndexSetSize;
duke@435 2701 i += CompactibleFreeListSpace::IndexSetStride) {
ysr@1580 2702 assert((_global_num_workers[i] == 0) == (_global_num_blocks[i] == 0),
ysr@1580 2703 "Counter inconsistency");
ysr@1580 2704 if (_global_num_workers[i] > 0) {
ysr@1580 2705 // Need to smooth wrt historical average
ysr@1580 2706 if (ResizeOldPLAB) {
ysr@1580 2707 _blocks_to_claim[i].sample(
ysr@1580 2708 MAX2((size_t)CMSOldPLABMin,
ysr@1580 2709 MIN2((size_t)CMSOldPLABMax,
ysr@1580 2710 _global_num_blocks[i]/(_global_num_workers[i]*CMSOldPLABNumRefills))));
ysr@1580 2711 }
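      // Each sample is last scavenge's demand per worker per refill,
      // clamped to [CMSOldPLABMin, CMSOldPLABMax]; the weighted average of
      // these samples sets the refill size used by get_from_global_pool().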
ysr@1580 2712 // Reset counters for next round
ysr@1580 2713 _global_num_workers[i] = 0;
ysr@1580 2714 _global_num_blocks[i] = 0;
ysr@1580 2715 if (PrintOldPLAB) {
drchase@6680 2716 gclog_or_tty->print_cr("[" SIZE_FORMAT "]: " SIZE_FORMAT, i, (size_t)_blocks_to_claim[i].average());
ysr@1580 2717 }
duke@435 2718 }
duke@435 2719 }
duke@435 2720 }
duke@435 2721
ysr@3220 2722 // If this is changed in the future to allow parallel
ysr@3220 2723 // access, one would need to take the FL locks and,
ysr@3220 2724 // depending on how it is used, stagger access from
ysr@3220 2725 // parallel threads to reduce contention.
ysr@1580 2726 void CFLS_LAB::retire(int tid) {
ysr@1580 2727 // We run this single threaded with the world stopped;
ysr@1580 2728 // so no need for locks and such.
ysr@1580 2729 NOT_PRODUCT(Thread* t = Thread::current();)
ysr@1580 2730 assert(Thread::current()->is_VM_thread(), "Error");
ysr@1580 2731 for (size_t i = CompactibleFreeListSpace::IndexSetStart;
ysr@1580 2732 i < CompactibleFreeListSpace::IndexSetSize;
ysr@1580 2733 i += CompactibleFreeListSpace::IndexSetStride) {
ysr@1580 2734 assert(_num_blocks[i] >= (size_t)_indexedFreeList[i].count(),
ysr@1580 2735 "Can't retire more than what we obtained");
ysr@1580 2736 if (_num_blocks[i] > 0) {
ysr@1580 2737 size_t num_retire = _indexedFreeList[i].count();
ysr@1580 2738 assert(_num_blocks[i] > num_retire, "Should have used at least one");
ysr@1580 2739 {
ysr@3220 2740 // MutexLockerEx x(_cfls->_indexedFreeListParLocks[i],
ysr@3220 2741 // Mutex::_no_safepoint_check_flag);
ysr@3220 2742
ysr@1580 2743 // Update globals stats for num_blocks used
ysr@1580 2744 _global_num_blocks[i] += (_num_blocks[i] - num_retire);
ysr@1580 2745 _global_num_workers[i]++;
jmasa@3357 2746 assert(_global_num_workers[i] <= ParallelGCThreads, "Too big");
ysr@1580 2747 if (num_retire > 0) {
ysr@1580 2748 _cfls->_indexedFreeList[i].prepend(&_indexedFreeList[i]);
ysr@1580 2749 // Reset this list.
jmasa@4196 2750 _indexedFreeList[i] = AdaptiveFreeList<FreeChunk>();
ysr@1580 2751 _indexedFreeList[i].set_size(i);
ysr@1580 2752 }
ysr@1580 2753 }
ysr@1580 2754 if (PrintOldPLAB) {
drchase@6680 2755 gclog_or_tty->print_cr("%d[" SIZE_FORMAT "]: " SIZE_FORMAT "/" SIZE_FORMAT "/" SIZE_FORMAT,
ysr@1580 2756 tid, i, num_retire, _num_blocks[i], (size_t)_blocks_to_claim[i].average());
ysr@1580 2757 }
ysr@1580 2758 // Reset stats for next round
ysr@1580 2759 _num_blocks[i] = 0;
ysr@1580 2760 }
ysr@1580 2761 }
ysr@1580 2762 }
ysr@1580 2763
jmasa@4196 2764 void CompactibleFreeListSpace::par_get_chunk_of_blocks(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl) {
duke@435 2765 assert(fl->count() == 0, "Precondition.");
duke@435 2766 assert(word_sz < CompactibleFreeListSpace::IndexSetSize,
duke@435 2767 "Precondition");
duke@435 2768
ysr@1580 2769 // We'll try all multiples of word_sz in the indexed set, starting with
ysr@1580 2770 // word_sz itself and, if CMSSplitIndexedFreeListBlocks, try larger multiples,
ysr@1580 2771 // then try getting a big chunk and splitting it.
ysr@1580 2772 {
ysr@1580 2773 bool found;
ysr@1580 2774 int k;
ysr@1580 2775 size_t cur_sz;
ysr@1580 2776 for (k = 1, cur_sz = k * word_sz, found = false;
ysr@1580 2777 (cur_sz < CompactibleFreeListSpace::IndexSetSize) &&
ysr@1580 2778 (CMSSplitIndexedFreeListBlocks || k <= 1);
ysr@1580 2779 k++, cur_sz = k * word_sz) {
jmasa@4196 2780 AdaptiveFreeList<FreeChunk> fl_for_cur_sz; // Empty.
ysr@1580 2781 fl_for_cur_sz.set_size(cur_sz);
ysr@1580 2782 {
ysr@1580 2783 MutexLockerEx x(_indexedFreeListParLocks[cur_sz],
ysr@1580 2784 Mutex::_no_safepoint_check_flag);
jmasa@4196 2785 AdaptiveFreeList<FreeChunk>* gfl = &_indexedFreeList[cur_sz];
ysr@1580 2786 if (gfl->count() != 0) {
ysr@1580 2787 // nn is the number of chunks of size cur_sz that
ysr@1580 2788 // we'd need to split k-ways each, in order to create
ysr@1580 2789 // "n" chunks of size word_sz each.
ysr@1580 2790 const size_t nn = MAX2(n/k, (size_t)1);
ysr@1580 2791 gfl->getFirstNChunksFromList(nn, &fl_for_cur_sz);
ysr@1580 2792 found = true;
ysr@1580 2793 if (k > 1) {
ysr@1580 2794 // Update split death stats for the cur_sz-size blocks list:
ysr@1580 2795 // we increment the split death count by the number of blocks
ysr@1580 2796 // we just took from the cur_sz-size blocks list and which
ysr@1580 2797 // we will be splitting below.
jmasa@3732 2798 ssize_t deaths = gfl->split_deaths() +
ysr@1580 2799 fl_for_cur_sz.count();
jmasa@3732 2800 gfl->set_split_deaths(deaths);
ysr@1580 2801 }
ysr@1580 2802 }
ysr@1580 2803 }
ysr@1580 2804 // Now transfer fl_for_cur_sz to fl. Common case, we hope, is k = 1.
ysr@1580 2805 if (found) {
ysr@1580 2806 if (k == 1) {
ysr@1580 2807 fl->prepend(&fl_for_cur_sz);
ysr@1580 2808 } else {
ysr@1580 2809 // Divide each block on fl_for_cur_sz up k ways.
ysr@1580 2810 FreeChunk* fc;
jmasa@3732 2811 while ((fc = fl_for_cur_sz.get_chunk_at_head()) != NULL) {
ysr@1580 2812 // Must do this in reverse order, so that anybody attempting to
ysr@1580 2813 // access the main chunk sees it as a single free block until we
ysr@1580 2814 // change it.
ysr@1580 2815 size_t fc_size = fc->size();
jmasa@3732 2816 assert(fc->is_free(), "Error");
ysr@1580 2817 for (int i = k-1; i >= 0; i--) {
ysr@1580 2818 FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz);
ysr@2071 2819 assert((i != 0) ||
jmasa@3732 2820 ((fc == ffc) && ffc->is_free() &&
ysr@2071 2821 (ffc->size() == k*word_sz) && (fc_size == word_sz)),
ysr@2071 2822 "Counting error");
jmasa@3732 2823 ffc->set_size(word_sz);
jmasa@3732 2824 ffc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
jmasa@3732 2825 ffc->link_next(NULL);
ysr@1580 2826 // Above must occur before BOT is updated below.
ysr@2071 2827 OrderAccess::storestore();
ysr@2071 2828 // splitting from the right, fc_size == i * word_sz
ysr@2071 2829 _bt.mark_block((HeapWord*)ffc, word_sz, true /* reducing */);
ysr@1580 2830 fc_size -= word_sz;
ysr@2071 2831 assert(fc_size == i*word_sz, "Error");
ysr@2071 2832 _bt.verify_not_unallocated((HeapWord*)ffc, word_sz);
ysr@1580 2833 _bt.verify_single_block((HeapWord*)fc, fc_size);
ysr@2071 2834 _bt.verify_single_block((HeapWord*)ffc, word_sz);
ysr@1580 2835 // Push this on "fl".
jmasa@3732 2836 fl->return_chunk_at_head(ffc);
ysr@1580 2837 }
ysr@1580 2838 // TRAP
ysr@1580 2839 assert(fl->tail()->next() == NULL, "List invariant.");
ysr@1580 2840 }
ysr@1580 2841 }
ysr@1580 2842 // Update birth stats for this block size.
ysr@1580 2843 size_t num = fl->count();
ysr@1580 2844 MutexLockerEx x(_indexedFreeListParLocks[word_sz],
ysr@1580 2845 Mutex::_no_safepoint_check_flag);
jmasa@3732 2846 ssize_t births = _indexedFreeList[word_sz].split_births() + num;
jmasa@3732 2847 _indexedFreeList[word_sz].set_split_births(births);
ysr@1580 2848 return;
duke@435 2849 }
duke@435 2850 }
duke@435 2851 }
duke@435 2852 // Otherwise, we'll split a block from the dictionary.
duke@435 2853 FreeChunk* fc = NULL;
duke@435 2854 FreeChunk* rem_fc = NULL;
duke@435 2855 size_t rem;
duke@435 2856 {
duke@435 2857 MutexLockerEx x(parDictionaryAllocLock(),
duke@435 2858 Mutex::_no_safepoint_check_flag);
duke@435 2859 while (n > 0) {
jmasa@4196 2860 fc = dictionary()->get_chunk(MAX2(n * word_sz, _dictionary->min_size()),
jmasa@3730 2861 FreeBlockDictionary<FreeChunk>::atLeast);
duke@435 2862 if (fc != NULL) {
ysr@2071 2863 _bt.allocated((HeapWord*)fc, fc->size(), true /* reducing */); // update _unallocated_blk
jmasa@4196 2864 dictionary()->dict_census_update(fc->size(),
duke@435 2865 true /*split*/,
duke@435 2866 false /*birth*/);
duke@435 2867 break;
duke@435 2868 } else {
duke@435 2869 n--;
duke@435 2870 }
duke@435 2871 }
duke@435 2872 if (fc == NULL) return;
ysr@2071 2873 // Otherwise, split up that block.
ysr@1580 2874 assert((ssize_t)n >= 1, "Control point invariant");
jmasa@3732 2875 assert(fc->is_free(), "Error: should be a free block");
ysr@2071 2876 _bt.verify_single_block((HeapWord*)fc, fc->size());
ysr@1580 2877 const size_t nn = fc->size() / word_sz;
duke@435 2878 n = MIN2(nn, n);
ysr@1580 2879 assert((ssize_t)n >= 1, "Control point invariant");
duke@435 2880 rem = fc->size() - n * word_sz;
duke@435 2881 // If there is a remainder, and it's too small, allocate one fewer.
duke@435 2882 if (rem > 0 && rem < MinChunkSize) {
duke@435 2883 n--; rem += word_sz;
duke@435 2884 }
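    // Keeping the remainder at least MinChunkSize ensures it remains
    // representable as a free chunk when it is handed back to an indexed
    // free list or to the dictionary below.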
jmasa@1583 2885 // Note that at this point we may have n == 0.
jmasa@1583 2886 assert((ssize_t)n >= 0, "Control point invariant");
jmasa@1583 2887
jmasa@1583 2888 // If n is 0, the chunk fc that was found is not large
jmasa@1583 2889 // enough to leave a viable remainder. We are unable to
jmasa@1583 2890 // allocate even one block. Return fc to the
jmasa@1583 2891 // dictionary and return, leaving "fl" empty.
jmasa@1583 2892 if (n == 0) {
jmasa@1583 2893 returnChunkToDictionary(fc);
ysr@2071 2894 assert(fl->count() == 0, "We never allocated any blocks");
jmasa@1583 2895 return;
jmasa@1583 2896 }
jmasa@1583 2897
duke@435 2898 // First return the remainder, if any.
duke@435 2899 // Note that we hold the lock until we decide if we're going to give
ysr@1580 2900 // back the remainder to the dictionary, since a concurrent allocation
duke@435 2901 // may otherwise see the heap as empty. (We're willing to take that
duke@435 2902 // hit if the block is a small block.)
duke@435 2903 if (rem > 0) {
duke@435 2904 size_t prefix_size = n * word_sz;
duke@435 2905 rem_fc = (FreeChunk*)((HeapWord*)fc + prefix_size);
jmasa@3732 2906 rem_fc->set_size(rem);
jmasa@3732 2907 rem_fc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
jmasa@3732 2908 rem_fc->link_next(NULL);
duke@435 2909 // Above must occur before BOT is updated below.
ysr@1580 2910 assert((ssize_t)n > 0 && prefix_size > 0 && rem_fc > fc, "Error");
ysr@2071 2911 OrderAccess::storestore();
duke@435 2912 _bt.split_block((HeapWord*)fc, fc->size(), prefix_size);
jmasa@3732 2913 assert(fc->is_free(), "Error");
jmasa@3732 2914 fc->set_size(prefix_size);
duke@435 2915 if (rem >= IndexSetSize) {
duke@435 2916 returnChunkToDictionary(rem_fc);
jmasa@4196 2917 dictionary()->dict_census_update(rem, true /*split*/, true /*birth*/);
duke@435 2918 rem_fc = NULL;
duke@435 2919 }
duke@435 2920 // Otherwise, return it to the small list below.
duke@435 2921 }
duke@435 2922 }
duke@435 2923 if (rem_fc != NULL) {
duke@435 2924 MutexLockerEx x(_indexedFreeListParLocks[rem],
duke@435 2925 Mutex::_no_safepoint_check_flag);
duke@435 2926 _bt.verify_not_unallocated((HeapWord*)rem_fc, rem_fc->size());
jmasa@3732 2927 _indexedFreeList[rem].return_chunk_at_head(rem_fc);
duke@435 2928 smallSplitBirth(rem);
duke@435 2929 }
ysr@1580 2930 assert((ssize_t)n > 0 && fc != NULL, "Consistency");
duke@435 2931 // Now do the splitting up.
duke@435 2932 // Must do this in reverse order, so that anybody attempting to
duke@435 2933 // access the main chunk sees it as a single free block until we
duke@435 2934 // change it.
duke@435 2935 size_t fc_size = n * word_sz;
duke@435 2936 // All but first chunk in this loop
duke@435 2937 for (ssize_t i = n-1; i > 0; i--) {
duke@435 2938 FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz);
jmasa@3732 2939 ffc->set_size(word_sz);
jmasa@3732 2940 ffc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
jmasa@3732 2941 ffc->link_next(NULL);
duke@435 2942 // Above must occur before BOT is updated below.
ysr@2071 2943 OrderAccess::storestore();
duke@435 2944     // Splitting from the right; after the decrement below, fc_size == i * word_sz.
ysr@2071 2945 _bt.mark_block((HeapWord*)ffc, word_sz, true /* reducing */);
duke@435 2946 fc_size -= word_sz;
duke@435 2947 _bt.verify_not_unallocated((HeapWord*)ffc, ffc->size());
duke@435 2948 _bt.verify_single_block((HeapWord*)ffc, ffc->size());
duke@435 2949 _bt.verify_single_block((HeapWord*)fc, fc_size);
duke@435 2950 // Push this on "fl".
jmasa@3732 2951 fl->return_chunk_at_head(ffc);
duke@435 2952 }
duke@435 2953 // First chunk
jmasa@3732 2954 assert(fc->is_free() && fc->size() == n*word_sz, "Error: should still be a free block");
ysr@2071 2955 // The blocks above should show their new sizes before the first block below
jmasa@3732 2956 fc->set_size(word_sz);
jmasa@3732 2957 fc->link_prev(NULL); // idempotent wrt free-ness, see assert above
jmasa@3732 2958 fc->link_next(NULL);
duke@435 2959 _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
duke@435 2960 _bt.verify_single_block((HeapWord*)fc, fc->size());
jmasa@3732 2961 fl->return_chunk_at_head(fc);
duke@435 2962
ysr@1580 2963 assert((ssize_t)n > 0 && (ssize_t)n == fl->count(), "Incorrect number of blocks");
duke@435 2964 {
ysr@1580 2965 // Update the stats for this block size.
duke@435 2966 MutexLockerEx x(_indexedFreeListParLocks[word_sz],
duke@435 2967 Mutex::_no_safepoint_check_flag);
jmasa@3732 2968 const ssize_t births = _indexedFreeList[word_sz].split_births() + n;
jmasa@3732 2969 _indexedFreeList[word_sz].set_split_births(births);
ysr@1580 2970 // ssize_t new_surplus = _indexedFreeList[word_sz].surplus() + n;
ysr@1580 2971 // _indexedFreeList[word_sz].set_surplus(new_surplus);
duke@435 2972 }
duke@435 2973
duke@435 2974 // TRAP
duke@435 2975 assert(fl->tail()->next() == NULL, "List invariant.");
duke@435 2976 }
duke@435 2977
duke@435 2978 // Set up the space's par_seq_tasks structure for work claiming
duke@435 2979 // for parallel rescan. See CMSParRemarkTask where this is currently used.
duke@435 2980 // XXX Need to suitably abstract and generalize this and the next
duke@435 2981 // method into one.
duke@435 2982 void
duke@435 2983 CompactibleFreeListSpace::
duke@435 2984 initialize_sequential_subtasks_for_rescan(int n_threads) {
duke@435 2985 // The "size" of each task is fixed according to rescan_task_size.
duke@435 2986 assert(n_threads > 0, "Unexpected n_threads argument");
duke@435 2987 const size_t task_size = rescan_task_size();
duke@435 2988 size_t n_tasks = (used_region().word_size() + task_size - 1)/task_size;
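  // n_tasks is the ceiling of used_region().word_size() / task_size, so the
  // final task may span a partial stride; the asserts below check exactly
  // that bracketing of used_region().end().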
ysr@775 2989 assert((n_tasks == 0) == used_region().is_empty(), "n_tasks incorrect");
ysr@775 2990 assert(n_tasks == 0 ||
ysr@775 2991 ((used_region().start() + (n_tasks - 1)*task_size < used_region().end()) &&
ysr@775 2992 (used_region().start() + n_tasks*task_size >= used_region().end())),
ysr@775 2993 "n_tasks calculation incorrect");
duke@435 2994 SequentialSubTasksDone* pst = conc_par_seq_tasks();
duke@435 2995 assert(!pst->valid(), "Clobbering existing data?");
jmasa@2188 2996 // Sets the condition for completion of the subtask (how many threads
jmasa@2188 2997 // need to finish in order to be done).
jmasa@2188 2998 pst->set_n_threads(n_threads);
duke@435 2999 pst->set_n_tasks((int)n_tasks);
duke@435 3000 }
duke@435 3001
duke@435 3002 // Set up the space's par_seq_tasks structure for work claiming
duke@435 3003 // for parallel concurrent marking. See CMSConcMarkTask where this is currently used.
duke@435 3004 void
duke@435 3005 CompactibleFreeListSpace::
duke@435 3006 initialize_sequential_subtasks_for_marking(int n_threads,
duke@435 3007 HeapWord* low) {
duke@435 3008 // The "size" of each task is fixed according to rescan_task_size.
duke@435 3009 assert(n_threads > 0, "Unexpected n_threads argument");
duke@435 3010 const size_t task_size = marking_task_size();
duke@435 3011 assert(task_size > CardTableModRefBS::card_size_in_words &&
duke@435 3012 (task_size % CardTableModRefBS::card_size_in_words == 0),
duke@435 3013 "Otherwise arithmetic below would be incorrect");
duke@435 3014 MemRegion span = _gen->reserved();
duke@435 3015 if (low != NULL) {
duke@435 3016 if (span.contains(low)) {
duke@435 3017 // Align low down to a card boundary so that
duke@435 3018 // we can use block_offset_careful() on span boundaries.
duke@435 3019 HeapWord* aligned_low = (HeapWord*)align_size_down((uintptr_t)low,
duke@435 3020 CardTableModRefBS::card_size);
duke@435 3021 // Clip span prefix at aligned_low
duke@435 3022 span = span.intersection(MemRegion(aligned_low, span.end()));
duke@435 3023 } else if (low > span.end()) {
duke@435 3024 span = MemRegion(low, low); // Null region
duke@435 3025 } // else use entire span
duke@435 3026 }
duke@435 3027 assert(span.is_empty() ||
duke@435 3028 ((uintptr_t)span.start() % CardTableModRefBS::card_size == 0),
duke@435 3029 "span should start at a card boundary");
duke@435 3030 size_t n_tasks = (span.word_size() + task_size - 1)/task_size;
duke@435 3031 assert((n_tasks == 0) == span.is_empty(), "Inconsistency");
duke@435 3032 assert(n_tasks == 0 ||
duke@435 3033 ((span.start() + (n_tasks - 1)*task_size < span.end()) &&
duke@435 3034 (span.start() + n_tasks*task_size >= span.end())),
ysr@775 3035 "n_tasks calculation incorrect");
duke@435 3036 SequentialSubTasksDone* pst = conc_par_seq_tasks();
duke@435 3037 assert(!pst->valid(), "Clobbering existing data?");
jmasa@2188 3038 // Sets the condition for completion of the subtask (how many threads
jmasa@2188 3039 // need to finish in order to be done).
jmasa@2188 3040 pst->set_n_threads(n_threads);
duke@435 3041 pst->set_n_tasks((int)n_tasks);
duke@435 3042 }
