src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp

author:      aoqi
date:        Thu, 12 Oct 2017 21:27:07 +0800
changeset:   7535:7ae4e26cb1e0 (merge)
parent:      7485:9fa3bf3043a2
parent:      6876:710a3c8b516e
child:       9806:758c07667682
permissions: -rw-r--r--

aoqi@0 1 /*
aoqi@0 2 * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
aoqi@0 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
aoqi@0 4 *
aoqi@0 5 * This code is free software; you can redistribute it and/or modify it
aoqi@0 6 * under the terms of the GNU General Public License version 2 only, as
aoqi@0 7 * published by the Free Software Foundation.
aoqi@0 8 *
aoqi@0 9 * This code is distributed in the hope that it will be useful, but WITHOUT
aoqi@0 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
aoqi@0 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
aoqi@0 12 * version 2 for more details (a copy is included in the LICENSE file that
aoqi@0 13 * accompanied this code).
aoqi@0 14 *
aoqi@0 15 * You should have received a copy of the GNU General Public License version
aoqi@0 16 * 2 along with this work; if not, write to the Free Software Foundation,
aoqi@0 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
aoqi@0 18 *
aoqi@0 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
aoqi@0 20 * or visit www.oracle.com if you need additional information or have any
aoqi@0 21 * questions.
aoqi@0 22 *
aoqi@0 23 */
aoqi@0 24
aoqi@0 25 #include "precompiled.hpp"
aoqi@0 26 #include "gc_implementation/concurrentMarkSweep/cmsLockVerifier.hpp"
aoqi@0 27 #include "gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp"
aoqi@0 28 #include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.inline.hpp"
aoqi@0 29 #include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.hpp"
aoqi@0 30 #include "gc_implementation/shared/liveRange.hpp"
aoqi@0 31 #include "gc_implementation/shared/spaceDecorator.hpp"
aoqi@0 32 #include "gc_interface/collectedHeap.inline.hpp"
aoqi@0 33 #include "memory/allocation.inline.hpp"
aoqi@0 34 #include "memory/blockOffsetTable.inline.hpp"
aoqi@0 35 #include "memory/resourceArea.hpp"
goetz@6912 36 #include "memory/space.inline.hpp"
aoqi@0 37 #include "memory/universe.inline.hpp"
aoqi@0 38 #include "oops/oop.inline.hpp"
aoqi@0 39 #include "runtime/globals.hpp"
aoqi@0 40 #include "runtime/handles.inline.hpp"
aoqi@0 41 #include "runtime/init.hpp"
aoqi@0 42 #include "runtime/java.hpp"
goetz@6911 43 #include "runtime/orderAccess.inline.hpp"
aoqi@0 44 #include "runtime/vmThread.hpp"
aoqi@0 45 #include "utilities/copy.hpp"
aoqi@0 46
aoqi@0 47 /////////////////////////////////////////////////////////////////////////
aoqi@0 48 //// CompactibleFreeListSpace
aoqi@0 49 /////////////////////////////////////////////////////////////////////////
aoqi@0 50
aoqi@0 51 // highest ranked free list lock rank
aoqi@0 52 int CompactibleFreeListSpace::_lockRank = Mutex::leaf + 3;
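// Each CompactibleFreeListSpace constructor consumes one rank from this
// counter via the post-decrement in its _freelistLock initializer, so
// successive constructions use leaf+3, then leaf+2, matching the
// [_leaf+2, _leaf+3] range noted in the constructor below.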
aoqi@0 53
aoqi@0 54 // Defaults are 0 so things will break badly if incorrectly initialized.
aoqi@0 55 size_t CompactibleFreeListSpace::IndexSetStart = 0;
aoqi@0 56 size_t CompactibleFreeListSpace::IndexSetStride = 0;
aoqi@0 57
aoqi@0 58 size_t MinChunkSize = 0;
aoqi@0 59
aoqi@0 60 void CompactibleFreeListSpace::set_cms_values() {
aoqi@0 61 // Set CMS global values
aoqi@0 62 assert(MinChunkSize == 0, "already set");
aoqi@0 63
aoqi@0 64 // MinChunkSize should be a multiple of MinObjAlignment and be large enough
aoqi@0 65 // for chunks to contain a FreeChunk.
aoqi@0 66 size_t min_chunk_size_in_bytes = align_size_up(sizeof(FreeChunk), MinObjAlignmentInBytes);
aoqi@0 67 MinChunkSize = min_chunk_size_in_bytes / BytesPerWord;
aoqi@0 68
aoqi@0 69 assert(IndexSetStart == 0 && IndexSetStride == 0, "already set");
aoqi@0 70 IndexSetStart = MinChunkSize;
aoqi@0 71 IndexSetStride = MinObjAlignment;
aoqi@0 72 }
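// Illustration of the arithmetic above, with assumed (not guaranteed)
// numbers: on an LP64 VM where sizeof(FreeChunk) is 24 bytes and
// MinObjAlignmentInBytes is 8, align_size_up(24, 8) == 24, so
// MinChunkSize == 24 / BytesPerWord == 3 words, IndexSetStart == 3, and
// IndexSetStride == MinObjAlignment == 1.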
aoqi@0 73
aoqi@0 74 // Constructor
aoqi@0 75 CompactibleFreeListSpace::CompactibleFreeListSpace(BlockOffsetSharedArray* bs,
aoqi@0 76 MemRegion mr, bool use_adaptive_freelists,
aoqi@0 77 FreeBlockDictionary<FreeChunk>::DictionaryChoice dictionaryChoice) :
aoqi@0 78 _dictionaryChoice(dictionaryChoice),
aoqi@0 79 _adaptive_freelists(use_adaptive_freelists),
aoqi@0 80 _bt(bs, mr),
aoqi@0 81 // free list locks are in the range of values taken by _lockRank
aoqi@0 82 // This range currently is [_leaf+2, _leaf+3]
aoqi@0 83 // Note: this requires that CFLspace c'tors
aoqi@0 84 // are called serially in the order in which the locks
aoqi@0 85 // are acquired in the program text. This is true today.
aoqi@0 86 _freelistLock(_lockRank--, "CompactibleFreeListSpace._lock", true),
aoqi@0 87 _parDictionaryAllocLock(Mutex::leaf - 1, // == rank(ExpandHeap_lock) - 1
aoqi@0 88 "CompactibleFreeListSpace._dict_par_lock", true),
aoqi@0 89 _rescan_task_size(CardTableModRefBS::card_size_in_words * BitsPerWord *
aoqi@0 90 CMSRescanMultiple),
aoqi@0 91 _marking_task_size(CardTableModRefBS::card_size_in_words * BitsPerWord *
aoqi@0 92 CMSConcMarkMultiple),
aoqi@0 93 _collector(NULL)
aoqi@0 94 {
aoqi@0 95 assert(sizeof(FreeChunk) / BytesPerWord <= MinChunkSize,
aoqi@0 96 "FreeChunk is larger than expected");
aoqi@0 97 _bt.set_space(this);
aoqi@0 98 initialize(mr, SpaceDecorator::Clear, SpaceDecorator::Mangle);
aoqi@0 99 // We have all of "mr", all of which we place in the dictionary
aoqi@0 100 // as one big chunk. We'll need to decide here which of several
aoqi@0 101 // possible alternative dictionary implementations to use. For
aoqi@0 102 // now the choice is easy, since we have only one working
aoqi@0 103 // implementation, namely, the simple binary tree (splaying
aoqi@0 104 // temporarily disabled).
aoqi@0 105 switch (dictionaryChoice) {
aoqi@0 106 case FreeBlockDictionary<FreeChunk>::dictionaryBinaryTree:
aoqi@0 107 _dictionary = new AFLBinaryTreeDictionary(mr);
aoqi@0 108 break;
aoqi@0 109 case FreeBlockDictionary<FreeChunk>::dictionarySplayTree:
aoqi@0 110 case FreeBlockDictionary<FreeChunk>::dictionarySkipList:
aoqi@0 111 default:
aoqi@0 112 warning("dictionaryChoice: selected option not understood; using"
aoqi@0 113 " default BinaryTreeDictionary implementation instead.");
aoqi@0 114 }
aoqi@0 115 assert(_dictionary != NULL, "CMS dictionary initialization");
aoqi@0 116 // The indexed free lists are initially all empty and are lazily
aoqi@0 117 // filled in on demand. Initialize the array elements to NULL.
aoqi@0 118 initializeIndexedFreeListArray();
aoqi@0 119
aoqi@0 120 // Not using adaptive free lists assumes that allocation is first
aoqi@0 121 // from the linAB's. Also a cms perm gen which can be compacted
aoqi@0 122 // has to have the klass's klassKlass allocated at a lower
aoqi@0 123 // address in the heap than the klass so that the klassKlass is
aoqi@0 124 // moved to its new location before the klass is moved.
aoqi@0 125 // Set the _refillSize for the linear allocation blocks
aoqi@0 126 if (!use_adaptive_freelists) {
aoqi@0 127 FreeChunk* fc = _dictionary->get_chunk(mr.word_size(),
aoqi@0 128 FreeBlockDictionary<FreeChunk>::atLeast);
aoqi@0 129 // The small linAB initially has all the space and will allocate
aoqi@0 130 // a chunk of any size.
aoqi@0 131 HeapWord* addr = (HeapWord*) fc;
aoqi@0 132 _smallLinearAllocBlock.set(addr, fc->size() ,
aoqi@0 133 1024*SmallForLinearAlloc, fc->size());
aoqi@0 134 // Note that _unallocated_block is not updated here.
aoqi@0 135 // Allocations from the linear allocation block should
aoqi@0 136 // update it.
aoqi@0 137 } else {
aoqi@0 138 _smallLinearAllocBlock.set(0, 0, 1024*SmallForLinearAlloc,
aoqi@0 139 SmallForLinearAlloc);
aoqi@0 140 }
aoqi@0 141 // CMSIndexedFreeListReplenish should be at least 1
aoqi@0 142 CMSIndexedFreeListReplenish = MAX2((uintx)1, CMSIndexedFreeListReplenish);
aoqi@0 143 _promoInfo.setSpace(this);
aoqi@0 144 if (UseCMSBestFit) {
aoqi@0 145 _fitStrategy = FreeBlockBestFitFirst;
aoqi@0 146 } else {
aoqi@0 147 _fitStrategy = FreeBlockStrategyNone;
aoqi@0 148 }
aoqi@0 149 check_free_list_consistency();
aoqi@0 150
aoqi@0 151 // Initialize locks for parallel case.
aoqi@0 152
aoqi@0 153 if (CollectedHeap::use_parallel_gc_threads()) {
aoqi@0 154 for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
aoqi@0 155 _indexedFreeListParLocks[i] = new Mutex(Mutex::leaf - 1, // == ExpandHeap_lock - 1
aoqi@0 156 "a freelist par lock",
aoqi@0 157 true);
aoqi@0 158 DEBUG_ONLY(
aoqi@0 159 _indexedFreeList[i].set_protecting_lock(_indexedFreeListParLocks[i]);
aoqi@0 160 )
aoqi@0 161 }
aoqi@0 162 _dictionary->set_par_lock(&_parDictionaryAllocLock);
aoqi@0 163 }
aoqi@0 164 }
aoqi@0 165
aoqi@0 166 // Like CompactibleSpace forward() but always calls cross_threshold() to
aoqi@0 167 // update the block offset table. Removed initialize_threshold call because
aoqi@0 168 // CFLS does not use a block offset array for contiguous spaces.
aoqi@0 169 HeapWord* CompactibleFreeListSpace::forward(oop q, size_t size,
aoqi@0 170 CompactPoint* cp, HeapWord* compact_top) {
aoqi@0 171 // q is alive
aoqi@0 172 // First check if we should switch compaction space
aoqi@0 173 assert(this == cp->space, "'this' should be current compaction space.");
aoqi@0 174 size_t compaction_max_size = pointer_delta(end(), compact_top);
aoqi@0 175 assert(adjustObjectSize(size) == cp->space->adjust_object_size_v(size),
aoqi@0 176 "virtual adjustObjectSize_v() method is not correct");
aoqi@0 177 size_t adjusted_size = adjustObjectSize(size);
aoqi@0 178 assert(compaction_max_size >= MinChunkSize || compaction_max_size == 0,
aoqi@0 179 "no small fragments allowed");
aoqi@0 180 assert(minimum_free_block_size() == MinChunkSize,
aoqi@0 181 "for de-virtualized reference below");
aoqi@0 182 // Can't leave a residual fragment of nonzero size that is smaller than MinChunkSize
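  // For example (illustrative numbers only): with MinChunkSize == 3, an object
  // of adjusted_size 10 with only 12 words of room left must switch spaces
  // (10 + 3 > 12 and 10 != 12), whereas an object of adjusted_size 12 exactly
  // fills the remaining room and may stay.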
aoqi@0 183 if (adjusted_size + MinChunkSize > compaction_max_size &&
aoqi@0 184 adjusted_size != compaction_max_size) {
aoqi@0 185 do {
aoqi@0 186 // switch to next compaction space
aoqi@0 187 cp->space->set_compaction_top(compact_top);
aoqi@0 188 cp->space = cp->space->next_compaction_space();
aoqi@0 189 if (cp->space == NULL) {
aoqi@0 190 cp->gen = GenCollectedHeap::heap()->prev_gen(cp->gen);
aoqi@0 191 assert(cp->gen != NULL, "compaction must succeed");
aoqi@0 192 cp->space = cp->gen->first_compaction_space();
aoqi@0 193 assert(cp->space != NULL, "generation must have a first compaction space");
aoqi@0 194 }
aoqi@0 195 compact_top = cp->space->bottom();
aoqi@0 196 cp->space->set_compaction_top(compact_top);
aoqi@0 197 // The correct adjusted_size may not be the same as that for this method
aoqi@0 198 // (i.e., cp->space may no longer be "this"), so adjust the size again.
aoqi@0 199 // Here we use the virtual method, which was avoided above to save the
aoqi@0 200 // virtual dispatch.
aoqi@0 201 adjusted_size = cp->space->adjust_object_size_v(size);
aoqi@0 202 compaction_max_size = pointer_delta(cp->space->end(), compact_top);
aoqi@0 203 assert(cp->space->minimum_free_block_size() == 0, "just checking");
aoqi@0 204 } while (adjusted_size > compaction_max_size);
aoqi@0 205 }
aoqi@0 206
aoqi@0 207 // store the forwarding pointer into the mark word
aoqi@0 208 if ((HeapWord*)q != compact_top) {
aoqi@0 209 q->forward_to(oop(compact_top));
aoqi@0 210 assert(q->is_gc_marked(), "encoding the pointer should preserve the mark");
aoqi@0 211 } else {
aoqi@0 212 // if the object isn't moving we can just set the mark to the default
aoqi@0 213 // mark and handle it specially later on.
aoqi@0 214 q->init_mark();
aoqi@0 215 assert(q->forwardee() == NULL, "should be forwarded to NULL");
aoqi@0 216 }
aoqi@0 217
aoqi@0 218 compact_top += adjusted_size;
aoqi@0 219
aoqi@0 220 // we need to update the offset table so that the beginnings of objects can be
aoqi@0 221 // found during scavenge. Note that we are updating the offset table based on
aoqi@0 222 // where the object will be once the compaction phase finishes.
aoqi@0 223
aoqi@0 224 // Always call cross_threshold(). A contiguous space can only call it when
aoqi@0 225 // the compaction_top exceeds the current threshold, but that restriction
aoqi@0 226 // does not apply to a non-contiguous space.
aoqi@0 227 cp->threshold =
aoqi@0 228 cp->space->cross_threshold(compact_top - adjusted_size, compact_top);
aoqi@0 229 return compact_top;
aoqi@0 230 }
aoqi@0 231
aoqi@0 232 // A modified copy of OffsetTableContigSpace::cross_threshold() with _offsets -> _bt
aoqi@0 233 // and use of single_block instead of alloc_block. The name here is not really
aoqi@0 234 // appropriate - maybe a more general name could be invented for both the
aoqi@0 235 // contiguous and noncontiguous spaces.
aoqi@0 236
aoqi@0 237 HeapWord* CompactibleFreeListSpace::cross_threshold(HeapWord* start, HeapWord* the_end) {
aoqi@0 238 _bt.single_block(start, the_end);
aoqi@0 239 return end();
aoqi@0 240 }
aoqi@0 241
aoqi@0 242 // Initialize them to NULL.
aoqi@0 243 void CompactibleFreeListSpace::initializeIndexedFreeListArray() {
aoqi@0 244 for (size_t i = 0; i < IndexSetSize; i++) {
aoqi@0 245 // Note that on platforms where objects are double word aligned,
aoqi@0 246 // the odd array elements are not used. It is convenient, however,
aoqi@0 247 // to map directly from the object size to the array element.
aoqi@0 248 _indexedFreeList[i].reset(IndexSetSize);
aoqi@0 249 _indexedFreeList[i].set_size(i);
aoqi@0 250 assert(_indexedFreeList[i].count() == 0, "reset check failed");
aoqi@0 251 assert(_indexedFreeList[i].head() == NULL, "reset check failed");
aoqi@0 252 assert(_indexedFreeList[i].tail() == NULL, "reset check failed");
aoqi@0 253 assert(_indexedFreeList[i].hint() == IndexSetSize, "reset check failed");
aoqi@0 254 }
aoqi@0 255 }
aoqi@0 256
aoqi@0 257 void CompactibleFreeListSpace::resetIndexedFreeListArray() {
aoqi@0 258 for (size_t i = 1; i < IndexSetSize; i++) {
aoqi@0 259 assert(_indexedFreeList[i].size() == (size_t) i,
aoqi@0 260 "Indexed free list sizes are incorrect");
aoqi@0 261 _indexedFreeList[i].reset(IndexSetSize);
aoqi@0 262 assert(_indexedFreeList[i].count() == 0, "reset check failed");
aoqi@0 263 assert(_indexedFreeList[i].head() == NULL, "reset check failed");
aoqi@0 264 assert(_indexedFreeList[i].tail() == NULL, "reset check failed");
aoqi@0 265 assert(_indexedFreeList[i].hint() == IndexSetSize, "reset check failed");
aoqi@0 266 }
aoqi@0 267 }
aoqi@0 268
aoqi@0 269 void CompactibleFreeListSpace::reset(MemRegion mr) {
aoqi@0 270 resetIndexedFreeListArray();
aoqi@0 271 dictionary()->reset();
aoqi@0 272 if (BlockOffsetArrayUseUnallocatedBlock) {
aoqi@0 273 assert(end() == mr.end(), "We are compacting to the bottom of CMS gen");
aoqi@0 274 // Everything's allocated until proven otherwise.
aoqi@0 275 _bt.set_unallocated_block(end());
aoqi@0 276 }
aoqi@0 277 if (!mr.is_empty()) {
aoqi@0 278 assert(mr.word_size() >= MinChunkSize, "Chunk size is too small");
aoqi@0 279 _bt.single_block(mr.start(), mr.word_size());
aoqi@0 280 FreeChunk* fc = (FreeChunk*) mr.start();
aoqi@0 281 fc->set_size(mr.word_size());
aoqi@0 282 if (mr.word_size() >= IndexSetSize ) {
aoqi@0 283 returnChunkToDictionary(fc);
aoqi@0 284 } else {
aoqi@0 285 _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
aoqi@0 286 _indexedFreeList[mr.word_size()].return_chunk_at_head(fc);
aoqi@0 287 }
aoqi@0 288 coalBirth(mr.word_size());
aoqi@0 289 }
aoqi@0 290 _promoInfo.reset();
aoqi@0 291 _smallLinearAllocBlock._ptr = NULL;
aoqi@0 292 _smallLinearAllocBlock._word_size = 0;
aoqi@0 293 }
aoqi@0 294
aoqi@0 295 void CompactibleFreeListSpace::reset_after_compaction() {
aoqi@0 296 // Reset the space to the new reality - one free chunk.
aoqi@0 297 MemRegion mr(compaction_top(), end());
aoqi@0 298 reset(mr);
aoqi@0 299 // Now refill the linear allocation block(s) if possible.
aoqi@0 300 if (_adaptive_freelists) {
aoqi@0 301 refillLinearAllocBlocksIfNeeded();
aoqi@0 302 } else {
aoqi@0 303 // Place as much of mr in the linAB as we can get,
aoqi@0 304 // provided it was big enough to go into the dictionary.
aoqi@0 305 FreeChunk* fc = dictionary()->find_largest_dict();
aoqi@0 306 if (fc != NULL) {
aoqi@0 307 assert(fc->size() == mr.word_size(),
aoqi@0 308 "Why was the chunk broken up?");
aoqi@0 309 removeChunkFromDictionary(fc);
aoqi@0 310 HeapWord* addr = (HeapWord*) fc;
aoqi@0 311 _smallLinearAllocBlock.set(addr, fc->size() ,
aoqi@0 312 1024*SmallForLinearAlloc, fc->size());
aoqi@0 313 // Note that _unallocated_block is not updated here.
aoqi@0 314 }
aoqi@0 315 }
aoqi@0 316 }
aoqi@0 317
aoqi@0 318 // Walks the entire dictionary, returning a coterminal
aoqi@0 319 // chunk, if it exists. Use with caution since it involves
aoqi@0 320 // a potentially complete walk of a potentially large tree.
aoqi@0 321 FreeChunk* CompactibleFreeListSpace::find_chunk_at_end() {
aoqi@0 322
aoqi@0 323 assert_lock_strong(&_freelistLock);
aoqi@0 324
aoqi@0 325 return dictionary()->find_chunk_ends_at(end());
aoqi@0 326 }
aoqi@0 327
aoqi@0 328
aoqi@0 329 #ifndef PRODUCT
aoqi@0 330 void CompactibleFreeListSpace::initializeIndexedFreeListArrayReturnedBytes() {
aoqi@0 331 for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
aoqi@0 332 _indexedFreeList[i].allocation_stats()->set_returned_bytes(0);
aoqi@0 333 }
aoqi@0 334 }
aoqi@0 335
aoqi@0 336 size_t CompactibleFreeListSpace::sumIndexedFreeListArrayReturnedBytes() {
aoqi@0 337 size_t sum = 0;
aoqi@0 338 for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
aoqi@0 339 sum += _indexedFreeList[i].allocation_stats()->returned_bytes();
aoqi@0 340 }
aoqi@0 341 return sum;
aoqi@0 342 }
aoqi@0 343
aoqi@0 344 size_t CompactibleFreeListSpace::totalCountInIndexedFreeLists() const {
aoqi@0 345 size_t count = 0;
aoqi@0 346 for (size_t i = IndexSetStart; i < IndexSetSize; i++) {
aoqi@0 347 debug_only(
aoqi@0 348 ssize_t total_list_count = 0;
aoqi@0 349 for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
aoqi@0 350 fc = fc->next()) {
aoqi@0 351 total_list_count++;
aoqi@0 352 }
aoqi@0 353 assert(total_list_count == _indexedFreeList[i].count(),
aoqi@0 354 "Count in list is incorrect");
aoqi@0 355 )
aoqi@0 356 count += _indexedFreeList[i].count();
aoqi@0 357 }
aoqi@0 358 return count;
aoqi@0 359 }
aoqi@0 360
aoqi@0 361 size_t CompactibleFreeListSpace::totalCount() {
aoqi@0 362 size_t num = totalCountInIndexedFreeLists();
aoqi@0 363 num += dictionary()->total_count();
aoqi@0 364 if (_smallLinearAllocBlock._word_size != 0) {
aoqi@0 365 num++;
aoqi@0 366 }
aoqi@0 367 return num;
aoqi@0 368 }
aoqi@0 369 #endif
aoqi@0 370
aoqi@0 371 bool CompactibleFreeListSpace::is_free_block(const HeapWord* p) const {
aoqi@0 372 FreeChunk* fc = (FreeChunk*) p;
aoqi@0 373 return fc->is_free();
aoqi@0 374 }
aoqi@0 375
aoqi@0 376 size_t CompactibleFreeListSpace::used() const {
aoqi@0 377 return capacity() - free();
aoqi@0 378 }
aoqi@0 379
aoqi@0 380 size_t CompactibleFreeListSpace::free() const {
aoqi@0 381 // "MT-safe, but not MT-precise"(TM), if you will: i.e.
aoqi@0 382 // if you do this while the structures are in flux you
aoqi@0 383 // may get an approximate answer only; for instance
aoqi@0 384 // because there is concurrent allocation either
aoqi@0 385 // directly by mutators or for promotion during a GC.
aoqi@0 386 // It's "MT-safe", however, in the sense that you are guaranteed
aoqi@0 387 // not to crash and burn, for instance, because of walking
aoqi@0 388 // pointers that could disappear as you were walking them.
aoqi@0 389 // The approximation is because the various components
aoqi@0 390 // that are read below are not read atomically (and
aoqi@0 391 // further the computation of totalSizeInIndexedFreeLists()
aoqi@0 392 // is itself a non-atomic computation). The normal use of
aoqi@0 393 // this is during a resize operation at the end of GC
aoqi@0 394 // and at that time you are guaranteed to get the
aoqi@0 395 // correct actual value. However, for instance, this is
aoqi@0 396 // also read completely asynchronously by the "perf-sampler"
aoqi@0 397 // that supports jvmstat, and you are apt to see the values
aoqi@0 398 // flicker in such cases.
aoqi@0 399 assert(_dictionary != NULL, "No _dictionary?");
aoqi@0 400 return (_dictionary->total_chunk_size(DEBUG_ONLY(freelistLock())) +
aoqi@0 401 totalSizeInIndexedFreeLists() +
aoqi@0 402 _smallLinearAllocBlock._word_size) * HeapWordSize;
aoqi@0 403 }
aoqi@0 404
aoqi@0 405 size_t CompactibleFreeListSpace::max_alloc_in_words() const {
aoqi@0 406 assert(_dictionary != NULL, "No _dictionary?");
aoqi@0 407 assert_locked();
aoqi@0 408 size_t res = _dictionary->max_chunk_size();
aoqi@0 409 res = MAX2(res, MIN2(_smallLinearAllocBlock._word_size,
aoqi@0 410 (size_t) SmallForLinearAlloc - 1));
aoqi@0 411 // XXX the following could potentially be pretty slow;
aoqi@0 412 // should one, pessimally for the rare cases when res
aoqi@0 413 // calculated above is less than IndexSetSize,
aoqi@0 414 // just return res calculated above? My reasoning was that
aoqi@0 415 // those cases will be so rare that the extra time spent doesn't
aoqi@0 416 // really matter....
aoqi@0 417 // Note: do not change the loop test i >= res + IndexSetStride
aoqi@0 418 // to i > res below, because i is unsigned and res may be zero.
aoqi@0 419 for (size_t i = IndexSetSize - 1; i >= res + IndexSetStride;
aoqi@0 420 i -= IndexSetStride) {
aoqi@0 421 if (_indexedFreeList[i].head() != NULL) {
aoqi@0 422 assert(_indexedFreeList[i].count() != 0, "Inconsistent FreeList");
aoqi@0 423 return i;
aoqi@0 424 }
aoqi@0 425 }
aoqi@0 426 return res;
aoqi@0 427 }
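// Illustration for max_alloc_in_words() above, with assumed numbers (not
// defaults): if the largest dictionary chunk is 5 words, the linAB is empty,
// and the only non-empty indexed list is at size 12, res starts at 5 and the
// downward scan returns 12. If the largest dictionary chunk is already at
// least IndexSetSize words, the loop body never runs and that size is
// returned unchanged.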
aoqi@0 428
aoqi@0 429 void LinearAllocBlock::print_on(outputStream* st) const {
aoqi@0 430 st->print_cr(" LinearAllocBlock: ptr = " PTR_FORMAT ", word_size = " SIZE_FORMAT
aoqi@0 431 ", refillsize = " SIZE_FORMAT ", allocation_size_limit = " SIZE_FORMAT,
aoqi@0 432 p2i(_ptr), _word_size, _refillSize, _allocation_size_limit);
aoqi@0 433 }
aoqi@0 434
aoqi@0 435 void CompactibleFreeListSpace::print_on(outputStream* st) const {
aoqi@0 436 st->print_cr("COMPACTIBLE FREELIST SPACE");
aoqi@0 437 st->print_cr(" Space:");
aoqi@0 438 Space::print_on(st);
aoqi@0 439
aoqi@0 440 st->print_cr("promoInfo:");
aoqi@0 441 _promoInfo.print_on(st);
aoqi@0 442
aoqi@0 443 st->print_cr("_smallLinearAllocBlock");
aoqi@0 444 _smallLinearAllocBlock.print_on(st);
aoqi@0 445
aoqi@0 446 // dump_memory_block(_smallLinearAllocBlock->_ptr, 128);
aoqi@0 447
aoqi@0 448 st->print_cr(" _fitStrategy = %s, _adaptive_freelists = %s",
aoqi@0 449 _fitStrategy?"true":"false", _adaptive_freelists?"true":"false");
aoqi@0 450 }
aoqi@0 451
aoqi@0 452 void CompactibleFreeListSpace::print_indexed_free_lists(outputStream* st)
aoqi@0 453 const {
aoqi@0 454 reportIndexedFreeListStatistics();
aoqi@0 455 gclog_or_tty->print_cr("Layout of Indexed Freelists");
aoqi@0 456 gclog_or_tty->print_cr("---------------------------");
aoqi@0 457 AdaptiveFreeList<FreeChunk>::print_labels_on(st, "size");
aoqi@0 458 for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
aoqi@0 459 _indexedFreeList[i].print_on(gclog_or_tty);
aoqi@0 460 for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
aoqi@0 461 fc = fc->next()) {
aoqi@0 462 gclog_or_tty->print_cr("\t[" PTR_FORMAT "," PTR_FORMAT ") %s",
aoqi@0 463 p2i(fc), p2i((HeapWord*)fc + i),
aoqi@0 464 fc->cantCoalesce() ? "\t CC" : "");
aoqi@0 465 }
aoqi@0 466 }
aoqi@0 467 }
aoqi@0 468
aoqi@0 469 void CompactibleFreeListSpace::print_promo_info_blocks(outputStream* st)
aoqi@0 470 const {
aoqi@0 471 _promoInfo.print_on(st);
aoqi@0 472 }
aoqi@0 473
aoqi@0 474 void CompactibleFreeListSpace::print_dictionary_free_lists(outputStream* st)
aoqi@0 475 const {
aoqi@0 476 _dictionary->report_statistics();
aoqi@0 477 st->print_cr("Layout of Freelists in Tree");
aoqi@0 478 st->print_cr("---------------------------");
aoqi@0 479 _dictionary->print_free_lists(st);
aoqi@0 480 }
aoqi@0 481
aoqi@0 482 class BlkPrintingClosure: public BlkClosure {
aoqi@0 483 const CMSCollector* _collector;
aoqi@0 484 const CompactibleFreeListSpace* _sp;
aoqi@0 485 const CMSBitMap* _live_bit_map;
aoqi@0 486 const bool _post_remark;
aoqi@0 487 outputStream* _st;
aoqi@0 488 public:
aoqi@0 489 BlkPrintingClosure(const CMSCollector* collector,
aoqi@0 490 const CompactibleFreeListSpace* sp,
aoqi@0 491 const CMSBitMap* live_bit_map,
aoqi@0 492 outputStream* st):
aoqi@0 493 _collector(collector),
aoqi@0 494 _sp(sp),
aoqi@0 495 _live_bit_map(live_bit_map),
aoqi@0 496 _post_remark(collector->abstract_state() > CMSCollector::FinalMarking),
aoqi@0 497 _st(st) { }
aoqi@0 498 size_t do_blk(HeapWord* addr);
aoqi@0 499 };
aoqi@0 500
aoqi@0 501 size_t BlkPrintingClosure::do_blk(HeapWord* addr) {
aoqi@0 502 size_t sz = _sp->block_size_no_stall(addr, _collector);
aoqi@0 503 assert(sz != 0, "Should always be able to compute a size");
aoqi@0 504 if (_sp->block_is_obj(addr)) {
aoqi@0 505 const bool dead = _post_remark && !_live_bit_map->isMarked(addr);
aoqi@0 506 _st->print_cr(PTR_FORMAT ": %s object of size " SIZE_FORMAT "%s",
aoqi@0 507 p2i(addr),
aoqi@0 508 dead ? "dead" : "live",
aoqi@0 509 sz,
aoqi@0 510 (!dead && CMSPrintObjectsInDump) ? ":" : ".");
aoqi@0 511 if (CMSPrintObjectsInDump && !dead) {
aoqi@0 512 oop(addr)->print_on(_st);
aoqi@0 513 _st->print_cr("--------------------------------------");
aoqi@0 514 }
aoqi@0 515 } else { // free block
aoqi@0 516 _st->print_cr(PTR_FORMAT ": free block of size " SIZE_FORMAT "%s",
aoqi@0 517 p2i(addr), sz, CMSPrintChunksInDump ? ":" : ".");
aoqi@0 518 if (CMSPrintChunksInDump) {
aoqi@0 519 ((FreeChunk*)addr)->print_on(_st);
aoqi@0 520 _st->print_cr("--------------------------------------");
aoqi@0 521 }
aoqi@0 522 }
aoqi@0 523 return sz;
aoqi@0 524 }
aoqi@0 525
aoqi@0 526 void CompactibleFreeListSpace::dump_at_safepoint_with_locks(CMSCollector* c,
aoqi@0 527 outputStream* st) {
aoqi@0 528 st->print_cr("\n=========================");
aoqi@0 529 st->print_cr("Block layout in CMS Heap:");
aoqi@0 530 st->print_cr("=========================");
aoqi@0 531 BlkPrintingClosure bpcl(c, this, c->markBitMap(), st);
aoqi@0 532 blk_iterate(&bpcl);
aoqi@0 533
aoqi@0 534 st->print_cr("\n=======================================");
aoqi@0 535 st->print_cr("Order & Layout of Promotion Info Blocks");
aoqi@0 536 st->print_cr("=======================================");
aoqi@0 537 print_promo_info_blocks(st);
aoqi@0 538
aoqi@0 539 st->print_cr("\n===========================");
aoqi@0 540 st->print_cr("Order of Indexed Free Lists");
aoqi@0 541 st->print_cr("===========================");
aoqi@0 542 print_indexed_free_lists(st);
aoqi@0 543
aoqi@0 544 st->print_cr("\n=================================");
aoqi@0 545 st->print_cr("Order of Free Lists in Dictionary");
aoqi@0 546 st->print_cr("=================================");
aoqi@0 547 print_dictionary_free_lists(st);
aoqi@0 548 }
aoqi@0 549
aoqi@0 550
aoqi@0 551 void CompactibleFreeListSpace::reportFreeListStatistics() const {
aoqi@0 552 assert_lock_strong(&_freelistLock);
aoqi@0 553 assert(PrintFLSStatistics != 0, "Reporting error");
aoqi@0 554 _dictionary->report_statistics();
aoqi@0 555 if (PrintFLSStatistics > 1) {
aoqi@0 556 reportIndexedFreeListStatistics();
aoqi@0 557 size_t total_size = totalSizeInIndexedFreeLists() +
aoqi@0 558 _dictionary->total_chunk_size(DEBUG_ONLY(freelistLock()));
aoqi@0 559 gclog_or_tty->print(" free=" SIZE_FORMAT " frag=%1.4f\n", total_size, flsFrag());
aoqi@0 560 }
aoqi@0 561 }
aoqi@0 562
aoqi@0 563 void CompactibleFreeListSpace::reportIndexedFreeListStatistics() const {
aoqi@0 564 assert_lock_strong(&_freelistLock);
aoqi@0 565 gclog_or_tty->print("Statistics for IndexedFreeLists:\n"
aoqi@0 566 "--------------------------------\n");
aoqi@0 567 size_t total_size = totalSizeInIndexedFreeLists();
aoqi@0 568 size_t free_blocks = numFreeBlocksInIndexedFreeLists();
aoqi@0 569 gclog_or_tty->print("Total Free Space: " SIZE_FORMAT "\n", total_size);
aoqi@0 570 gclog_or_tty->print("Max Chunk Size: " SIZE_FORMAT "\n", maxChunkSizeInIndexedFreeLists());
aoqi@0 571 gclog_or_tty->print("Number of Blocks: " SIZE_FORMAT "\n", free_blocks);
aoqi@0 572 if (free_blocks != 0) {
aoqi@0 573 gclog_or_tty->print("Av. Block Size: " SIZE_FORMAT "\n", total_size/free_blocks);
aoqi@0 574 }
aoqi@0 575 }
aoqi@0 576
aoqi@0 577 size_t CompactibleFreeListSpace::numFreeBlocksInIndexedFreeLists() const {
aoqi@0 578 size_t res = 0;
aoqi@0 579 for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
aoqi@0 580 debug_only(
aoqi@0 581 ssize_t recount = 0;
aoqi@0 582 for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
aoqi@0 583 fc = fc->next()) {
aoqi@0 584 recount += 1;
aoqi@0 585 }
aoqi@0 586 assert(recount == _indexedFreeList[i].count(),
aoqi@0 587 "Incorrect count in list");
aoqi@0 588 )
aoqi@0 589 res += _indexedFreeList[i].count();
aoqi@0 590 }
aoqi@0 591 return res;
aoqi@0 592 }
aoqi@0 593
aoqi@0 594 size_t CompactibleFreeListSpace::maxChunkSizeInIndexedFreeLists() const {
aoqi@0 595 for (size_t i = IndexSetSize - 1; i != 0; i -= IndexSetStride) {
aoqi@0 596 if (_indexedFreeList[i].head() != NULL) {
aoqi@0 597 assert(_indexedFreeList[i].count() != 0, "Inconsistent FreeList");
aoqi@0 598 return (size_t)i;
aoqi@0 599 }
aoqi@0 600 }
aoqi@0 601 return 0;
aoqi@0 602 }
aoqi@0 603
aoqi@0 604 void CompactibleFreeListSpace::set_end(HeapWord* value) {
aoqi@0 605 HeapWord* prevEnd = end();
aoqi@0 606 assert(prevEnd != value, "unnecessary set_end call");
aoqi@0 607 assert(prevEnd == NULL || !BlockOffsetArrayUseUnallocatedBlock || value >= unallocated_block(),
aoqi@0 608 "New end is below unallocated block");
aoqi@0 609 _end = value;
aoqi@0 610 if (prevEnd != NULL) {
aoqi@0 611 // Resize the underlying block offset table.
aoqi@0 612 _bt.resize(pointer_delta(value, bottom()));
aoqi@0 613 if (value <= prevEnd) {
aoqi@0 614 assert(!BlockOffsetArrayUseUnallocatedBlock || value >= unallocated_block(),
aoqi@0 615 "New end is below unallocated block");
aoqi@0 616 } else {
aoqi@0 617 // Now, take this new chunk and add it to the free blocks.
aoqi@0 618 // Note that the BOT has not yet been updated for this block.
aoqi@0 619 size_t newFcSize = pointer_delta(value, prevEnd);
aoqi@0 620 // XXX This is REALLY UGLY and should be fixed up. XXX
aoqi@0 621 if (!_adaptive_freelists && _smallLinearAllocBlock._ptr == NULL) {
aoqi@0 622 // Mark the boundary of the new block in BOT
aoqi@0 623 _bt.mark_block(prevEnd, value);
aoqi@0 624 // put it all in the linAB
aoqi@0 625 if (ParallelGCThreads == 0) {
aoqi@0 626 _smallLinearAllocBlock._ptr = prevEnd;
aoqi@0 627 _smallLinearAllocBlock._word_size = newFcSize;
aoqi@0 628 repairLinearAllocBlock(&_smallLinearAllocBlock);
aoqi@0 629 } else { // ParallelGCThreads > 0
aoqi@0 630 MutexLockerEx x(parDictionaryAllocLock(),
aoqi@0 631 Mutex::_no_safepoint_check_flag);
aoqi@0 632 _smallLinearAllocBlock._ptr = prevEnd;
aoqi@0 633 _smallLinearAllocBlock._word_size = newFcSize;
aoqi@0 634 repairLinearAllocBlock(&_smallLinearAllocBlock);
aoqi@0 635 }
aoqi@0 636 // Births of chunks put into a LinAB are not recorded. Births
aoqi@0 637 // of chunks as they are allocated out of a LinAB are.
aoqi@0 638 } else {
aoqi@0 639 // Add the block to the free lists, if possible coalescing it
aoqi@0 640 // with the last free block, and update the BOT and census data.
aoqi@0 641 addChunkToFreeListsAtEndRecordingStats(prevEnd, newFcSize);
aoqi@0 642 }
aoqi@0 643 }
aoqi@0 644 }
aoqi@0 645 }
aoqi@0 646
aoqi@0 647 class FreeListSpace_DCTOC : public Filtering_DCTOC {
aoqi@0 648 CompactibleFreeListSpace* _cfls;
aoqi@0 649 CMSCollector* _collector;
aoqi@0 650 protected:
aoqi@0 651 // Override.
aoqi@0 652 #define walk_mem_region_with_cl_DECL(ClosureType) \
aoqi@0 653 virtual void walk_mem_region_with_cl(MemRegion mr, \
aoqi@0 654 HeapWord* bottom, HeapWord* top, \
aoqi@0 655 ClosureType* cl); \
aoqi@0 656 void walk_mem_region_with_cl_par(MemRegion mr, \
aoqi@0 657 HeapWord* bottom, HeapWord* top, \
aoqi@0 658 ClosureType* cl); \
aoqi@0 659 void walk_mem_region_with_cl_nopar(MemRegion mr, \
aoqi@0 660 HeapWord* bottom, HeapWord* top, \
aoqi@0 661 ClosureType* cl)
aoqi@0 662 walk_mem_region_with_cl_DECL(ExtendedOopClosure);
aoqi@0 663 walk_mem_region_with_cl_DECL(FilteringClosure);
aoqi@0 664
aoqi@0 665 public:
aoqi@0 666 FreeListSpace_DCTOC(CompactibleFreeListSpace* sp,
aoqi@0 667 CMSCollector* collector,
aoqi@0 668 ExtendedOopClosure* cl,
aoqi@0 669 CardTableModRefBS::PrecisionStyle precision,
aoqi@0 670 HeapWord* boundary) :
aoqi@0 671 Filtering_DCTOC(sp, cl, precision, boundary),
aoqi@0 672 _cfls(sp), _collector(collector) {}
aoqi@0 673 };
aoqi@0 674
aoqi@0 675 // We de-virtualize the block-related calls below, since we know that our
aoqi@0 676 // space is a CompactibleFreeListSpace.
aoqi@0 677
aoqi@0 678 #define FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(ClosureType) \
aoqi@0 679 void FreeListSpace_DCTOC::walk_mem_region_with_cl(MemRegion mr, \
aoqi@0 680 HeapWord* bottom, \
aoqi@0 681 HeapWord* top, \
aoqi@0 682 ClosureType* cl) { \
aoqi@0 683 bool is_par = SharedHeap::heap()->n_par_threads() > 0; \
aoqi@0 684 if (is_par) { \
aoqi@0 685 assert(SharedHeap::heap()->n_par_threads() == \
aoqi@0 686 SharedHeap::heap()->workers()->active_workers(), "Mismatch"); \
aoqi@0 687 walk_mem_region_with_cl_par(mr, bottom, top, cl); \
aoqi@0 688 } else { \
aoqi@0 689 walk_mem_region_with_cl_nopar(mr, bottom, top, cl); \
aoqi@0 690 } \
aoqi@0 691 } \
aoqi@0 692 void FreeListSpace_DCTOC::walk_mem_region_with_cl_par(MemRegion mr, \
aoqi@0 693 HeapWord* bottom, \
aoqi@0 694 HeapWord* top, \
aoqi@0 695 ClosureType* cl) { \
aoqi@0 696 /* Skip parts that are before "mr", in case "block_start" sent us \
aoqi@0 697 back too far. */ \
aoqi@0 698 HeapWord* mr_start = mr.start(); \
aoqi@0 699 size_t bot_size = _cfls->CompactibleFreeListSpace::block_size(bottom); \
aoqi@0 700 HeapWord* next = bottom + bot_size; \
aoqi@0 701 while (next < mr_start) { \
aoqi@0 702 bottom = next; \
aoqi@0 703 bot_size = _cfls->CompactibleFreeListSpace::block_size(bottom); \
aoqi@0 704 next = bottom + bot_size; \
aoqi@0 705 } \
aoqi@0 706 \
aoqi@0 707 while (bottom < top) { \
aoqi@0 708 if (_cfls->CompactibleFreeListSpace::block_is_obj(bottom) && \
aoqi@0 709 !_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks( \
aoqi@0 710 oop(bottom)) && \
aoqi@0 711 !_collector->CMSCollector::is_dead_obj(oop(bottom))) { \
aoqi@0 712 size_t word_sz = oop(bottom)->oop_iterate(cl, mr); \
aoqi@0 713 bottom += _cfls->adjustObjectSize(word_sz); \
aoqi@0 714 } else { \
aoqi@0 715 bottom += _cfls->CompactibleFreeListSpace::block_size(bottom); \
aoqi@0 716 } \
aoqi@0 717 } \
aoqi@0 718 } \
aoqi@0 719 void FreeListSpace_DCTOC::walk_mem_region_with_cl_nopar(MemRegion mr, \
aoqi@0 720 HeapWord* bottom, \
aoqi@0 721 HeapWord* top, \
aoqi@0 722 ClosureType* cl) { \
aoqi@0 723 /* Skip parts that are before "mr", in case "block_start" sent us \
aoqi@0 724 back too far. */ \
aoqi@0 725 HeapWord* mr_start = mr.start(); \
aoqi@0 726 size_t bot_size = _cfls->CompactibleFreeListSpace::block_size_nopar(bottom); \
aoqi@0 727 HeapWord* next = bottom + bot_size; \
aoqi@0 728 while (next < mr_start) { \
aoqi@0 729 bottom = next; \
aoqi@0 730 bot_size = _cfls->CompactibleFreeListSpace::block_size_nopar(bottom); \
aoqi@0 731 next = bottom + bot_size; \
aoqi@0 732 } \
aoqi@0 733 \
aoqi@0 734 while (bottom < top) { \
aoqi@0 735 if (_cfls->CompactibleFreeListSpace::block_is_obj_nopar(bottom) && \
aoqi@0 736 !_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks( \
aoqi@0 737 oop(bottom)) && \
aoqi@0 738 !_collector->CMSCollector::is_dead_obj(oop(bottom))) { \
aoqi@0 739 size_t word_sz = oop(bottom)->oop_iterate(cl, mr); \
aoqi@0 740 bottom += _cfls->adjustObjectSize(word_sz); \
aoqi@0 741 } else { \
aoqi@0 742 bottom += _cfls->CompactibleFreeListSpace::block_size_nopar(bottom); \
aoqi@0 743 } \
aoqi@0 744 } \
aoqi@0 745 }
aoqi@0 746
aoqi@0 747 // (There are only two of these, rather than N, because the split is due
aoqi@0 748 // only to the introduction of the FilteringClosure, a local part of the
aoqi@0 749 // impl of this abstraction.)
aoqi@0 750 FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(ExtendedOopClosure)
aoqi@0 751 FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(FilteringClosure)
aoqi@0 752
aoqi@0 753 DirtyCardToOopClosure*
aoqi@0 754 CompactibleFreeListSpace::new_dcto_cl(ExtendedOopClosure* cl,
aoqi@0 755 CardTableModRefBS::PrecisionStyle precision,
aoqi@0 756 HeapWord* boundary) {
aoqi@0 757 return new FreeListSpace_DCTOC(this, _collector, cl, precision, boundary);
aoqi@0 758 }
aoqi@0 759
aoqi@0 760
aoqi@0 761 // Note on locking for the space iteration functions:
aoqi@0 762 // since the collector's iteration activities are concurrent with
aoqi@0 763 // allocation activities by mutators, absent a suitable mutual exclusion
aoqi@0 764 // mechanism the iterators may go awry. For instance, a block being iterated
aoqi@0 765 // may suddenly be allocated or divided up and part of it allocated and
aoqi@0 766 // so on.
aoqi@0 767
aoqi@0 768 // Apply the given closure to each block in the space.
aoqi@0 769 void CompactibleFreeListSpace::blk_iterate_careful(BlkClosureCareful* cl) {
aoqi@0 770 assert_lock_strong(freelistLock());
aoqi@0 771 HeapWord *cur, *limit;
aoqi@0 772 for (cur = bottom(), limit = end(); cur < limit;
aoqi@0 773 cur += cl->do_blk_careful(cur));
aoqi@0 774 }
aoqi@0 775
aoqi@0 776 // Apply the given closure to each block in the space.
aoqi@0 777 void CompactibleFreeListSpace::blk_iterate(BlkClosure* cl) {
aoqi@0 778 assert_lock_strong(freelistLock());
aoqi@0 779 HeapWord *cur, *limit;
aoqi@0 780 for (cur = bottom(), limit = end(); cur < limit;
aoqi@0 781 cur += cl->do_blk(cur));
aoqi@0 782 }
aoqi@0 783
aoqi@0 784 // Apply the given closure to each oop in the space.
aoqi@0 785 void CompactibleFreeListSpace::oop_iterate(ExtendedOopClosure* cl) {
aoqi@0 786 assert_lock_strong(freelistLock());
aoqi@0 787 HeapWord *cur, *limit;
aoqi@0 788 size_t curSize;
aoqi@0 789 for (cur = bottom(), limit = end(); cur < limit;
aoqi@0 790 cur += curSize) {
aoqi@0 791 curSize = block_size(cur);
aoqi@0 792 if (block_is_obj(cur)) {
aoqi@0 793 oop(cur)->oop_iterate(cl);
aoqi@0 794 }
aoqi@0 795 }
aoqi@0 796 }
aoqi@0 797
aoqi@0 798 // NOTE: In the following methods, in order to safely be able to
aoqi@0 799 // apply the closure to an object, we need to be sure that the
aoqi@0 800 // object has been initialized. We are guaranteed that an object
aoqi@0 801 // is initialized if we are holding the Heap_lock with the
aoqi@0 802 // world stopped.
aoqi@0 803 void CompactibleFreeListSpace::verify_objects_initialized() const {
aoqi@0 804 if (is_init_completed()) {
aoqi@0 805 assert_locked_or_safepoint(Heap_lock);
aoqi@0 806 if (Universe::is_fully_initialized()) {
aoqi@0 807 guarantee(SafepointSynchronize::is_at_safepoint(),
aoqi@0 808 "Required for objects to be initialized");
aoqi@0 809 }
aoqi@0 810 } // else make a concession at vm start-up
aoqi@0 811 }
aoqi@0 812
aoqi@0 813 // Apply the given closure to each object in the space
aoqi@0 814 void CompactibleFreeListSpace::object_iterate(ObjectClosure* blk) {
aoqi@0 815 assert_lock_strong(freelistLock());
aoqi@0 816 NOT_PRODUCT(verify_objects_initialized());
aoqi@0 817 HeapWord *cur, *limit;
aoqi@0 818 size_t curSize;
aoqi@0 819 for (cur = bottom(), limit = end(); cur < limit;
aoqi@0 820 cur += curSize) {
aoqi@0 821 curSize = block_size(cur);
aoqi@0 822 if (block_is_obj(cur)) {
aoqi@0 823 blk->do_object(oop(cur));
aoqi@0 824 }
aoqi@0 825 }
aoqi@0 826 }
aoqi@0 827
aoqi@0 828 // Apply the given closure to each live object in the space
aoqi@0 829 // The usage of CompactibleFreeListSpace
aoqi@0 830 // by the ConcurrentMarkSweepGeneration for concurrent GC's allows
aoqi@0 831 // objects in the space to hold references to objects that are no longer
aoqi@0 832 // valid. For example, an object may reference another object
aoqi@0 833 // that has already been swept up (collected). This method uses
aoqi@0 834 // obj_is_alive() to determine whether it is safe to apply the closure to
aoqi@0 835 // an object. See obj_is_alive() for details on how liveness of an
aoqi@0 836 // object is decided.
aoqi@0 837
aoqi@0 838 void CompactibleFreeListSpace::safe_object_iterate(ObjectClosure* blk) {
aoqi@0 839 assert_lock_strong(freelistLock());
aoqi@0 840 NOT_PRODUCT(verify_objects_initialized());
aoqi@0 841 HeapWord *cur, *limit;
aoqi@0 842 size_t curSize;
aoqi@0 843 for (cur = bottom(), limit = end(); cur < limit;
aoqi@0 844 cur += curSize) {
aoqi@0 845 curSize = block_size(cur);
aoqi@0 846 if (block_is_obj(cur) && obj_is_alive(cur)) {
aoqi@0 847 blk->do_object(oop(cur));
aoqi@0 848 }
aoqi@0 849 }
aoqi@0 850 }
aoqi@0 851
aoqi@0 852 void CompactibleFreeListSpace::object_iterate_mem(MemRegion mr,
aoqi@0 853 UpwardsObjectClosure* cl) {
aoqi@0 854 assert_locked(freelistLock());
aoqi@0 855 NOT_PRODUCT(verify_objects_initialized());
mgerdin@6979 856 assert(!mr.is_empty(), "Should be non-empty");
mgerdin@6979 857 // We use MemRegion(bottom(), end()) rather than used_region() below
mgerdin@6979 858 // because the two are not necessarily equal for some kinds of
mgerdin@6979 859 // spaces, in particular, certain kinds of free list spaces.
mgerdin@6979 860 // We could use the more complicated but more precise:
mgerdin@6979 861 // MemRegion(used_region().start(), round_to(used_region().end(), CardSize))
mgerdin@6979 862 // but the slight imprecision seems acceptable in the assertion check.
mgerdin@6979 863 assert(MemRegion(bottom(), end()).contains(mr),
mgerdin@6979 864 "Should be within used space");
mgerdin@6979 865 HeapWord* prev = cl->previous(); // max address from last time
mgerdin@6979 866 if (prev >= mr.end()) { // nothing to do
mgerdin@6979 867 return;
mgerdin@6979 868 }
mgerdin@6979 869 // This assert will not work when we go from cms space to perm
mgerdin@6979 870 // space, and use same closure. Easy fix deferred for later. XXX YSR
mgerdin@6979 871 // assert(prev == NULL || contains(prev), "Should be within space");
mgerdin@6979 872
mgerdin@6979 873 bool last_was_obj_array = false;
mgerdin@6979 874 HeapWord *blk_start_addr, *region_start_addr;
mgerdin@6979 875 if (prev > mr.start()) {
mgerdin@6979 876 region_start_addr = prev;
mgerdin@6979 877 blk_start_addr = prev;
mgerdin@6979 878 // The previous invocation may have pushed "prev" beyond the
mgerdin@6979 879 // last allocated block yet there may be still be blocks
mgerdin@6979 880 // in this region due to a particular coalescing policy.
mgerdin@6979 881 // Relax the assertion so that the case where the unallocated
mgerdin@6979 882 // block is maintained and "prev" is beyond the unallocated
mgerdin@6979 883 // block does not cause the assertion to fire.
mgerdin@6979 884 assert((BlockOffsetArrayUseUnallocatedBlock &&
mgerdin@6979 885 (!is_in(prev))) ||
mgerdin@6979 886 (blk_start_addr == block_start(region_start_addr)), "invariant");
mgerdin@6979 887 } else {
mgerdin@6979 888 region_start_addr = mr.start();
mgerdin@6979 889 blk_start_addr = block_start(region_start_addr);
mgerdin@6979 890 }
mgerdin@6979 891 HeapWord* region_end_addr = mr.end();
mgerdin@6979 892 MemRegion derived_mr(region_start_addr, region_end_addr);
mgerdin@6979 893 while (blk_start_addr < region_end_addr) {
mgerdin@6979 894 const size_t size = block_size(blk_start_addr);
mgerdin@6979 895 if (block_is_obj(blk_start_addr)) {
mgerdin@6979 896 last_was_obj_array = cl->do_object_bm(oop(blk_start_addr), derived_mr);
mgerdin@6979 897 } else {
mgerdin@6979 898 last_was_obj_array = false;
mgerdin@6979 899 }
mgerdin@6979 900 blk_start_addr += size;
mgerdin@6979 901 }
mgerdin@6979 902 if (!last_was_obj_array) {
mgerdin@6979 903 assert((bottom() <= blk_start_addr) && (blk_start_addr <= end()),
mgerdin@6979 904 "Should be within (closed) used space");
mgerdin@6979 905 assert(blk_start_addr > prev, "Invariant");
mgerdin@6979 906 cl->set_previous(blk_start_addr); // min address for next time
mgerdin@6979 907 }
aoqi@0 908 }
aoqi@0 909
aoqi@0 910
aoqi@0 911 // Callers of this iterator beware: The closure application should
aoqi@0 912 // be robust in the face of uninitialized objects and should (always)
aoqi@0 913 // return a correct size so that the next addr + size below gives us a
aoqi@0 914 // valid block boundary. [See for instance,
aoqi@0 915 // ScanMarkedObjectsAgainCarefullyClosure::do_object_careful()
aoqi@0 916 // in ConcurrentMarkSweepGeneration.cpp.]
aoqi@0 917 HeapWord*
aoqi@0 918 CompactibleFreeListSpace::object_iterate_careful_m(MemRegion mr,
aoqi@0 919 ObjectClosureCareful* cl) {
aoqi@0 920 assert_lock_strong(freelistLock());
aoqi@0 921 // Can't use used_region() below because it may not necessarily
aoqi@0 922 // be the same as [bottom(),end()); although we could
aoqi@0 923 // use [used_region().start(),round_to(used_region().end(),CardSize)),
aoqi@0 924 // that appears too cumbersome, so we just do the simpler check
aoqi@0 925 // in the assertion below.
aoqi@0 926 assert(!mr.is_empty() && MemRegion(bottom(),end()).contains(mr),
aoqi@0 927 "mr should be non-empty and within used space");
aoqi@0 928 HeapWord *addr, *end;
aoqi@0 929 size_t size;
aoqi@0 930 for (addr = block_start_careful(mr.start()), end = mr.end();
aoqi@0 931 addr < end; addr += size) {
aoqi@0 932 FreeChunk* fc = (FreeChunk*)addr;
aoqi@0 933 if (fc->is_free()) {
aoqi@0 934 // Since we hold the free list lock, which protects direct
aoqi@0 935 // allocation in this generation by mutators, a free object
aoqi@0 936 // will remain free throughout this iteration code.
aoqi@0 937 size = fc->size();
aoqi@0 938 } else {
aoqi@0 939 // Note that the object need not necessarily be initialized,
aoqi@0 940 // because (for instance) the free list lock does NOT protect
aoqi@0 941 // object initialization. The closure application below must
aoqi@0 942 // therefore be correct in the face of uninitialized objects.
aoqi@0 943 size = cl->do_object_careful_m(oop(addr), mr);
aoqi@0 944 if (size == 0) {
aoqi@0 945 // An unparsable object found. Signal early termination.
aoqi@0 946 return addr;
aoqi@0 947 }
aoqi@0 948 }
aoqi@0 949 }
aoqi@0 950 return NULL;
aoqi@0 951 }
aoqi@0 952
aoqi@0 953
aoqi@0 954 HeapWord* CompactibleFreeListSpace::block_start_const(const void* p) const {
aoqi@0 955 NOT_PRODUCT(verify_objects_initialized());
aoqi@0 956 return _bt.block_start(p);
aoqi@0 957 }
aoqi@0 958
aoqi@0 959 HeapWord* CompactibleFreeListSpace::block_start_careful(const void* p) const {
aoqi@0 960 return _bt.block_start_careful(p);
aoqi@0 961 }
aoqi@0 962
aoqi@0 963 size_t CompactibleFreeListSpace::block_size(const HeapWord* p) const {
aoqi@0 964 NOT_PRODUCT(verify_objects_initialized());
aoqi@0 965 // This must be volatile, or else there is a danger that the compiler
aoqi@0 966 // will compile the code below into a sometimes-infinite loop, by keeping
aoqi@0 967 // the value read the first time in a register.
aoqi@0 968 while (true) {
aoqi@0 969 // We must do this until we get a consistent view of the object.
aoqi@0 970 if (FreeChunk::indicatesFreeChunk(p)) {
aoqi@0 971 volatile FreeChunk* fc = (volatile FreeChunk*)p;
aoqi@0 972 size_t res = fc->size();
aoqi@0 973
aoqi@0 974 // Bugfix for systems with weak memory model (PPC64/IA64). The
aoqi@0 975 // block's free bit was set and we have read the size of the
aoqi@0 976 // block. Acquire and check the free bit again. If the block is
aoqi@0 977 // still free, the read size is correct.
aoqi@0 978 OrderAccess::acquire();
aoqi@0 979
aoqi@0 980 // If the object is still a free chunk, return the size, else it
aoqi@0 981 // has been allocated so try again.
aoqi@0 982 if (FreeChunk::indicatesFreeChunk(p)) {
aoqi@0 983 assert(res != 0, "Block size should not be 0");
aoqi@0 984 return res;
aoqi@0 985 }
aoqi@0 986 } else {
aoqi@0 987 // must read from what 'p' points to in each loop.
aoqi@0 988 Klass* k = ((volatile oopDesc*)p)->klass_or_null();
aoqi@0 989 if (k != NULL) {
aoqi@0 990 assert(k->is_klass(), "Should really be klass oop.");
aoqi@0 991 oop o = (oop)p;
aoqi@0 992 assert(o->is_oop(true /* ignore mark word */), "Should be an oop.");
aoqi@0 993
aoqi@0 994 // Bugfix for systems with weak memory model (PPC64/IA64).
aoqi@0 995 // The object o may be an array. Acquire to make sure that the array
aoqi@0 996 // size (third word) is consistent.
aoqi@0 997 OrderAccess::acquire();
aoqi@0 998
aoqi@0 999 size_t res = o->size_given_klass(k);
aoqi@0 1000 res = adjustObjectSize(res);
aoqi@0 1001 assert(res != 0, "Block size should not be 0");
aoqi@0 1002 return res;
aoqi@0 1003 }
aoqi@0 1004 }
aoqi@0 1005 }
aoqi@0 1006 }
aoqi@0 1007
aoqi@0 1008 // TODO: Now that is_parsable is gone, we should combine these two functions.
aoqi@0 1009 // A variant of the above that uses the Printezis bits for
aoqi@0 1010 // unparsable but allocated objects. This avoids any possible
aoqi@0 1011 // stalls waiting for mutators to initialize objects, and is
aoqi@0 1012 // thus potentially faster than the variant above. However,
aoqi@0 1013 // this variant may return a zero size for a block that is
aoqi@0 1014 // under mutation and for which a consistent size cannot be
aoqi@0 1015 // inferred without stalling; see CMSCollector::block_size_if_printezis_bits().
aoqi@0 1016 size_t CompactibleFreeListSpace::block_size_no_stall(HeapWord* p,
aoqi@0 1017 const CMSCollector* c)
aoqi@0 1018 const {
aoqi@0 1019 assert(MemRegion(bottom(), end()).contains(p), "p not in space");
aoqi@0 1020 // This must be volatile, or else there is a danger that the compiler
aoqi@0 1021 // will compile the code below into a sometimes-infinite loop, by keeping
aoqi@0 1022 // the value read the first time in a register.
aoqi@0 1023 DEBUG_ONLY(uint loops = 0;)
aoqi@0 1024 while (true) {
aoqi@0 1025 // We must do this until we get a consistent view of the object.
aoqi@0 1026 if (FreeChunk::indicatesFreeChunk(p)) {
aoqi@0 1027 volatile FreeChunk* fc = (volatile FreeChunk*)p;
aoqi@0 1028 size_t res = fc->size();
aoqi@0 1029
aoqi@0 1030 // Bugfix for systems with weak memory model (PPC64/IA64). The
aoqi@0 1031 // free bit of the block was set and we have read the size of
aoqi@0 1032 // the block. Acquire and check the free bit again. If the
aoqi@0 1033 // block is still free, the read size is correct.
aoqi@0 1034 OrderAccess::acquire();
aoqi@0 1035
aoqi@0 1036 if (FreeChunk::indicatesFreeChunk(p)) {
aoqi@0 1037 assert(res != 0, "Block size should not be 0");
aoqi@0 1038 assert(loops == 0, "Should be 0");
aoqi@0 1039 return res;
aoqi@0 1040 }
aoqi@0 1041 } else {
aoqi@0 1042 // must read from what 'p' points to in each loop.
aoqi@0 1043 Klass* k = ((volatile oopDesc*)p)->klass_or_null();
aoqi@0 1044 // We trust the size of any object that has a non-NULL
aoqi@0 1045 // klass and (for those in the perm gen) is parsable
aoqi@0 1046 // -- irrespective of its conc_safe-ty.
aoqi@0 1047 if (k != NULL) {
aoqi@0 1048 assert(k->is_klass(), "Should really be klass oop.");
aoqi@0 1049 oop o = (oop)p;
aoqi@0 1050 assert(o->is_oop(), "Should be an oop");
aoqi@0 1051
aoqi@0 1052 // Bugfix for systems with weak memory model (PPC64/IA64).
aoqi@0 1053 // The object o may be an array. Acquire to make sure that the array
aoqi@0 1054 // size (third word) is consistent.
aoqi@0 1055 OrderAccess::acquire();
aoqi@0 1056
aoqi@0 1057 size_t res = o->size_given_klass(k);
aoqi@0 1058 res = adjustObjectSize(res);
aoqi@0 1059 assert(res != 0, "Block size should not be 0");
aoqi@0 1060 return res;
aoqi@0 1061 } else {
aoqi@0 1062 // May return 0 if P-bits not present.
aoqi@0 1063 return c->block_size_if_printezis_bits(p);
aoqi@0 1064 }
aoqi@0 1065 }
aoqi@0 1066 assert(loops == 0, "Can loop at most once");
aoqi@0 1067 DEBUG_ONLY(loops++;)
aoqi@0 1068 }
aoqi@0 1069 }
aoqi@0 1070
aoqi@0 1071 size_t CompactibleFreeListSpace::block_size_nopar(const HeapWord* p) const {
aoqi@0 1072 NOT_PRODUCT(verify_objects_initialized());
aoqi@0 1073 assert(MemRegion(bottom(), end()).contains(p), "p not in space");
aoqi@0 1074 FreeChunk* fc = (FreeChunk*)p;
aoqi@0 1075 if (fc->is_free()) {
aoqi@0 1076 return fc->size();
aoqi@0 1077 } else {
aoqi@0 1078 // Ignore mark word because this may be a recently promoted
aoqi@0 1079 // object whose mark word is used to chain together grey
aoqi@0 1080 // objects (the last one would have a null value).
aoqi@0 1081 assert(oop(p)->is_oop(true), "Should be an oop");
aoqi@0 1082 return adjustObjectSize(oop(p)->size());
aoqi@0 1083 }
aoqi@0 1084 }
aoqi@0 1085
aoqi@0 1086 // This implementation assumes that the property of "being an object" is
aoqi@0 1087 // stable. But being a free chunk may not be (because of parallel
aoqi@0 1088 // promotion.)
aoqi@0 1089 bool CompactibleFreeListSpace::block_is_obj(const HeapWord* p) const {
aoqi@0 1090 FreeChunk* fc = (FreeChunk*)p;
aoqi@0 1091 assert(is_in_reserved(p), "Should be in space");
aoqi@0 1092 // When doing a mark-sweep-compact of the CMS generation, this
aoqi@0 1093 // assertion may fail because prepare_for_compaction() uses
aoqi@0 1094 // space that is garbage to maintain information on ranges of
aoqi@0 1095 // live objects so that these live ranges can be moved as a whole.
aoqi@0 1096 // Comment out this assertion until that problem can be solved
aoqi@0 1097 // (i.e., that the block start calculation may look at objects
aoqi@0 1098 // at address below "p" in finding the object that contains "p"
aoqi@0 1099 // and those objects (if garbage) may have been modified to hold
aoqi@0 1100 // live range information.)
aoqi@0 1101 // assert(CollectedHeap::use_parallel_gc_threads() || _bt.block_start(p) == p,
aoqi@0 1102 // "Should be a block boundary");
aoqi@0 1103 if (FreeChunk::indicatesFreeChunk(p)) return false;
aoqi@0 1104 Klass* k = oop(p)->klass_or_null();
aoqi@0 1105 if (k != NULL) {
aoqi@0 1106 // Ignore mark word because it may have been used to
aoqi@0 1107 // chain together promoted objects (the last one
aoqi@0 1108 // would have a null value).
aoqi@0 1109 assert(oop(p)->is_oop(true), "Should be an oop");
aoqi@0 1110 return true;
aoqi@0 1111 } else {
aoqi@0 1112 return false; // Was not an object at the start of collection.
aoqi@0 1113 }
aoqi@0 1114 }
aoqi@0 1115
aoqi@0 1116 // Check if the object is alive. This fact is checked either by consulting
aoqi@0 1117 // the main marking bitmap in the sweeping phase or, if it's a permanent
aoqi@0 1118 // generation and we're not in the sweeping phase, by checking the
aoqi@0 1119 // perm_gen_verify_bit_map where we store the "deadness" information if
aoqi@0 1120 // we did not sweep the perm gen in the most recent previous GC cycle.
aoqi@0 1121 bool CompactibleFreeListSpace::obj_is_alive(const HeapWord* p) const {
aoqi@0 1122 assert(SafepointSynchronize::is_at_safepoint() || !is_init_completed(),
aoqi@0 1123 "Else races are possible");
aoqi@0 1124 assert(block_is_obj(p), "The address should point to an object");
aoqi@0 1125
aoqi@0 1126 // If we're sweeping, we use object liveness information from the main bit map
aoqi@0 1127 // for both perm gen and old gen.
aoqi@0 1128 // We don't need to lock the bitmap (live_map or dead_map below), because
aoqi@0 1129 // EITHER we are in the middle of the sweeping phase, and the
aoqi@0 1130 // main marking bit map (live_map below) is locked,
aoqi@0 1131 // OR we're in other phases and perm_gen_verify_bit_map (dead_map below)
aoqi@0 1132 // is stable, because it's mutated only in the sweeping phase.
aoqi@0 1133 // NOTE: This method is also used by jmap where, if class unloading is
aoqi@0 1134 // off, the results can return "false" for legitimate perm objects,
aoqi@0 1135 // when we are not in the midst of a sweeping phase, which can result
aoqi@0 1136 // in jmap not reporting certain perm gen objects. This will be moot
aoqi@0 1137 // if/when the perm gen goes away in the future.
aoqi@0 1138 if (_collector->abstract_state() == CMSCollector::Sweeping) {
aoqi@0 1139 CMSBitMap* live_map = _collector->markBitMap();
aoqi@0 1140 return live_map->par_isMarked((HeapWord*) p);
aoqi@0 1141 }
aoqi@0 1142 return true;
aoqi@0 1143 }
aoqi@0 1144
aoqi@0 1145 bool CompactibleFreeListSpace::block_is_obj_nopar(const HeapWord* p) const {
aoqi@0 1146 FreeChunk* fc = (FreeChunk*)p;
aoqi@0 1147 assert(is_in_reserved(p), "Should be in space");
aoqi@0 1148 assert(_bt.block_start(p) == p, "Should be a block boundary");
aoqi@0 1149 if (!fc->is_free()) {
aoqi@0 1150 // Ignore mark word because it may have been used to
aoqi@0 1151 // chain together promoted objects (the last one
aoqi@0 1152 // would have a null value).
aoqi@0 1153 assert(oop(p)->is_oop(true), "Should be an oop");
aoqi@0 1154 return true;
aoqi@0 1155 }
aoqi@0 1156 return false;
aoqi@0 1157 }
aoqi@0 1158
aoqi@0 1159 // "MT-safe but not guaranteed MT-precise" (TM); you may get an
aoqi@0 1160 // approximate answer if you don't hold the freelistlock when you call this.
aoqi@0 1161 size_t CompactibleFreeListSpace::totalSizeInIndexedFreeLists() const {
aoqi@0 1162 size_t size = 0;
aoqi@0 1163 for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
aoqi@0 1164 debug_only(
aoqi@0 1165 // We may be calling here without the lock in which case we
aoqi@0 1166 // won't do this modest sanity check.
aoqi@0 1167 if (freelistLock()->owned_by_self()) {
aoqi@0 1168 size_t total_list_size = 0;
aoqi@0 1169 for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
aoqi@0 1170 fc = fc->next()) {
aoqi@0 1171 total_list_size += i;
aoqi@0 1172 }
aoqi@0 1173 assert(total_list_size == i * _indexedFreeList[i].count(),
aoqi@0 1174 "Count in list is incorrect");
aoqi@0 1175 }
aoqi@0 1176 )
aoqi@0 1177 size += i * _indexedFreeList[i].count();
aoqi@0 1178 }
aoqi@0 1179 return size;
aoqi@0 1180 }
aoqi@0 1181
aoqi@0 1182 HeapWord* CompactibleFreeListSpace::par_allocate(size_t size) {
aoqi@0 1183 MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
aoqi@0 1184 return allocate(size);
aoqi@0 1185 }
aoqi@0 1186
aoqi@0 1187 HeapWord*
aoqi@0 1188 CompactibleFreeListSpace::getChunkFromSmallLinearAllocBlockRemainder(size_t size) {
aoqi@0 1189 return getChunkFromLinearAllocBlockRemainder(&_smallLinearAllocBlock, size);
aoqi@0 1190 }
aoqi@0 1191
aoqi@0 1192 HeapWord* CompactibleFreeListSpace::allocate(size_t size) {
aoqi@0 1193 assert_lock_strong(freelistLock());
aoqi@0 1194 HeapWord* res = NULL;
aoqi@0 1195 assert(size == adjustObjectSize(size),
aoqi@0 1196 "use adjustObjectSize() before calling into allocate()");
aoqi@0 1197
aoqi@0 1198 if (_adaptive_freelists) {
aoqi@0 1199 res = allocate_adaptive_freelists(size);
aoqi@0 1200 } else { // non-adaptive free lists
aoqi@0 1201 res = allocate_non_adaptive_freelists(size);
aoqi@0 1202 }
aoqi@0 1203
aoqi@0 1204 if (res != NULL) {
aoqi@0 1205 // check that res does lie in this space!
aoqi@0 1206 assert(is_in_reserved(res), "Not in this space!");
aoqi@0 1207 assert(is_aligned((void*)res), "alignment check");
aoqi@0 1208
aoqi@0 1209 FreeChunk* fc = (FreeChunk*)res;
aoqi@0 1210 fc->markNotFree();
aoqi@0 1211 assert(!fc->is_free(), "shouldn't be marked free");
aoqi@0 1212 assert(oop(fc)->klass_or_null() == NULL, "should look uninitialized");
aoqi@0 1213 // Verify that the block offset table shows this to
aoqi@0 1214 // be a single block, but not one which is unallocated.
aoqi@0 1215 _bt.verify_single_block(res, size);
aoqi@0 1216 _bt.verify_not_unallocated(res, size);
aoqi@0 1217 // mangle a just allocated object with a distinct pattern.
aoqi@0 1218 debug_only(fc->mangleAllocated(size));
aoqi@0 1219 }
aoqi@0 1220
aoqi@0 1221 return res;
aoqi@0 1222 }
aoqi@0 1223
aoqi@0 1224 HeapWord* CompactibleFreeListSpace::allocate_non_adaptive_freelists(size_t size) {
aoqi@0 1225 HeapWord* res = NULL;
aoqi@0 1226 // try and use linear allocation for smaller blocks
aoqi@0 1227 if (size < _smallLinearAllocBlock._allocation_size_limit) {
aoqi@0 1228 // if successful, the following also adjusts block offset table
aoqi@0 1229 res = getChunkFromSmallLinearAllocBlock(size);
aoqi@0 1230 }
aoqi@0 1231 // Else triage to indexed lists for smaller sizes
aoqi@0 1232 if (res == NULL) {
aoqi@0 1233 if (size < SmallForDictionary) {
aoqi@0 1234 res = (HeapWord*) getChunkFromIndexedFreeList(size);
aoqi@0 1235 } else {
aoqi@0 1236 // else get it from the big dictionary; if even this doesn't
aoqi@0 1237 // work we are out of luck.
aoqi@0 1238 res = (HeapWord*)getChunkFromDictionaryExact(size);
aoqi@0 1239 }
aoqi@0 1240 }
aoqi@0 1241
aoqi@0 1242 return res;
aoqi@0 1243 }
aoqi@0 1244
aoqi@0 1245 HeapWord* CompactibleFreeListSpace::allocate_adaptive_freelists(size_t size) {
aoqi@0 1246 assert_lock_strong(freelistLock());
aoqi@0 1247 HeapWord* res = NULL;
aoqi@0 1248 assert(size == adjustObjectSize(size),
aoqi@0 1249 "use adjustObjectSize() before calling into allocate()");
aoqi@0 1250
aoqi@0 1251 // Strategy
aoqi@0 1252 // if small
aoqi@0 1253 // exact size from small object indexed list if small
aoqi@0 1254 // small or large linear allocation block (linAB) as appropriate
aoqi@0 1255 // take from lists of greater sized chunks
aoqi@0 1256 // else
aoqi@0 1257 // dictionary
aoqi@0 1258 // small or large linear allocation block if it has the space
aoqi@0 1259 // Try allocating exact size from indexTable first
aoqi@0 1260 if (size < IndexSetSize) {
aoqi@0 1261 res = (HeapWord*) getChunkFromIndexedFreeList(size);
aoqi@0 1262 if (res != NULL) {
aoqi@0 1263 assert(res != (HeapWord*)_indexedFreeList[size].head(),
aoqi@0 1264 "Not removed from free list");
aoqi@0 1265 // no block offset table adjustment is necessary on blocks in
aoqi@0 1266 // the indexed lists.
aoqi@0 1267
aoqi@0 1268 // Try allocating from the small LinAB
aoqi@0 1269 } else if (size < _smallLinearAllocBlock._allocation_size_limit &&
aoqi@0 1270 (res = getChunkFromSmallLinearAllocBlock(size)) != NULL) {
aoqi@0 1271 // if successful, the above also adjusts block offset table
aoqi@0 1272 // Note that this call will refill the LinAB to
aoqi@0 1273 // satisfy the request. This is different from the
aoqi@0 1274 // evm behavior.
aoqi@0 1275 // Don't record chunk off a LinAB? smallSplitBirth(size);
aoqi@0 1276 } else {
aoqi@0 1277 // Raid the exact free lists larger than size, even if they are not
aoqi@0 1278 // overpopulated.
aoqi@0 1279 res = (HeapWord*) getChunkFromGreater(size);
aoqi@0 1280 }
aoqi@0 1281 } else {
aoqi@0 1282 // Big objects get allocated directly from the dictionary.
aoqi@0 1283 res = (HeapWord*) getChunkFromDictionaryExact(size);
aoqi@0 1284 if (res == NULL) {
aoqi@0 1285 // Try hard not to fail since an allocation failure will likely
aoqi@0 1286 // trigger a synchronous GC. Try to get the space from the
aoqi@0 1287 // allocation blocks.
aoqi@0 1288 res = getChunkFromSmallLinearAllocBlockRemainder(size);
aoqi@0 1289 }
aoqi@0 1290 }
aoqi@0 1291
aoqi@0 1292 return res;
aoqi@0 1293 }
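
// Illustrative sketch (not part of the collector): the triage order used by
// allocate_adaptive_freelists() above, reduced to plain arrays. The parameter
// names and return codes are hypothetical and exist only for this example;
// the fallback from the dictionary to the linAB remainder is omitted for brevity.
// Returns 0 for the exact indexed list, 1 for the small linAB, 2 for a larger
// indexed list, 3 for the dictionary, and -1 if the request would fail here.
static int sketch_adaptive_triage(size_t words,
                                  const size_t* indexed_count, // chunks per exact size
                                  size_t index_set_size,       // analogous to IndexSetSize
                                  size_t linab_words,          // words left in the small linAB
                                  bool dictionary_can_satisfy) {
  if (words < index_set_size) {
    if (indexed_count[words] > 0) return 0;          // exact size first
    if (words < linab_words)      return 1;          // then the small linAB
    for (size_t i = words + 1; i < index_set_size; i++) {
      if (indexed_count[i] > 0)   return 2;          // then any larger exact list
    }
    return -1;
  }
  return dictionary_can_satisfy ? 3 : -1;            // big requests go to the dictionary
}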
aoqi@0 1294
aoqi@0 1295 // A worst-case estimate of the space required (in HeapWords) to expand the heap
aoqi@0 1296 // when promoting obj.
aoqi@0 1297 size_t CompactibleFreeListSpace::expansionSpaceRequired(size_t obj_size) const {
aoqi@0 1298 // Depending on the object size, expansion may require refilling either a
aoqi@0 1299 // bigLAB or a smallLAB plus refilling a PromotionInfo object. 2*MinChunkSize
aoqi@0 1300 // is added because the dictionary may over-allocate to avoid fragmentation.
aoqi@0 1301 size_t space = obj_size;
aoqi@0 1302 if (!_adaptive_freelists) {
aoqi@0 1303 space = MAX2(space, _smallLinearAllocBlock._refillSize);
aoqi@0 1304 }
aoqi@0 1305 space += _promoInfo.refillSize() + 2 * MinChunkSize;
aoqi@0 1306 return space;
aoqi@0 1307 }
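
// Minimal sketch of the worst-case arithmetic in expansionSpaceRequired() above,
// using stand-in parameters; pass lin_ab_refill = 0 when adaptive free lists are
// in use. The names here are hypothetical and only show how the terms combine.
static size_t sketch_expansion_space_required(size_t obj_size,
                                              size_t lin_ab_refill,
                                              size_t promo_info_refill,
                                              size_t min_chunk) {
  // Start with the object itself, or a full linAB refill if that is larger.
  size_t space = obj_size;
  if (lin_ab_refill > space) {
    space = lin_ab_refill;
  }
  // Add the promotion-info spooling refill plus slack for dictionary
  // over-allocation on both ends.
  return space + promo_info_refill + 2 * min_chunk;
}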
aoqi@0 1308
aoqi@0 1309 FreeChunk* CompactibleFreeListSpace::getChunkFromGreater(size_t numWords) {
aoqi@0 1310 FreeChunk* ret;
aoqi@0 1311
aoqi@0 1312 assert(numWords >= MinChunkSize, "Size is less than minimum");
aoqi@0 1313 assert(linearAllocationWouldFail() || bestFitFirst(),
aoqi@0 1314 "Should not be here");
aoqi@0 1315
aoqi@0 1316 size_t i;
aoqi@0 1317 size_t currSize = numWords + MinChunkSize;
aoqi@0 1318 assert(currSize % MinObjAlignment == 0, "currSize should be aligned");
aoqi@0 1319 for (i = currSize; i < IndexSetSize; i += IndexSetStride) {
aoqi@0 1320 AdaptiveFreeList<FreeChunk>* fl = &_indexedFreeList[i];
aoqi@0 1321 if (fl->head()) {
aoqi@0 1322 ret = getFromListGreater(fl, numWords);
aoqi@0 1323 assert(ret == NULL || ret->is_free(), "Should be returning a free chunk");
aoqi@0 1324 return ret;
aoqi@0 1325 }
aoqi@0 1326 }
aoqi@0 1327
aoqi@0 1328 currSize = MAX2((size_t)SmallForDictionary,
aoqi@0 1329 (size_t)(numWords + MinChunkSize));
aoqi@0 1330
aoqi@0 1331 /* Try to get a chunk that satisfies request, while avoiding
aoqi@0 1332 fragmentation that can't be handled. */
aoqi@0 1333 {
aoqi@0 1334 ret = dictionary()->get_chunk(currSize);
aoqi@0 1335 if (ret != NULL) {
aoqi@0 1336 assert(ret->size() - numWords >= MinChunkSize,
aoqi@0 1337 "Chunk is too small");
aoqi@0 1338 _bt.allocated((HeapWord*)ret, ret->size());
aoqi@0 1339 /* Carve returned chunk. */
aoqi@0 1340 (void) splitChunkAndReturnRemainder(ret, numWords);
aoqi@0 1341 /* Label this as no longer a free chunk. */
aoqi@0 1342 assert(ret->is_free(), "This chunk should be free");
aoqi@0 1343 ret->link_prev(NULL);
aoqi@0 1344 }
aoqi@0 1345 assert(ret == NULL || ret->is_free(), "Should be returning a free chunk");
aoqi@0 1346 return ret;
aoqi@0 1347 }
aoqi@0 1348 ShouldNotReachHere();
aoqi@0 1349 }
aoqi@0 1350
aoqi@0 1351 bool CompactibleFreeListSpace::verifyChunkInIndexedFreeLists(FreeChunk* fc) const {
aoqi@0 1352 assert(fc->size() < IndexSetSize, "Size of chunk is too large");
aoqi@0 1353 return _indexedFreeList[fc->size()].verify_chunk_in_free_list(fc);
aoqi@0 1354 }
aoqi@0 1355
aoqi@0 1356 bool CompactibleFreeListSpace::verify_chunk_is_linear_alloc_block(FreeChunk* fc) const {
aoqi@0 1357 assert((_smallLinearAllocBlock._ptr != (HeapWord*)fc) ||
aoqi@0 1358 (_smallLinearAllocBlock._word_size == fc->size()),
aoqi@0 1359 "Linear allocation block shows incorrect size");
aoqi@0 1360 return ((_smallLinearAllocBlock._ptr == (HeapWord*)fc) &&
aoqi@0 1361 (_smallLinearAllocBlock._word_size == fc->size()));
aoqi@0 1362 }
aoqi@0 1363
aoqi@0 1364 // Check if the purported free chunk is present either as a linear
aoqi@0 1365 // allocation block, the size-indexed table of (smaller) free blocks,
aoqi@0 1366 // or the larger free blocks kept in the binary tree dictionary.
aoqi@0 1367 bool CompactibleFreeListSpace::verify_chunk_in_free_list(FreeChunk* fc) const {
aoqi@0 1368 if (verify_chunk_is_linear_alloc_block(fc)) {
aoqi@0 1369 return true;
aoqi@0 1370 } else if (fc->size() < IndexSetSize) {
aoqi@0 1371 return verifyChunkInIndexedFreeLists(fc);
aoqi@0 1372 } else {
aoqi@0 1373 return dictionary()->verify_chunk_in_free_list(fc);
aoqi@0 1374 }
aoqi@0 1375 }
aoqi@0 1376
aoqi@0 1377 #ifndef PRODUCT
aoqi@0 1378 void CompactibleFreeListSpace::assert_locked() const {
aoqi@0 1379 CMSLockVerifier::assert_locked(freelistLock(), parDictionaryAllocLock());
aoqi@0 1380 }
aoqi@0 1381
aoqi@0 1382 void CompactibleFreeListSpace::assert_locked(const Mutex* lock) const {
aoqi@0 1383 CMSLockVerifier::assert_locked(lock);
aoqi@0 1384 }
aoqi@0 1385 #endif
aoqi@0 1386
aoqi@0 1387 FreeChunk* CompactibleFreeListSpace::allocateScratch(size_t size) {
aoqi@0 1388 // In the parallel case, the main thread holds the free list lock
aoqi@0 1389 // on behalf of the parallel threads.
aoqi@0 1390 FreeChunk* fc;
aoqi@0 1391 {
aoqi@0 1392 // If GC is parallel, this might be called by several threads.
aoqi@0 1393 // This should be rare enough that the locking overhead won't affect
aoqi@0 1394 // the sequential code.
aoqi@0 1395 MutexLockerEx x(parDictionaryAllocLock(),
aoqi@0 1396 Mutex::_no_safepoint_check_flag);
aoqi@0 1397 fc = getChunkFromDictionary(size);
aoqi@0 1398 }
aoqi@0 1399 if (fc != NULL) {
aoqi@0 1400 fc->dontCoalesce();
aoqi@0 1401 assert(fc->is_free(), "Should be free, but not coalescable");
aoqi@0 1402 // Verify that the block offset table shows this to
aoqi@0 1403 // be a single block, but not one which is unallocated.
aoqi@0 1404 _bt.verify_single_block((HeapWord*)fc, fc->size());
aoqi@0 1405 _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
aoqi@0 1406 }
aoqi@0 1407 return fc;
aoqi@0 1408 }
aoqi@0 1409
aoqi@0 1410 oop CompactibleFreeListSpace::promote(oop obj, size_t obj_size) {
aoqi@0 1411 assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
aoqi@0 1412 assert_locked();
aoqi@0 1413
aoqi@0 1414 // if we are tracking promotions, then first ensure space for
aoqi@0 1415 // promotion (including spooling space for saving header if necessary).
aoqi@0 1416 // then allocate and copy, then track promoted info if needed.
aoqi@0 1417 // When tracking (see PromotionInfo::track()), the mark word may
aoqi@0 1418 // be displaced and in this case restoration of the mark word
aoqi@0 1419 // occurs in the (oop_since_save_marks_)iterate phase.
aoqi@0 1420 if (_promoInfo.tracking() && !_promoInfo.ensure_spooling_space()) {
aoqi@0 1421 return NULL;
aoqi@0 1422 }
aoqi@0 1423 // Call the allocate(size_t, bool) form directly to avoid the
aoqi@0 1424 // additional call through the allocate(size_t) form. Having
aoqi@0 1425 // the compiler inline the call is problematic because allocate(size_t)
aoqi@0 1426 // is a virtual method.
aoqi@0 1427 HeapWord* res = allocate(adjustObjectSize(obj_size));
aoqi@0 1428 if (res != NULL) {
aoqi@0 1429 Copy::aligned_disjoint_words((HeapWord*)obj, res, obj_size);
aoqi@0 1430 // if we should be tracking promotions, do so.
aoqi@0 1431 if (_promoInfo.tracking()) {
aoqi@0 1432 _promoInfo.track((PromotedObject*)res);
aoqi@0 1433 }
aoqi@0 1434 }
aoqi@0 1435 return oop(res);
aoqi@0 1436 }
aoqi@0 1437
aoqi@0 1438 HeapWord*
aoqi@0 1439 CompactibleFreeListSpace::getChunkFromSmallLinearAllocBlock(size_t size) {
aoqi@0 1440 assert_locked();
aoqi@0 1441 assert(size >= MinChunkSize, "minimum chunk size");
aoqi@0 1442 assert(size < _smallLinearAllocBlock._allocation_size_limit,
aoqi@0 1443 "maximum from smallLinearAllocBlock");
aoqi@0 1444 return getChunkFromLinearAllocBlock(&_smallLinearAllocBlock, size);
aoqi@0 1445 }
aoqi@0 1446
aoqi@0 1447 HeapWord*
aoqi@0 1448 CompactibleFreeListSpace::getChunkFromLinearAllocBlock(LinearAllocBlock *blk,
aoqi@0 1449 size_t size) {
aoqi@0 1450 assert_locked();
aoqi@0 1451 assert(size >= MinChunkSize, "too small");
aoqi@0 1452 HeapWord* res = NULL;
aoqi@0 1453 // Try to do linear allocation from blk, making sure that the block has not been exhausted.
aoqi@0 1454 if (blk->_word_size == 0) {
aoqi@0 1455 // We have probably been unable to fill this either in the prologue or
aoqi@0 1456 // when it was exhausted at the last linear allocation. Bail out until
aoqi@0 1457 // next time.
aoqi@0 1458 assert(blk->_ptr == NULL, "consistency check");
aoqi@0 1459 return NULL;
aoqi@0 1460 }
aoqi@0 1461 assert(blk->_word_size != 0 && blk->_ptr != NULL, "consistency check");
aoqi@0 1462 res = getChunkFromLinearAllocBlockRemainder(blk, size);
aoqi@0 1463 if (res != NULL) return res;
aoqi@0 1464
aoqi@0 1465 // about to exhaust this linear allocation block
aoqi@0 1466 if (blk->_word_size == size) { // exactly satisfied
aoqi@0 1467 res = blk->_ptr;
aoqi@0 1468 _bt.allocated(res, blk->_word_size);
aoqi@0 1469 } else if (size + MinChunkSize <= blk->_refillSize) {
aoqi@0 1470 size_t sz = blk->_word_size;
aoqi@0 1471 // Update _unallocated_block if the size is such that chunk would be
aoqi@0 1472 // returned to the indexed free list. All other chunks in the indexed
aoqi@0 1473 // free lists are allocated from the dictionary so that _unallocated_block
aoqi@0 1474 // has already been adjusted for them. Do it here so that the cost
aoqi@0 1475 // is paid uniformly for all chunks added back to the indexed free lists.
aoqi@0 1476 if (sz < SmallForDictionary) {
aoqi@0 1477 _bt.allocated(blk->_ptr, sz);
aoqi@0 1478 }
aoqi@0 1479 // Return the chunk that isn't big enough, and then refill below.
aoqi@0 1480 addChunkToFreeLists(blk->_ptr, sz);
aoqi@0 1481 split_birth(sz);
aoqi@0 1482 // Don't keep statistics on adding back chunk from a LinAB.
aoqi@0 1483 } else {
aoqi@0 1484 // A refilled block would not satisfy the request.
aoqi@0 1485 return NULL;
aoqi@0 1486 }
aoqi@0 1487
aoqi@0 1488 blk->_ptr = NULL; blk->_word_size = 0;
aoqi@0 1489 refillLinearAllocBlock(blk);
aoqi@0 1490 assert(blk->_ptr == NULL || blk->_word_size >= size + MinChunkSize,
aoqi@0 1491 "block was replenished");
aoqi@0 1492 if (res != NULL) {
aoqi@0 1493 split_birth(size);
aoqi@0 1494 repairLinearAllocBlock(blk);
aoqi@0 1495 } else if (blk->_ptr != NULL) {
aoqi@0 1496 res = blk->_ptr;
aoqi@0 1497 size_t blk_size = blk->_word_size;
aoqi@0 1498 blk->_word_size -= size;
aoqi@0 1499 blk->_ptr += size;
aoqi@0 1500 split_birth(size);
aoqi@0 1501 repairLinearAllocBlock(blk);
aoqi@0 1502 // Update BOT last so that other (parallel) GC threads see a consistent
aoqi@0 1503 // view of the BOT and free blocks.
aoqi@0 1504 // Above must occur before BOT is updated below.
aoqi@0 1505 OrderAccess::storestore();
aoqi@0 1506 _bt.split_block(res, blk_size, size); // adjust block offset table
aoqi@0 1507 }
aoqi@0 1508 return res;
aoqi@0 1509 }
aoqi@0 1510
aoqi@0 1511 HeapWord* CompactibleFreeListSpace::getChunkFromLinearAllocBlockRemainder(
aoqi@0 1512 LinearAllocBlock* blk,
aoqi@0 1513 size_t size) {
aoqi@0 1514 assert_locked();
aoqi@0 1515 assert(size >= MinChunkSize, "too small");
aoqi@0 1516
aoqi@0 1517 HeapWord* res = NULL;
aoqi@0 1518 // This is the common case. Keep it simple.
aoqi@0 1519 if (blk->_word_size >= size + MinChunkSize) {
aoqi@0 1520 assert(blk->_ptr != NULL, "consistency check");
aoqi@0 1521 res = blk->_ptr;
aoqi@0 1522 // Note that the BOT is up-to-date for the linAB before allocation. It
aoqi@0 1523 // indicates the start of the linAB. The split_block() updates the
aoqi@0 1524 // BOT for the linAB after the allocation (indicates the start of the
aoqi@0 1525 // next chunk to be allocated).
aoqi@0 1526 size_t blk_size = blk->_word_size;
aoqi@0 1527 blk->_word_size -= size;
aoqi@0 1528 blk->_ptr += size;
aoqi@0 1529 split_birth(size);
aoqi@0 1530 repairLinearAllocBlock(blk);
aoqi@0 1531 // Update BOT last so that other (parallel) GC threads see a consistent
aoqi@0 1532 // view of the BOT and free blocks.
aoqi@0 1533 // Above must occur before BOT is updated below.
aoqi@0 1534 OrderAccess::storestore();
aoqi@0 1535 _bt.split_block(res, blk_size, size); // adjust block offset table
aoqi@0 1536 _bt.allocated(res, size);
aoqi@0 1537 }
aoqi@0 1538 return res;
aoqi@0 1539 }
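
// Illustrative sketch of the bump-pointer carve performed by
// getChunkFromLinearAllocBlockRemainder() above: take 'size' words off the
// front of a block only if the remainder would still be a usable free chunk.
// The SketchLinAB type is a hypothetical stand-in for LinearAllocBlock and
// ignores the BOT and census updates done by the real code.
struct SketchLinAB {
  size_t* ptr;        // start of the unallocated remainder
  size_t  word_size;  // words remaining
};

static size_t* sketch_carve_from_linab(SketchLinAB* blk, size_t size, size_t min_chunk) {
  if (blk->word_size < size + min_chunk) {
    return NULL;               // remainder would be too small to stay on a free list
  }
  size_t* res = blk->ptr;      // allocation comes from the front of the block
  blk->ptr       += size;      // bump the pointer past the allocation
  blk->word_size -= size;      // shrink the remainder
  return res;
}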
aoqi@0 1540
aoqi@0 1541 FreeChunk*
aoqi@0 1542 CompactibleFreeListSpace::getChunkFromIndexedFreeList(size_t size) {
aoqi@0 1543 assert_locked();
aoqi@0 1544 assert(size < SmallForDictionary, "just checking");
aoqi@0 1545 FreeChunk* res;
aoqi@0 1546 res = _indexedFreeList[size].get_chunk_at_head();
aoqi@0 1547 if (res == NULL) {
aoqi@0 1548 res = getChunkFromIndexedFreeListHelper(size);
aoqi@0 1549 }
aoqi@0 1550 _bt.verify_not_unallocated((HeapWord*) res, size);
aoqi@0 1551 assert(res == NULL || res->size() == size, "Incorrect block size");
aoqi@0 1552 return res;
aoqi@0 1553 }
aoqi@0 1554
aoqi@0 1555 FreeChunk*
aoqi@0 1556 CompactibleFreeListSpace::getChunkFromIndexedFreeListHelper(size_t size,
aoqi@0 1557 bool replenish) {
aoqi@0 1558 assert_locked();
aoqi@0 1559 FreeChunk* fc = NULL;
aoqi@0 1560 if (size < SmallForDictionary) {
aoqi@0 1561 assert(_indexedFreeList[size].head() == NULL ||
aoqi@0 1562 _indexedFreeList[size].surplus() <= 0,
aoqi@0 1563 "List for this size should be empty or under populated");
aoqi@0 1564 // Try best fit in exact lists before replenishing the list
aoqi@0 1565 if (!bestFitFirst() || (fc = bestFitSmall(size)) == NULL) {
aoqi@0 1566 // Replenish list.
aoqi@0 1567 //
aoqi@0 1568 // Things tried that failed.
aoqi@0 1569 // Tried allocating out of the two LinAB's first before
aoqi@0 1570 // replenishing lists.
aoqi@0 1571 // Tried small linAB of size 256 (size in indexed list)
aoqi@0 1572 // and replenishing indexed lists from the small linAB.
aoqi@0 1573 //
aoqi@0 1574 FreeChunk* newFc = NULL;
aoqi@0 1575 const size_t replenish_size = CMSIndexedFreeListReplenish * size;
aoqi@0 1576 if (replenish_size < SmallForDictionary) {
aoqi@0 1577 // Do not replenish from an underpopulated size.
aoqi@0 1578 if (_indexedFreeList[replenish_size].surplus() > 0 &&
aoqi@0 1579 _indexedFreeList[replenish_size].head() != NULL) {
aoqi@0 1580 newFc = _indexedFreeList[replenish_size].get_chunk_at_head();
aoqi@0 1581 } else if (bestFitFirst()) {
aoqi@0 1582 newFc = bestFitSmall(replenish_size);
aoqi@0 1583 }
aoqi@0 1584 }
aoqi@0 1585 if (newFc == NULL && replenish_size > size) {
aoqi@0 1586 assert(CMSIndexedFreeListReplenish > 1, "ctl pt invariant");
aoqi@0 1587 newFc = getChunkFromIndexedFreeListHelper(replenish_size, false);
aoqi@0 1588 }
aoqi@0 1589 // Note: The stats update for the split-death of the block obtained above
aoqi@0 1590 // will be recorded below, precisely when we know we are going to
aoqi@0 1591 // be actually splitting it into more than one piece.
aoqi@0 1592 if (newFc != NULL) {
aoqi@0 1593 if (replenish || CMSReplenishIntermediate) {
aoqi@0 1594 // Replenish this list and return one block to caller.
aoqi@0 1595 size_t i;
aoqi@0 1596 FreeChunk *curFc, *nextFc;
aoqi@0 1597 size_t num_blk = newFc->size() / size;
aoqi@0 1598 assert(num_blk >= 1, "Smaller than requested?");
aoqi@0 1599 assert(newFc->size() % size == 0, "Should be integral multiple of request");
aoqi@0 1600 if (num_blk > 1) {
aoqi@0 1601 // we are sure we will be splitting the block just obtained
aoqi@0 1602 // into multiple pieces; record the split-death of the original
aoqi@0 1603 splitDeath(replenish_size);
aoqi@0 1604 }
aoqi@0 1605 // carve up and link blocks 0, ..., num_blk - 2
aoqi@0 1606 // The last chunk is not added to the lists but is returned as the
aoqi@0 1607 // free chunk.
aoqi@0 1608 for (curFc = newFc, nextFc = (FreeChunk*)((HeapWord*)curFc + size),
aoqi@0 1609 i = 0;
aoqi@0 1610 i < (num_blk - 1);
aoqi@0 1611 curFc = nextFc, nextFc = (FreeChunk*)((HeapWord*)nextFc + size),
aoqi@0 1612 i++) {
aoqi@0 1613 curFc->set_size(size);
aoqi@0 1614 // Don't record this as a return in order to try and
aoqi@0 1615 // determine the "returns" from a GC.
aoqi@0 1616 _bt.verify_not_unallocated((HeapWord*) curFc, size);
aoqi@0 1617 _indexedFreeList[size].return_chunk_at_tail(curFc, false);
aoqi@0 1618 _bt.mark_block((HeapWord*)curFc, size);
aoqi@0 1619 split_birth(size);
aoqi@0 1620 // Don't record the initial population of the indexed list
aoqi@0 1621 // as a split birth.
aoqi@0 1622 }
aoqi@0 1623
aoqi@0 1624 // check that the arithmetic was OK above
aoqi@0 1625 assert((HeapWord*)nextFc == (HeapWord*)newFc + num_blk*size,
aoqi@0 1626 "inconsistency in carving newFc");
aoqi@0 1627 curFc->set_size(size);
aoqi@0 1628 _bt.mark_block((HeapWord*)curFc, size);
aoqi@0 1629 split_birth(size);
aoqi@0 1630 fc = curFc;
aoqi@0 1631 } else {
aoqi@0 1632 // Return entire block to caller
aoqi@0 1633 fc = newFc;
aoqi@0 1634 }
aoqi@0 1635 }
aoqi@0 1636 }
aoqi@0 1637 } else {
aoqi@0 1638 // Get a free chunk from the free chunk dictionary to be returned to
aoqi@0 1639 // replenish the indexed free list.
aoqi@0 1640 fc = getChunkFromDictionaryExact(size);
aoqi@0 1641 }
aoqi@0 1642 // assert(fc == NULL || fc->is_free(), "Should be returning a free chunk");
aoqi@0 1643 return fc;
aoqi@0 1644 }
aoqi@0 1645
aoqi@0 1646 FreeChunk*
aoqi@0 1647 CompactibleFreeListSpace::getChunkFromDictionary(size_t size) {
aoqi@0 1648 assert_locked();
aoqi@0 1649 FreeChunk* fc = _dictionary->get_chunk(size,
aoqi@0 1650 FreeBlockDictionary<FreeChunk>::atLeast);
aoqi@0 1651 if (fc == NULL) {
aoqi@0 1652 return NULL;
aoqi@0 1653 }
aoqi@0 1654 _bt.allocated((HeapWord*)fc, fc->size());
aoqi@0 1655 if (fc->size() >= size + MinChunkSize) {
aoqi@0 1656 fc = splitChunkAndReturnRemainder(fc, size);
aoqi@0 1657 }
aoqi@0 1658 assert(fc->size() >= size, "chunk too small");
aoqi@0 1659 assert(fc->size() < size + MinChunkSize, "chunk too big");
aoqi@0 1660 _bt.verify_single_block((HeapWord*)fc, fc->size());
aoqi@0 1661 return fc;
aoqi@0 1662 }
aoqi@0 1663
aoqi@0 1664 FreeChunk*
aoqi@0 1665 CompactibleFreeListSpace::getChunkFromDictionaryExact(size_t size) {
aoqi@0 1666 assert_locked();
aoqi@0 1667 FreeChunk* fc = _dictionary->get_chunk(size,
aoqi@0 1668 FreeBlockDictionary<FreeChunk>::atLeast);
aoqi@0 1669 if (fc == NULL) {
aoqi@0 1670 return fc;
aoqi@0 1671 }
aoqi@0 1672 _bt.allocated((HeapWord*)fc, fc->size());
aoqi@0 1673 if (fc->size() == size) {
aoqi@0 1674 _bt.verify_single_block((HeapWord*)fc, size);
aoqi@0 1675 return fc;
aoqi@0 1676 }
aoqi@0 1677 assert(fc->size() > size, "get_chunk() guarantee");
aoqi@0 1678 if (fc->size() < size + MinChunkSize) {
aoqi@0 1679 // Return the chunk to the dictionary and go get a bigger one.
aoqi@0 1680 returnChunkToDictionary(fc);
aoqi@0 1681 fc = _dictionary->get_chunk(size + MinChunkSize,
aoqi@0 1682 FreeBlockDictionary<FreeChunk>::atLeast);
aoqi@0 1683 if (fc == NULL) {
aoqi@0 1684 return NULL;
aoqi@0 1685 }
aoqi@0 1686 _bt.allocated((HeapWord*)fc, fc->size());
aoqi@0 1687 }
aoqi@0 1688 assert(fc->size() >= size + MinChunkSize, "tautology");
aoqi@0 1689 fc = splitChunkAndReturnRemainder(fc, size);
aoqi@0 1690 assert(fc->size() == size, "chunk is wrong size");
aoqi@0 1691 _bt.verify_single_block((HeapWord*)fc, size);
aoqi@0 1692 return fc;
aoqi@0 1693 }
aoqi@0 1694
aoqi@0 1695 void
aoqi@0 1696 CompactibleFreeListSpace::returnChunkToDictionary(FreeChunk* chunk) {
aoqi@0 1697 assert_locked();
aoqi@0 1698
aoqi@0 1699 size_t size = chunk->size();
aoqi@0 1700 _bt.verify_single_block((HeapWord*)chunk, size);
aoqi@0 1701 // adjust _unallocated_block downward, as necessary
aoqi@0 1702 _bt.freed((HeapWord*)chunk, size);
aoqi@0 1703 _dictionary->return_chunk(chunk);
aoqi@0 1704 #ifndef PRODUCT
aoqi@0 1705 if (CMSCollector::abstract_state() != CMSCollector::Sweeping) {
aoqi@0 1706 TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >* tc = TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >::as_TreeChunk(chunk);
aoqi@0 1707 TreeList<FreeChunk, AdaptiveFreeList<FreeChunk> >* tl = tc->list();
aoqi@0 1708 tl->verify_stats();
aoqi@0 1709 }
aoqi@0 1710 #endif // PRODUCT
aoqi@0 1711 }
aoqi@0 1712
aoqi@0 1713 void
aoqi@0 1714 CompactibleFreeListSpace::returnChunkToFreeList(FreeChunk* fc) {
aoqi@0 1715 assert_locked();
aoqi@0 1716 size_t size = fc->size();
aoqi@0 1717 _bt.verify_single_block((HeapWord*) fc, size);
aoqi@0 1718 _bt.verify_not_unallocated((HeapWord*) fc, size);
aoqi@0 1719 if (_adaptive_freelists) {
aoqi@0 1720 _indexedFreeList[size].return_chunk_at_tail(fc);
aoqi@0 1721 } else {
aoqi@0 1722 _indexedFreeList[size].return_chunk_at_head(fc);
aoqi@0 1723 }
aoqi@0 1724 #ifndef PRODUCT
aoqi@0 1725 if (CMSCollector::abstract_state() != CMSCollector::Sweeping) {
aoqi@0 1726 _indexedFreeList[size].verify_stats();
aoqi@0 1727 }
aoqi@0 1728 #endif // PRODUCT
aoqi@0 1729 }
aoqi@0 1730
aoqi@0 1731 // Add chunk to end of last block -- if it's the largest
aoqi@0 1732 // block -- and update BOT and census data. We would
aoqi@0 1733 // of course have preferred to coalesce it with the
aoqi@0 1734 // last block, but it's currently less expensive to find the
aoqi@0 1735 // largest block than it is to find the last.
aoqi@0 1736 void
aoqi@0 1737 CompactibleFreeListSpace::addChunkToFreeListsAtEndRecordingStats(
aoqi@0 1738 HeapWord* chunk, size_t size) {
aoqi@0 1739 // check that the chunk does lie in this space!
aoqi@0 1740 assert(chunk != NULL && is_in_reserved(chunk), "Not in this space!");
aoqi@0 1741 // One of the parallel gc task threads may be here
aoqi@0 1742 // whilst others are allocating.
aoqi@0 1743 Mutex* lock = NULL;
aoqi@0 1744 if (ParallelGCThreads != 0) {
aoqi@0 1745 lock = &_parDictionaryAllocLock;
aoqi@0 1746 }
aoqi@0 1747 FreeChunk* ec;
aoqi@0 1748 {
aoqi@0 1749 MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
aoqi@0 1750 ec = dictionary()->find_largest_dict(); // get largest block
aoqi@0 1751 if (ec != NULL && ec->end() == (uintptr_t*) chunk) {
aoqi@0 1752 // It's a coterminal block - we can coalesce.
aoqi@0 1753 size_t old_size = ec->size();
aoqi@0 1754 coalDeath(old_size);
aoqi@0 1755 removeChunkFromDictionary(ec);
aoqi@0 1756 size += old_size;
aoqi@0 1757 } else {
aoqi@0 1758 ec = (FreeChunk*)chunk;
aoqi@0 1759 }
aoqi@0 1760 }
aoqi@0 1761 ec->set_size(size);
aoqi@0 1762 debug_only(ec->mangleFreed(size));
aoqi@0 1763 if (size < SmallForDictionary && ParallelGCThreads != 0) {
aoqi@0 1764 lock = _indexedFreeListParLocks[size];
aoqi@0 1765 }
aoqi@0 1766 MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
aoqi@0 1767 addChunkAndRepairOffsetTable((HeapWord*)ec, size, true);
aoqi@0 1768 // Record the birth under the lock, since the recording involves
aoqi@0 1769 // manipulation of the list on which the chunk lives and,
aoqi@0 1770 // if the chunk is allocated and is the last one on the list,
aoqi@0 1771 // the list can go away.
aoqi@0 1772 coalBirth(size);
aoqi@0 1773 }
aoqi@0 1774
aoqi@0 1775 void
aoqi@0 1776 CompactibleFreeListSpace::addChunkToFreeLists(HeapWord* chunk,
aoqi@0 1777 size_t size) {
aoqi@0 1778 // check that the chunk does lie in this space!
aoqi@0 1779 assert(chunk != NULL && is_in_reserved(chunk), "Not in this space!");
aoqi@0 1780 assert_locked();
aoqi@0 1781 _bt.verify_single_block(chunk, size);
aoqi@0 1782
aoqi@0 1783 FreeChunk* fc = (FreeChunk*) chunk;
aoqi@0 1784 fc->set_size(size);
aoqi@0 1785 debug_only(fc->mangleFreed(size));
aoqi@0 1786 if (size < SmallForDictionary) {
aoqi@0 1787 returnChunkToFreeList(fc);
aoqi@0 1788 } else {
aoqi@0 1789 returnChunkToDictionary(fc);
aoqi@0 1790 }
aoqi@0 1791 }
aoqi@0 1792
aoqi@0 1793 void
aoqi@0 1794 CompactibleFreeListSpace::addChunkAndRepairOffsetTable(HeapWord* chunk,
aoqi@0 1795 size_t size, bool coalesced) {
aoqi@0 1796 assert_locked();
aoqi@0 1797 assert(chunk != NULL, "null chunk");
aoqi@0 1798 if (coalesced) {
aoqi@0 1799 // repair BOT
aoqi@0 1800 _bt.single_block(chunk, size);
aoqi@0 1801 }
aoqi@0 1802 addChunkToFreeLists(chunk, size);
aoqi@0 1803 }
aoqi@0 1804
aoqi@0 1805 // We _must_ find the purported chunk on our free lists;
aoqi@0 1806 // we assert if we don't.
aoqi@0 1807 void
aoqi@0 1808 CompactibleFreeListSpace::removeFreeChunkFromFreeLists(FreeChunk* fc) {
aoqi@0 1809 size_t size = fc->size();
aoqi@0 1810 assert_locked();
aoqi@0 1811 debug_only(verifyFreeLists());
aoqi@0 1812 if (size < SmallForDictionary) {
aoqi@0 1813 removeChunkFromIndexedFreeList(fc);
aoqi@0 1814 } else {
aoqi@0 1815 removeChunkFromDictionary(fc);
aoqi@0 1816 }
aoqi@0 1817 _bt.verify_single_block((HeapWord*)fc, size);
aoqi@0 1818 debug_only(verifyFreeLists());
aoqi@0 1819 }
aoqi@0 1820
aoqi@0 1821 void
aoqi@0 1822 CompactibleFreeListSpace::removeChunkFromDictionary(FreeChunk* fc) {
aoqi@0 1823 size_t size = fc->size();
aoqi@0 1824 assert_locked();
aoqi@0 1825 assert(fc != NULL, "null chunk");
aoqi@0 1826 _bt.verify_single_block((HeapWord*)fc, size);
aoqi@0 1827 _dictionary->remove_chunk(fc);
aoqi@0 1828 // adjust _unallocated_block upward, as necessary
aoqi@0 1829 _bt.allocated((HeapWord*)fc, size);
aoqi@0 1830 }
aoqi@0 1831
aoqi@0 1832 void
aoqi@0 1833 CompactibleFreeListSpace::removeChunkFromIndexedFreeList(FreeChunk* fc) {
aoqi@0 1834 assert_locked();
aoqi@0 1835 size_t size = fc->size();
aoqi@0 1836 _bt.verify_single_block((HeapWord*)fc, size);
aoqi@0 1837 NOT_PRODUCT(
aoqi@0 1838 if (FLSVerifyIndexTable) {
aoqi@0 1839 verifyIndexedFreeList(size);
aoqi@0 1840 }
aoqi@0 1841 )
aoqi@0 1842 _indexedFreeList[size].remove_chunk(fc);
aoqi@0 1843 NOT_PRODUCT(
aoqi@0 1844 if (FLSVerifyIndexTable) {
aoqi@0 1845 verifyIndexedFreeList(size);
aoqi@0 1846 }
aoqi@0 1847 )
aoqi@0 1848 }
aoqi@0 1849
aoqi@0 1850 FreeChunk* CompactibleFreeListSpace::bestFitSmall(size_t numWords) {
aoqi@0 1851 /* A hint is the next larger size that has a surplus.
aoqi@0 1852 Start the search at a size large enough to guarantee that
aoqi@0 1853 the excess is >= MinChunkSize. */
aoqi@0 1854 size_t start = align_object_size(numWords + MinChunkSize);
aoqi@0 1855 if (start < IndexSetSize) {
aoqi@0 1856 AdaptiveFreeList<FreeChunk>* it = _indexedFreeList;
aoqi@0 1857 size_t hint = _indexedFreeList[start].hint();
aoqi@0 1858 while (hint < IndexSetSize) {
aoqi@0 1859 assert(hint % MinObjAlignment == 0, "hint should be aligned");
aoqi@0 1860 AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[hint];
aoqi@0 1861 if (fl->surplus() > 0 && fl->head() != NULL) {
aoqi@0 1862 // Found a list with surplus, reset original hint
aoqi@0 1863 // and split out a free chunk which is returned.
aoqi@0 1864 _indexedFreeList[start].set_hint(hint);
aoqi@0 1865 FreeChunk* res = getFromListGreater(fl, numWords);
aoqi@0 1866 assert(res == NULL || res->is_free(),
aoqi@0 1867 "Should be returning a free chunk");
aoqi@0 1868 return res;
aoqi@0 1869 }
aoqi@0 1870 hint = fl->hint(); /* keep looking */
aoqi@0 1871 }
aoqi@0 1872 /* None found. */
aoqi@0 1873 it[start].set_hint(IndexSetSize);
aoqi@0 1874 }
aoqi@0 1875 return NULL;
aoqi@0 1876 }
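
// Sketch of the hint chain walked by bestFitSmall() above: each size slot
// remembers the next larger size that had a surplus last time, so the search
// can skip empty slots. The arrays below are hypothetical stand-ins for the
// indexed free lists; the real code additionally requires the list to be
// non-empty and then splits a chunk via getFromListGreater().
static size_t sketch_best_fit_small(const long* surplus,   // surplus per size slot
                                    size_t*     hint,      // next-larger-with-surplus per slot
                                    size_t      index_set_size,
                                    size_t      start) {   // first size large enough to split
  size_t h = hint[start];
  while (h < index_set_size) {
    if (surplus[h] > 0) {
      hint[start] = h;          // remember the successful slot for next time
      return h;                 // caller would split a chunk of this size
    }
    h = hint[h];                // follow the chain to the next candidate
  }
  hint[start] = index_set_size; // nothing found; park the hint at the end
  return index_set_size;
}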
aoqi@0 1877
aoqi@0 1878 /* Requires fl->size >= numWords + MinChunkSize */
aoqi@0 1879 FreeChunk* CompactibleFreeListSpace::getFromListGreater(AdaptiveFreeList<FreeChunk>* fl,
aoqi@0 1880 size_t numWords) {
aoqi@0 1881 FreeChunk *curr = fl->head();
aoqi@0 1882 assert(curr != NULL, "List is empty");
aoqi@0 1883 size_t oldNumWords = curr->size();
aoqi@0 1884 assert(numWords >= MinChunkSize, "Word size is too small");
aoqi@0 1885 assert(oldNumWords >= numWords + MinChunkSize,
aoqi@0 1886 "Size of chunks in the list is too small");
aoqi@0 1887
aoqi@0 1888 fl->remove_chunk(curr);
aoqi@0 1889 // recorded indirectly by splitChunkAndReturnRemainder -
aoqi@0 1890 // smallSplit(oldNumWords, numWords);
aoqi@0 1891 FreeChunk* new_chunk = splitChunkAndReturnRemainder(curr, numWords);
aoqi@0 1892 // Does anything have to be done for the remainder in terms of
aoqi@0 1893 // fixing the card table?
aoqi@0 1894 assert(new_chunk == NULL || new_chunk->is_free(),
aoqi@0 1895 "Should be returning a free chunk");
aoqi@0 1896 return new_chunk;
aoqi@0 1897 }
aoqi@0 1898
aoqi@0 1899 FreeChunk*
aoqi@0 1900 CompactibleFreeListSpace::splitChunkAndReturnRemainder(FreeChunk* chunk,
aoqi@0 1901 size_t new_size) {
aoqi@0 1902 assert_locked();
aoqi@0 1903 size_t size = chunk->size();
aoqi@0 1904 assert(size > new_size, "Split from a smaller block?");
aoqi@0 1905 assert(is_aligned(chunk), "alignment problem");
aoqi@0 1906 assert(size == adjustObjectSize(size), "alignment problem");
aoqi@0 1907 size_t rem_size = size - new_size;
aoqi@0 1908 assert(rem_size == adjustObjectSize(rem_size), "alignment problem");
aoqi@0 1909 assert(rem_size >= MinChunkSize, "Free chunk smaller than minimum");
aoqi@0 1910 FreeChunk* ffc = (FreeChunk*)((HeapWord*)chunk + new_size);
aoqi@0 1911 assert(is_aligned(ffc), "alignment problem");
aoqi@0 1912 ffc->set_size(rem_size);
aoqi@0 1913 ffc->link_next(NULL);
aoqi@0 1914 ffc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
aoqi@0 1915 // Above must occur before BOT is updated below.
aoqi@0 1916 // adjust block offset table
aoqi@0 1917 OrderAccess::storestore();
aoqi@0 1918 assert(chunk->is_free() && ffc->is_free(), "Error");
aoqi@0 1919 _bt.split_block((HeapWord*)chunk, chunk->size(), new_size);
aoqi@0 1920 if (rem_size < SmallForDictionary) {
aoqi@0 1921 bool is_par = (SharedHeap::heap()->n_par_threads() > 0);
aoqi@0 1922 if (is_par) _indexedFreeListParLocks[rem_size]->lock();
aoqi@0 1923 assert(!is_par ||
aoqi@0 1924 (SharedHeap::heap()->n_par_threads() ==
aoqi@0 1925 SharedHeap::heap()->workers()->active_workers()), "Mismatch");
aoqi@0 1926 returnChunkToFreeList(ffc);
aoqi@0 1927 split(size, rem_size);
aoqi@0 1928 if (is_par) _indexedFreeListParLocks[rem_size]->unlock();
aoqi@0 1929 } else {
aoqi@0 1930 returnChunkToDictionary(ffc);
aoqi@0 1931 split(size, rem_size);
aoqi@0 1932 }
aoqi@0 1933 chunk->set_size(new_size);
aoqi@0 1934 return chunk;
aoqi@0 1935 }
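
// Minimal sketch of the size bookkeeping in splitChunkAndReturnRemainder()
// above: the chunk keeps new_size words and the remainder starts immediately
// after it. Plain word counts stand in for FreeChunk objects; BOT updates,
// memory ordering and list insertion are omitted.
static bool sketch_split_chunk(size_t chunk_words, size_t new_size, size_t min_chunk,
                               size_t* remainder_offset, size_t* remainder_words) {
  if (chunk_words <= new_size) return false;   // nothing to carve off
  size_t rem = chunk_words - new_size;
  if (rem < min_chunk) return false;           // remainder would not be a legal free chunk
  *remainder_offset = new_size;                // remainder begins right after the kept part
  *remainder_words  = rem;                     // goes back to an indexed list or the dictionary
  return true;
}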
aoqi@0 1936
aoqi@0 1937 void
aoqi@0 1938 CompactibleFreeListSpace::sweep_completed() {
aoqi@0 1939 // Now that space is probably plentiful, refill linear
aoqi@0 1940 // allocation blocks as needed.
aoqi@0 1941 refillLinearAllocBlocksIfNeeded();
aoqi@0 1942 }
aoqi@0 1943
aoqi@0 1944 void
aoqi@0 1945 CompactibleFreeListSpace::gc_prologue() {
aoqi@0 1946 assert_locked();
aoqi@0 1947 if (PrintFLSStatistics != 0) {
aoqi@0 1948 gclog_or_tty->print("Before GC:\n");
aoqi@0 1949 reportFreeListStatistics();
aoqi@0 1950 }
aoqi@0 1951 refillLinearAllocBlocksIfNeeded();
aoqi@0 1952 }
aoqi@0 1953
aoqi@0 1954 void
aoqi@0 1955 CompactibleFreeListSpace::gc_epilogue() {
aoqi@0 1956 assert_locked();
aoqi@0 1957 if (PrintGCDetails && Verbose && !_adaptive_freelists) {
aoqi@0 1958 if (_smallLinearAllocBlock._word_size == 0)
aoqi@0 1959 warning("CompactibleFreeListSpace(epilogue):: Linear allocation failure");
aoqi@0 1960 }
aoqi@0 1961 assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");
aoqi@0 1962 _promoInfo.stopTrackingPromotions();
aoqi@0 1963 repairLinearAllocationBlocks();
aoqi@0 1964 // Print Space's stats
aoqi@0 1965 if (PrintFLSStatistics != 0) {
aoqi@0 1966 gclog_or_tty->print("After GC:\n");
aoqi@0 1967 reportFreeListStatistics();
aoqi@0 1968 }
aoqi@0 1969 }
aoqi@0 1970
aoqi@0 1971 // Iteration support, mostly delegated from a CMS generation
aoqi@0 1972
aoqi@0 1973 void CompactibleFreeListSpace::save_marks() {
aoqi@0 1974 assert(Thread::current()->is_VM_thread(),
aoqi@0 1975 "Global variable should only be set when single-threaded");
aoqi@0 1976 // Mark the "end" of the used space at the time of this call;
aoqi@0 1977 // note, however, that promoted objects from this point
aoqi@0 1978 // on are tracked in the _promoInfo below.
aoqi@0 1979 set_saved_mark_word(unallocated_block());
aoqi@0 1980 #ifdef ASSERT
aoqi@0 1981 // Check the sanity of save_marks() etc.
aoqi@0 1982 MemRegion ur = used_region();
aoqi@0 1983 MemRegion urasm = used_region_at_save_marks();
aoqi@0 1984 assert(ur.contains(urasm),
aoqi@0 1985 err_msg(" Error at save_marks(): [" PTR_FORMAT "," PTR_FORMAT ")"
aoqi@0 1986 " should contain [" PTR_FORMAT "," PTR_FORMAT ")",
aoqi@0 1987 p2i(ur.start()), p2i(ur.end()), p2i(urasm.start()), p2i(urasm.end())));
aoqi@0 1988 #endif
aoqi@0 1989 // inform allocator that promotions should be tracked.
aoqi@0 1990 assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");
aoqi@0 1991 _promoInfo.startTrackingPromotions();
aoqi@0 1992 }
aoqi@0 1993
aoqi@0 1994 bool CompactibleFreeListSpace::no_allocs_since_save_marks() {
aoqi@0 1995 assert(_promoInfo.tracking(), "No preceding save_marks?");
aoqi@0 1996 assert(SharedHeap::heap()->n_par_threads() == 0,
aoqi@0 1997 "Shouldn't be called if using parallel gc.");
aoqi@0 1998 return _promoInfo.noPromotions();
aoqi@0 1999 }
aoqi@0 2000
aoqi@0 2001 #define CFLS_OOP_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix) \
aoqi@0 2002 \
aoqi@0 2003 void CompactibleFreeListSpace:: \
aoqi@0 2004 oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk) { \
aoqi@0 2005 assert(SharedHeap::heap()->n_par_threads() == 0, \
aoqi@0 2006 "Shouldn't be called (yet) during parallel part of gc."); \
aoqi@0 2007 _promoInfo.promoted_oops_iterate##nv_suffix(blk); \
aoqi@0 2008 /* \
aoqi@0 2009 * This also restores any displaced headers and removes the elements from \
aoqi@0 2010 * the iteration set as they are processed, so that we have a clean slate \
aoqi@0 2011 * at the end of the iteration. Note, thus, that if new objects are \
aoqi@0 2012 * promoted as a result of the iteration they are iterated over as well. \
aoqi@0 2013 */ \
aoqi@0 2014 assert(_promoInfo.noPromotions(), "_promoInfo inconsistency"); \
aoqi@0 2015 }
aoqi@0 2016
aoqi@0 2017 ALL_SINCE_SAVE_MARKS_CLOSURES(CFLS_OOP_SINCE_SAVE_MARKS_DEFN)
aoqi@0 2018
aoqi@0 2019 bool CompactibleFreeListSpace::linearAllocationWouldFail() const {
aoqi@0 2020 return _smallLinearAllocBlock._word_size == 0;
aoqi@0 2021 }
aoqi@0 2022
aoqi@0 2023 void CompactibleFreeListSpace::repairLinearAllocationBlocks() {
aoqi@0 2024 // Fix up linear allocation blocks to look like free blocks
aoqi@0 2025 repairLinearAllocBlock(&_smallLinearAllocBlock);
aoqi@0 2026 }
aoqi@0 2027
aoqi@0 2028 void CompactibleFreeListSpace::repairLinearAllocBlock(LinearAllocBlock* blk) {
aoqi@0 2029 assert_locked();
aoqi@0 2030 if (blk->_ptr != NULL) {
aoqi@0 2031 assert(blk->_word_size != 0 && blk->_word_size >= MinChunkSize,
aoqi@0 2032 "Minimum block size requirement");
aoqi@0 2033 FreeChunk* fc = (FreeChunk*)(blk->_ptr);
aoqi@0 2034 fc->set_size(blk->_word_size);
aoqi@0 2035 fc->link_prev(NULL); // mark as free
aoqi@0 2036 fc->dontCoalesce();
aoqi@0 2037 assert(fc->is_free(), "just marked it free");
aoqi@0 2038 assert(fc->cantCoalesce(), "just marked it uncoalescable");
aoqi@0 2039 }
aoqi@0 2040 }
aoqi@0 2041
aoqi@0 2042 void CompactibleFreeListSpace::refillLinearAllocBlocksIfNeeded() {
aoqi@0 2043 assert_locked();
aoqi@0 2044 if (_smallLinearAllocBlock._ptr == NULL) {
aoqi@0 2045 assert(_smallLinearAllocBlock._word_size == 0,
aoqi@0 2046 "Size of linAB should be zero if the ptr is NULL");
aoqi@0 2047 // Reset the linAB refill and allocation size limit.
aoqi@0 2048 _smallLinearAllocBlock.set(0, 0, 1024*SmallForLinearAlloc, SmallForLinearAlloc);
aoqi@0 2049 }
aoqi@0 2050 refillLinearAllocBlockIfNeeded(&_smallLinearAllocBlock);
aoqi@0 2051 }
aoqi@0 2052
aoqi@0 2053 void
aoqi@0 2054 CompactibleFreeListSpace::refillLinearAllocBlockIfNeeded(LinearAllocBlock* blk) {
aoqi@0 2055 assert_locked();
aoqi@0 2056 assert((blk->_ptr == NULL && blk->_word_size == 0) ||
aoqi@0 2057 (blk->_ptr != NULL && blk->_word_size >= MinChunkSize),
aoqi@0 2058 "blk invariant");
aoqi@0 2059 if (blk->_ptr == NULL) {
aoqi@0 2060 refillLinearAllocBlock(blk);
aoqi@0 2061 }
aoqi@0 2062 if (PrintMiscellaneous && Verbose) {
aoqi@0 2063 if (blk->_word_size == 0) {
aoqi@0 2064 warning("CompactibleFreeListSpace(prologue):: Linear allocation failure");
aoqi@0 2065 }
aoqi@0 2066 }
aoqi@0 2067 }
aoqi@0 2068
aoqi@0 2069 void
aoqi@0 2070 CompactibleFreeListSpace::refillLinearAllocBlock(LinearAllocBlock* blk) {
aoqi@0 2071 assert_locked();
aoqi@0 2072 assert(blk->_word_size == 0 && blk->_ptr == NULL,
aoqi@0 2073 "linear allocation block should be empty");
aoqi@0 2074 FreeChunk* fc;
aoqi@0 2075 if (blk->_refillSize < SmallForDictionary &&
aoqi@0 2076 (fc = getChunkFromIndexedFreeList(blk->_refillSize)) != NULL) {
aoqi@0 2077 // A linAB's strategy might be to use small sizes to reduce
aoqi@0 2078 // fragmentation but still get the benefits of allocation from a
aoqi@0 2079 // linAB.
aoqi@0 2080 } else {
aoqi@0 2081 fc = getChunkFromDictionary(blk->_refillSize);
aoqi@0 2082 }
aoqi@0 2083 if (fc != NULL) {
aoqi@0 2084 blk->_ptr = (HeapWord*)fc;
aoqi@0 2085 blk->_word_size = fc->size();
aoqi@0 2086 fc->dontCoalesce(); // to prevent sweeper from sweeping us up
aoqi@0 2087 }
aoqi@0 2088 }
aoqi@0 2089
aoqi@0 2090 // Support for concurrent collection policy decisions.
aoqi@0 2091 bool CompactibleFreeListSpace::should_concurrent_collect() const {
aoqi@0 2092 // In the future we might want to add in fragmentation stats --
aoqi@0 2093 // including erosion of the "mountain" into this decision as well.
aoqi@0 2094 return !adaptive_freelists() && linearAllocationWouldFail();
aoqi@0 2095 }
aoqi@0 2096
aoqi@0 2097 // Support for compaction
aoqi@0 2098
aoqi@0 2099 void CompactibleFreeListSpace::prepare_for_compaction(CompactPoint* cp) {
aoqi@0 2100 SCAN_AND_FORWARD(cp,end,block_is_obj,block_size);
aoqi@0 2101 // prepare_for_compaction() uses the space between live objects
aoqi@0 2102 // so that a later phase can skip dead space quickly. So verification
aoqi@0 2103 // of the free lists doesn't work after this point.
aoqi@0 2104 }
aoqi@0 2105
aoqi@0 2106 #define obj_size(q) adjustObjectSize(oop(q)->size())
aoqi@0 2107 #define adjust_obj_size(s) adjustObjectSize(s)
aoqi@0 2108
aoqi@0 2109 void CompactibleFreeListSpace::adjust_pointers() {
aoqi@0 2110 // In other versions of adjust_pointers(), a bail out
aoqi@0 2111 // based on the amount of live data in the generation
aoqi@0 2112 // (i.e., if 0, bail out) may be used.
aoqi@0 2113 // Cannot test used() == 0 here because the free lists have already
aoqi@0 2114 // been mangled by the compaction.
aoqi@0 2115
aoqi@0 2116 SCAN_AND_ADJUST_POINTERS(adjust_obj_size);
aoqi@0 2117 // See note about verification in prepare_for_compaction().
aoqi@0 2118 }
aoqi@0 2119
aoqi@0 2120 void CompactibleFreeListSpace::compact() {
aoqi@0 2121 SCAN_AND_COMPACT(obj_size);
aoqi@0 2122 }
aoqi@0 2123
aoqi@0 2124 // fragmentation_metric = 1 - [sum of (fbs**2) / (sum of fbs)**2]
aoqi@0 2125 // where fbs ranges over the free block sizes
aoqi@0 2126 double CompactibleFreeListSpace::flsFrag() const {
aoqi@0 2127 size_t itabFree = totalSizeInIndexedFreeLists();
aoqi@0 2128 double frag = 0.0;
aoqi@0 2129 size_t i;
aoqi@0 2130
aoqi@0 2131 for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
aoqi@0 2132 double sz = i;
aoqi@0 2133 frag += _indexedFreeList[i].count() * (sz * sz);
aoqi@0 2134 }
aoqi@0 2135
aoqi@0 2136 double totFree = itabFree +
aoqi@0 2137 _dictionary->total_chunk_size(DEBUG_ONLY(freelistLock()));
aoqi@0 2138 if (totFree > 0) {
aoqi@0 2139 frag = ((frag + _dictionary->sum_of_squared_block_sizes()) /
aoqi@0 2140 (totFree * totFree));
aoqi@0 2141 frag = (double)1.0 - frag;
aoqi@0 2142 } else {
aoqi@0 2143 assert(frag == 0.0, "Follows from totFree == 0");
aoqi@0 2144 }
aoqi@0 2145 return frag;
aoqi@0 2146 }
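
// Worked sketch of the metric computed by flsFrag() above:
//   frag = 1 - sum(size^2 * count) / (sum(size * count))^2
// A single large free block gives a value near 0 (unfragmented); many tiny
// blocks push the value toward 1. The per-size counts here are hypothetical
// inputs, and the dictionary's contribution is folded into the same array
// for brevity.
static double sketch_fls_frag(const size_t* counts, size_t num_sizes) {
  double sum_sq = 0.0;   // sum of squared free block sizes
  double total  = 0.0;   // total free words
  for (size_t sz = 1; sz < num_sizes; sz++) {
    sum_sq += (double)counts[sz] * (double)sz * (double)sz;
    total  += (double)counts[sz] * (double)sz;
  }
  if (total == 0.0) {
    return 0.0;          // no free space, nothing to measure
  }
  return 1.0 - sum_sq / (total * total);
}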
aoqi@0 2147
aoqi@0 2148 void CompactibleFreeListSpace::beginSweepFLCensus(
aoqi@0 2149 float inter_sweep_current,
aoqi@0 2150 float inter_sweep_estimate,
aoqi@0 2151 float intra_sweep_estimate) {
aoqi@0 2152 assert_locked();
aoqi@0 2153 size_t i;
aoqi@0 2154 for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
aoqi@0 2155 AdaptiveFreeList<FreeChunk>* fl = &_indexedFreeList[i];
aoqi@0 2156 if (PrintFLSStatistics > 1) {
aoqi@0 2157 gclog_or_tty->print("size[" SIZE_FORMAT "] : ", i);
aoqi@0 2158 }
aoqi@0 2159 fl->compute_desired(inter_sweep_current, inter_sweep_estimate, intra_sweep_estimate);
aoqi@0 2160 fl->set_coal_desired((ssize_t)((double)fl->desired() * CMSSmallCoalSurplusPercent));
aoqi@0 2161 fl->set_before_sweep(fl->count());
aoqi@0 2162 fl->set_bfr_surp(fl->surplus());
aoqi@0 2163 }
aoqi@0 2164 _dictionary->begin_sweep_dict_census(CMSLargeCoalSurplusPercent,
aoqi@0 2165 inter_sweep_current,
aoqi@0 2166 inter_sweep_estimate,
aoqi@0 2167 intra_sweep_estimate);
aoqi@0 2168 }
aoqi@0 2169
aoqi@0 2170 void CompactibleFreeListSpace::setFLSurplus() {
aoqi@0 2171 assert_locked();
aoqi@0 2172 size_t i;
aoqi@0 2173 for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
aoqi@0 2174 AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
aoqi@0 2175 fl->set_surplus(fl->count() -
aoqi@0 2176 (ssize_t)((double)fl->desired() * CMSSmallSplitSurplusPercent));
aoqi@0 2177 }
aoqi@0 2178 }
aoqi@0 2179
aoqi@0 2180 void CompactibleFreeListSpace::setFLHints() {
aoqi@0 2181 assert_locked();
aoqi@0 2182 size_t i;
aoqi@0 2183 size_t h = IndexSetSize;
aoqi@0 2184 for (i = IndexSetSize - 1; i != 0; i -= IndexSetStride) {
aoqi@0 2185 AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
aoqi@0 2186 fl->set_hint(h);
aoqi@0 2187 if (fl->surplus() > 0) {
aoqi@0 2188 h = i;
aoqi@0 2189 }
aoqi@0 2190 }
aoqi@0 2191 }
aoqi@0 2192
aoqi@0 2193 void CompactibleFreeListSpace::clearFLCensus() {
aoqi@0 2194 assert_locked();
aoqi@0 2195 size_t i;
aoqi@0 2196 for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
aoqi@0 2197 AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
aoqi@0 2198 fl->set_prev_sweep(fl->count());
aoqi@0 2199 fl->set_coal_births(0);
aoqi@0 2200 fl->set_coal_deaths(0);
aoqi@0 2201 fl->set_split_births(0);
aoqi@0 2202 fl->set_split_deaths(0);
aoqi@0 2203 }
aoqi@0 2204 }
aoqi@0 2205
aoqi@0 2206 void CompactibleFreeListSpace::endSweepFLCensus(size_t sweep_count) {
aoqi@0 2207 if (PrintFLSStatistics > 0) {
aoqi@0 2208 HeapWord* largestAddr = (HeapWord*) dictionary()->find_largest_dict();
aoqi@0 2209 gclog_or_tty->print_cr("CMS: Large block " PTR_FORMAT,
aoqi@0 2210 p2i(largestAddr));
aoqi@0 2211 }
aoqi@0 2212 setFLSurplus();
aoqi@0 2213 setFLHints();
aoqi@0 2214 if (PrintGC && PrintFLSCensus > 0) {
aoqi@0 2215 printFLCensus(sweep_count);
aoqi@0 2216 }
aoqi@0 2217 clearFLCensus();
aoqi@0 2218 assert_locked();
aoqi@0 2219 _dictionary->end_sweep_dict_census(CMSLargeSplitSurplusPercent);
aoqi@0 2220 }
aoqi@0 2221
aoqi@0 2222 bool CompactibleFreeListSpace::coalOverPopulated(size_t size) {
aoqi@0 2223 if (size < SmallForDictionary) {
aoqi@0 2224 AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
aoqi@0 2225 return (fl->coal_desired() < 0) ||
aoqi@0 2226 ((int)fl->count() > fl->coal_desired());
aoqi@0 2227 } else {
aoqi@0 2228 return dictionary()->coal_dict_over_populated(size);
aoqi@0 2229 }
aoqi@0 2230 }
aoqi@0 2231
aoqi@0 2232 void CompactibleFreeListSpace::smallCoalBirth(size_t size) {
aoqi@0 2233 assert(size < SmallForDictionary, "Size too large for indexed list");
aoqi@0 2234 AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
aoqi@0 2235 fl->increment_coal_births();
aoqi@0 2236 fl->increment_surplus();
aoqi@0 2237 }
aoqi@0 2238
aoqi@0 2239 void CompactibleFreeListSpace::smallCoalDeath(size_t size) {
aoqi@0 2240 assert(size < SmallForDictionary, "Size too large for indexed list");
aoqi@0 2241 AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
aoqi@0 2242 fl->increment_coal_deaths();
aoqi@0 2243 fl->decrement_surplus();
aoqi@0 2244 }
aoqi@0 2245
aoqi@0 2246 void CompactibleFreeListSpace::coalBirth(size_t size) {
aoqi@0 2247 if (size < SmallForDictionary) {
aoqi@0 2248 smallCoalBirth(size);
aoqi@0 2249 } else {
aoqi@0 2250 dictionary()->dict_census_update(size,
aoqi@0 2251 false /* split */,
aoqi@0 2252 true /* birth */);
aoqi@0 2253 }
aoqi@0 2254 }
aoqi@0 2255
aoqi@0 2256 void CompactibleFreeListSpace::coalDeath(size_t size) {
aoqi@0 2257 if (size < SmallForDictionary) {
aoqi@0 2258 smallCoalDeath(size);
aoqi@0 2259 } else {
aoqi@0 2260 dictionary()->dict_census_update(size,
aoqi@0 2261 false /* split */,
aoqi@0 2262 false /* birth */);
aoqi@0 2263 }
aoqi@0 2264 }
aoqi@0 2265
aoqi@0 2266 void CompactibleFreeListSpace::smallSplitBirth(size_t size) {
aoqi@0 2267 assert(size < SmallForDictionary, "Size too large for indexed list");
aoqi@0 2268 AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
aoqi@0 2269 fl->increment_split_births();
aoqi@0 2270 fl->increment_surplus();
aoqi@0 2271 }
aoqi@0 2272
aoqi@0 2273 void CompactibleFreeListSpace::smallSplitDeath(size_t size) {
aoqi@0 2274 assert(size < SmallForDictionary, "Size too large for indexed list");
aoqi@0 2275 AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
aoqi@0 2276 fl->increment_split_deaths();
aoqi@0 2277 fl->decrement_surplus();
aoqi@0 2278 }
aoqi@0 2279
aoqi@0 2280 void CompactibleFreeListSpace::split_birth(size_t size) {
aoqi@0 2281 if (size < SmallForDictionary) {
aoqi@0 2282 smallSplitBirth(size);
aoqi@0 2283 } else {
aoqi@0 2284 dictionary()->dict_census_update(size,
aoqi@0 2285 true /* split */,
aoqi@0 2286 true /* birth */);
aoqi@0 2287 }
aoqi@0 2288 }
aoqi@0 2289
aoqi@0 2290 void CompactibleFreeListSpace::splitDeath(size_t size) {
aoqi@0 2291 if (size < SmallForDictionary) {
aoqi@0 2292 smallSplitDeath(size);
aoqi@0 2293 } else {
aoqi@0 2294 dictionary()->dict_census_update(size,
aoqi@0 2295 true /* split */,
aoqi@0 2296 false /* birth */);
aoqi@0 2297 }
aoqi@0 2298 }
aoqi@0 2299
aoqi@0 2300 void CompactibleFreeListSpace::split(size_t from, size_t to1) {
aoqi@0 2301 size_t to2 = from - to1;
aoqi@0 2302 splitDeath(from);
aoqi@0 2303 split_birth(to1);
aoqi@0 2304 split_birth(to2);
aoqi@0 2305 }
aoqi@0 2306
aoqi@0 2307 void CompactibleFreeListSpace::print() const {
aoqi@0 2308 print_on(tty);
aoqi@0 2309 }
aoqi@0 2310
aoqi@0 2311 void CompactibleFreeListSpace::prepare_for_verify() {
aoqi@0 2312 assert_locked();
aoqi@0 2313 repairLinearAllocationBlocks();
aoqi@0 2314 // Verify that the SpoolBlocks look like free blocks of
aoqi@0 2315 // appropriate sizes... To be done ...
aoqi@0 2316 }
aoqi@0 2317
aoqi@0 2318 class VerifyAllBlksClosure: public BlkClosure {
aoqi@0 2319 private:
aoqi@0 2320 const CompactibleFreeListSpace* _sp;
aoqi@0 2321 const MemRegion _span;
aoqi@0 2322 HeapWord* _last_addr;
aoqi@0 2323 size_t _last_size;
aoqi@0 2324 bool _last_was_obj;
aoqi@0 2325 bool _last_was_live;
aoqi@0 2326
aoqi@0 2327 public:
aoqi@0 2328 VerifyAllBlksClosure(const CompactibleFreeListSpace* sp,
aoqi@0 2329 MemRegion span) : _sp(sp), _span(span),
aoqi@0 2330 _last_addr(NULL), _last_size(0),
aoqi@0 2331 _last_was_obj(false), _last_was_live(false) { }
aoqi@0 2332
aoqi@0 2333 virtual size_t do_blk(HeapWord* addr) {
aoqi@0 2334 size_t res;
aoqi@0 2335 bool was_obj = false;
aoqi@0 2336 bool was_live = false;
aoqi@0 2337 if (_sp->block_is_obj(addr)) {
aoqi@0 2338 was_obj = true;
aoqi@0 2339 oop p = oop(addr);
aoqi@0 2340 guarantee(p->is_oop(), "Should be an oop");
aoqi@0 2341 res = _sp->adjustObjectSize(p->size());
aoqi@0 2342 if (_sp->obj_is_alive(addr)) {
aoqi@0 2343 was_live = true;
aoqi@0 2344 p->verify();
aoqi@0 2345 }
aoqi@0 2346 } else {
aoqi@0 2347 FreeChunk* fc = (FreeChunk*)addr;
aoqi@0 2348 res = fc->size();
aoqi@0 2349 if (FLSVerifyLists && !fc->cantCoalesce()) {
aoqi@0 2350 guarantee(_sp->verify_chunk_in_free_list(fc),
aoqi@0 2351 "Chunk should be on a free list");
aoqi@0 2352 }
aoqi@0 2353 }
aoqi@0 2354 if (res == 0) {
aoqi@0 2355 gclog_or_tty->print_cr("Livelock: no rank reduction!");
aoqi@0 2356 gclog_or_tty->print_cr(
aoqi@0 2357 " Current: addr = " PTR_FORMAT ", size = " SIZE_FORMAT ", obj = %s, live = %s \n"
aoqi@0 2358 " Previous: addr = " PTR_FORMAT ", size = " SIZE_FORMAT ", obj = %s, live = %s \n",
aoqi@0 2359 p2i(addr), res, was_obj ?"true":"false", was_live ?"true":"false",
aoqi@0 2360 p2i(_last_addr), _last_size, _last_was_obj?"true":"false", _last_was_live?"true":"false");
aoqi@0 2361 _sp->print_on(gclog_or_tty);
aoqi@0 2362 guarantee(false, "Seppuku!");
aoqi@0 2363 }
aoqi@0 2364 _last_addr = addr;
aoqi@0 2365 _last_size = res;
aoqi@0 2366 _last_was_obj = was_obj;
aoqi@0 2367 _last_was_live = was_live;
aoqi@0 2368 return res;
aoqi@0 2369 }
aoqi@0 2370 };
aoqi@0 2371
aoqi@0 2372 class VerifyAllOopsClosure: public OopClosure {
aoqi@0 2373 private:
aoqi@0 2374 const CMSCollector* _collector;
aoqi@0 2375 const CompactibleFreeListSpace* _sp;
aoqi@0 2376 const MemRegion _span;
aoqi@0 2377 const bool _past_remark;
aoqi@0 2378 const CMSBitMap* _bit_map;
aoqi@0 2379
aoqi@0 2380 protected:
aoqi@0 2381 void do_oop(void* p, oop obj) {
aoqi@0 2382 if (_span.contains(obj)) { // the interior oop points into CMS heap
aoqi@0 2383 if (!_span.contains(p)) { // reference from outside CMS heap
aoqi@0 2384 // Should be a valid object; the first disjunct below allows
aoqi@0 2385 // us to sidestep an assertion in block_is_obj() that insists
aoqi@0 2386 // that p be in _sp. Note that several generations (and spaces)
aoqi@0 2387 // are spanned by _span (CMS heap) above.
aoqi@0 2388 guarantee(!_sp->is_in_reserved(obj) ||
aoqi@0 2389 _sp->block_is_obj((HeapWord*)obj),
aoqi@0 2390 "Should be an object");
aoqi@0 2391 guarantee(obj->is_oop(), "Should be an oop");
aoqi@0 2392 obj->verify();
aoqi@0 2393 if (_past_remark) {
aoqi@0 2394 // Remark has been completed, the object should be marked
aoqi@0 2395 _bit_map->isMarked((HeapWord*)obj);
aoqi@0 2396 }
aoqi@0 2397 } else { // reference within CMS heap
aoqi@0 2398 if (_past_remark) {
aoqi@0 2399 // Remark has been completed -- so the referent should have
aoqi@0 2400 // been marked, if referring object is.
aoqi@0 2401 if (_bit_map->isMarked(_collector->block_start(p))) {
aoqi@0 2402 guarantee(_bit_map->isMarked((HeapWord*)obj), "Marking error?");
aoqi@0 2403 }
aoqi@0 2404 }
aoqi@0 2405 }
aoqi@0 2406 } else if (_sp->is_in_reserved(p)) {
aoqi@0 2407 // the reference is from FLS, and points out of FLS
aoqi@0 2408 guarantee(obj->is_oop(), "Should be an oop");
aoqi@0 2409 obj->verify();
aoqi@0 2410 }
aoqi@0 2411 }
aoqi@0 2412
aoqi@0 2413 template <class T> void do_oop_work(T* p) {
aoqi@0 2414 T heap_oop = oopDesc::load_heap_oop(p);
aoqi@0 2415 if (!oopDesc::is_null(heap_oop)) {
aoqi@0 2416 oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
aoqi@0 2417 do_oop(p, obj);
aoqi@0 2418 }
aoqi@0 2419 }
aoqi@0 2420
aoqi@0 2421 public:
aoqi@0 2422 VerifyAllOopsClosure(const CMSCollector* collector,
aoqi@0 2423 const CompactibleFreeListSpace* sp, MemRegion span,
aoqi@0 2424 bool past_remark, CMSBitMap* bit_map) :
aoqi@0 2425 _collector(collector), _sp(sp), _span(span),
aoqi@0 2426 _past_remark(past_remark), _bit_map(bit_map) { }
aoqi@0 2427
aoqi@0 2428 virtual void do_oop(oop* p) { VerifyAllOopsClosure::do_oop_work(p); }
aoqi@0 2429 virtual void do_oop(narrowOop* p) { VerifyAllOopsClosure::do_oop_work(p); }
aoqi@0 2430 };
aoqi@0 2431
aoqi@0 2432 void CompactibleFreeListSpace::verify() const {
aoqi@0 2433 assert_lock_strong(&_freelistLock);
aoqi@0 2434 verify_objects_initialized();
aoqi@0 2435 MemRegion span = _collector->_span;
aoqi@0 2436 bool past_remark = (_collector->abstract_state() ==
aoqi@0 2437 CMSCollector::Sweeping);
aoqi@0 2438
aoqi@0 2439 ResourceMark rm;
aoqi@0 2440 HandleMark hm;
aoqi@0 2441
aoqi@0 2442 // Check integrity of CFL data structures
aoqi@0 2443 _promoInfo.verify();
aoqi@0 2444 _dictionary->verify();
aoqi@0 2445 if (FLSVerifyIndexTable) {
aoqi@0 2446 verifyIndexedFreeLists();
aoqi@0 2447 }
aoqi@0 2448 // Check integrity of all objects and free blocks in space
aoqi@0 2449 {
aoqi@0 2450 VerifyAllBlksClosure cl(this, span);
aoqi@0 2451 ((CompactibleFreeListSpace*)this)->blk_iterate(&cl); // cast off const
aoqi@0 2452 }
aoqi@0 2453 // Check that all references in the heap to FLS
aoqi@0 2454 // are to valid objects in FLS or that references in
aoqi@0 2455 // FLS are to valid objects elsewhere in the heap
aoqi@0 2456 if (FLSVerifyAllHeapReferences)
aoqi@0 2457 {
aoqi@0 2458 VerifyAllOopsClosure cl(_collector, this, span, past_remark,
aoqi@0 2459 _collector->markBitMap());
aoqi@0 2460 CollectedHeap* ch = Universe::heap();
aoqi@0 2461
aoqi@0 2462 // Iterate over all oops in the heap. Uses the _no_header version
aoqi@0 2463 // since we are not interested in following the klass pointers.
aoqi@0 2464 ch->oop_iterate_no_header(&cl);
aoqi@0 2465 }
aoqi@0 2466
aoqi@0 2467 if (VerifyObjectStartArray) {
aoqi@0 2468 // Verify the block offset table
aoqi@0 2469 _bt.verify();
aoqi@0 2470 }
aoqi@0 2471 }
aoqi@0 2472
aoqi@0 2473 #ifndef PRODUCT
aoqi@0 2474 void CompactibleFreeListSpace::verifyFreeLists() const {
aoqi@0 2475 if (FLSVerifyLists) {
aoqi@0 2476 _dictionary->verify();
aoqi@0 2477 verifyIndexedFreeLists();
aoqi@0 2478 } else {
aoqi@0 2479 if (FLSVerifyDictionary) {
aoqi@0 2480 _dictionary->verify();
aoqi@0 2481 }
aoqi@0 2482 if (FLSVerifyIndexTable) {
aoqi@0 2483 verifyIndexedFreeLists();
aoqi@0 2484 }
aoqi@0 2485 }
aoqi@0 2486 }
aoqi@0 2487 #endif
aoqi@0 2488
aoqi@0 2489 void CompactibleFreeListSpace::verifyIndexedFreeLists() const {
aoqi@0 2490 size_t i = 0;
aoqi@0 2491 for (; i < IndexSetStart; i++) {
aoqi@0 2492 guarantee(_indexedFreeList[i].head() == NULL, "should be NULL");
aoqi@0 2493 }
aoqi@0 2494 for (; i < IndexSetSize; i++) {
aoqi@0 2495 verifyIndexedFreeList(i);
aoqi@0 2496 }
aoqi@0 2497 }
aoqi@0 2498
aoqi@0 2499 void CompactibleFreeListSpace::verifyIndexedFreeList(size_t size) const {
aoqi@0 2500 FreeChunk* fc = _indexedFreeList[size].head();
aoqi@0 2501 FreeChunk* tail = _indexedFreeList[size].tail();
aoqi@0 2502 size_t num = _indexedFreeList[size].count();
aoqi@0 2503 size_t n = 0;
aoqi@0 2504 guarantee(((size >= IndexSetStart) && (size % IndexSetStride == 0)) || fc == NULL,
aoqi@0 2505 "Slot should have been empty");
aoqi@0 2506 for (; fc != NULL; fc = fc->next(), n++) {
aoqi@0 2507 guarantee(fc->size() == size, "Size inconsistency");
aoqi@0 2508 guarantee(fc->is_free(), "!free?");
aoqi@0 2509 guarantee(fc->next() == NULL || fc->next()->prev() == fc, "Broken list");
aoqi@0 2510 guarantee((fc->next() == NULL) == (fc == tail), "Incorrect tail");
aoqi@0 2511 }
aoqi@0 2512 guarantee(n == num, "Incorrect count");
aoqi@0 2513 }
aoqi@0 2514
aoqi@0 2515 #ifndef PRODUCT
aoqi@0 2516 void CompactibleFreeListSpace::check_free_list_consistency() const {
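  // Blocks smaller than TreeChunk::min_size() cannot live in the dictionary,
  // so every such size must be covered by the indexed free lists (or, failing
  // that, by linear allocation buffers), hence the bound checked below.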
aoqi@0 2517 assert((TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >::min_size() <= IndexSetSize),
aoqi@0 2518 "Some sizes can't be allocated without recourse to"
aoqi@0 2519 " linear allocation buffers");
aoqi@0 2520 assert((TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >::min_size()*HeapWordSize == sizeof(TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >)),
aoqi@0 2521 "else MIN_TREE_CHUNK_SIZE is wrong");
aoqi@0 2522 assert(IndexSetStart != 0, "IndexSetStart not initialized");
aoqi@0 2523 assert(IndexSetStride != 0, "IndexSetStride not initialized");
aoqi@0 2524 }
aoqi@0 2525 #endif
aoqi@0 2526
aoqi@0 2527 void CompactibleFreeListSpace::printFLCensus(size_t sweep_count) const {
aoqi@0 2528 assert_lock_strong(&_freelistLock);
aoqi@0 2529 AdaptiveFreeList<FreeChunk> total;
aoqi@0 2530 gclog_or_tty->print("end sweep# " SIZE_FORMAT "\n", sweep_count);
aoqi@0 2531 AdaptiveFreeList<FreeChunk>::print_labels_on(gclog_or_tty, "size");
aoqi@0 2532 size_t total_free = 0;
aoqi@0 2533 for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
aoqi@0 2534 const AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
aoqi@0 2535 total_free += fl->count() * fl->size();
aoqi@0 2536 if (i % (40*IndexSetStride) == 0) {
aoqi@0 2537 AdaptiveFreeList<FreeChunk>::print_labels_on(gclog_or_tty, "size");
aoqi@0 2538 }
aoqi@0 2539 fl->print_on(gclog_or_tty);
aoqi@0 2540 total.set_bfr_surp( total.bfr_surp() + fl->bfr_surp() );
aoqi@0 2541 total.set_surplus( total.surplus() + fl->surplus() );
aoqi@0 2542 total.set_desired( total.desired() + fl->desired() );
aoqi@0 2543 total.set_prev_sweep( total.prev_sweep() + fl->prev_sweep() );
aoqi@0 2544 total.set_before_sweep(total.before_sweep() + fl->before_sweep());
aoqi@0 2545 total.set_count( total.count() + fl->count() );
aoqi@0 2546 total.set_coal_births( total.coal_births() + fl->coal_births() );
aoqi@0 2547 total.set_coal_deaths( total.coal_deaths() + fl->coal_deaths() );
aoqi@0 2548 total.set_split_births(total.split_births() + fl->split_births());
aoqi@0 2549 total.set_split_deaths(total.split_deaths() + fl->split_deaths());
aoqi@0 2550 }
aoqi@0 2551 total.print_on(gclog_or_tty, "TOTAL");
aoqi@0 2552 gclog_or_tty->print_cr("Total free in indexed lists "
aoqi@0 2553 SIZE_FORMAT " words", total_free);
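  // The "growth" figure below is the net number of blocks gained by splitting
  // and coalescing (births minus deaths), expressed as a fraction of the block
  // count recorded at the previous sweep; "deficit" is the shortfall of the
  // current block count relative to the desired count, as a fraction of the
  // desired count.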
aoqi@0 2554 gclog_or_tty->print("growth: %8.5f deficit: %8.5f\n",
aoqi@0 2555 (double)(total.split_births()+total.coal_births()-total.split_deaths()-total.coal_deaths())/
aoqi@0 2556 (total.prev_sweep() != 0 ? (double)total.prev_sweep() : 1.0),
aoqi@0 2557 (double)(total.desired() - total.count())/(total.desired() != 0 ? (double)total.desired() : 1.0));
aoqi@0 2558 _dictionary->print_dict_census();
aoqi@0 2559 }
aoqi@0 2560
aoqi@0 2561 ///////////////////////////////////////////////////////////////////////////
aoqi@0 2562 // CFLS_LAB
aoqi@0 2563 ///////////////////////////////////////////////////////////////////////////
aoqi@0 2564
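// VECTOR_257(x) expands to a brace-enclosed initializer containing 257 copies
// of x, one per slot of the indexed free list array
// (CompactibleFreeListSpace::IndexSetSize == 257; see the assert in the
// CFLS_LAB constructor below).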
aoqi@0 2565 #define VECTOR_257(x) \
aoqi@0 2566 /* 1 2 3 4 5 6 7 8 9 1x 11 12 13 14 15 16 17 18 19 2x 21 22 23 24 25 26 27 28 29 3x 31 32 */ \
aoqi@0 2567 { x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, \
aoqi@0 2568 x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, \
aoqi@0 2569 x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, \
aoqi@0 2570 x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, \
aoqi@0 2571 x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, \
aoqi@0 2572 x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, \
aoqi@0 2573 x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, \
aoqi@0 2574 x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, \
aoqi@0 2575 x }
aoqi@0 2576
aoqi@0 2577 // Initialize with default setting of CMSParPromoteBlocksToClaim, _not_
aoqi@0 2578 // OldPLABSize, whose static default is different; if overridden on the
aoqi@0 2579 // command line, this will get reinitialized via a call to
aoqi@0 2580 // modify_initialization() below.
aoqi@0 2581 AdaptiveWeightedAverage CFLS_LAB::_blocks_to_claim[] =
aoqi@0 2582 VECTOR_257(AdaptiveWeightedAverage(OldPLABWeight, (float)CMSParPromoteBlocksToClaim));
aoqi@0 2583 size_t CFLS_LAB::_global_num_blocks[] = VECTOR_257(0);
aoqi@0 2584 uint CFLS_LAB::_global_num_workers[] = VECTOR_257(0);
aoqi@0 2585
aoqi@0 2586 CFLS_LAB::CFLS_LAB(CompactibleFreeListSpace* cfls) :
aoqi@0 2587 _cfls(cfls)
aoqi@0 2588 {
aoqi@0 2589 assert(CompactibleFreeListSpace::IndexSetSize == 257, "Modify VECTOR_257() macro above");
aoqi@0 2590 for (size_t i = CompactibleFreeListSpace::IndexSetStart;
aoqi@0 2591 i < CompactibleFreeListSpace::IndexSetSize;
aoqi@0 2592 i += CompactibleFreeListSpace::IndexSetStride) {
aoqi@0 2593 _indexedFreeList[i].set_size(i);
aoqi@0 2594 _num_blocks[i] = 0;
aoqi@0 2595 }
aoqi@0 2596 }
aoqi@0 2597
aoqi@0 2598 static bool _CFLS_LAB_modified = false;
aoqi@0 2599
aoqi@0 2600 void CFLS_LAB::modify_initialization(size_t n, unsigned wt) {
aoqi@0 2601 assert(!_CFLS_LAB_modified, "Call only once");
aoqi@0 2602 _CFLS_LAB_modified = true;
aoqi@0 2603 for (size_t i = CompactibleFreeListSpace::IndexSetStart;
aoqi@0 2604 i < CompactibleFreeListSpace::IndexSetSize;
aoqi@0 2605 i += CompactibleFreeListSpace::IndexSetStride) {
aoqi@0 2606 _blocks_to_claim[i].modify(n, wt, true /* force */);
aoqi@0 2607 }
aoqi@0 2608 }
aoqi@0 2609
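// Allocate a block of word_sz heap words for promotion. Small sizes are served
// from this thread's local indexed free lists (refilled from the shared pool as
// needed); sizes of IndexSetSize words or more go straight to the shared
// dictionary under the dictionary lock.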
aoqi@0 2610 HeapWord* CFLS_LAB::alloc(size_t word_sz) {
aoqi@0 2611 FreeChunk* res;
aoqi@0 2612 assert(word_sz == _cfls->adjustObjectSize(word_sz), "Error");
aoqi@0 2613 if (word_sz >= CompactibleFreeListSpace::IndexSetSize) {
aoqi@0 2614 // This locking manages sync with other large object allocations.
aoqi@0 2615 MutexLockerEx x(_cfls->parDictionaryAllocLock(),
aoqi@0 2616 Mutex::_no_safepoint_check_flag);
aoqi@0 2617 res = _cfls->getChunkFromDictionaryExact(word_sz);
aoqi@0 2618 if (res == NULL) return NULL;
aoqi@0 2619 } else {
aoqi@0 2620 AdaptiveFreeList<FreeChunk>* fl = &_indexedFreeList[word_sz];
aoqi@0 2621 if (fl->count() == 0) {
aoqi@0 2622 // Attempt to refill this local free list.
aoqi@0 2623 get_from_global_pool(word_sz, fl);
aoqi@0 2624 // If it didn't work, give up.
aoqi@0 2625 if (fl->count() == 0) return NULL;
aoqi@0 2626 }
aoqi@0 2627 res = fl->get_chunk_at_head();
aoqi@0 2628 assert(res != NULL, "Why was count non-zero?");
aoqi@0 2629 }
aoqi@0 2630 res->markNotFree();
aoqi@0 2631 assert(!res->is_free(), "shouldn't be marked free");
aoqi@0 2632 assert(oop(res)->klass_or_null() == NULL, "should look uninitialized");
aoqi@0 2633 // mangle a just allocated object with a distinct pattern.
aoqi@0 2634 debug_only(res->mangleAllocated(word_sz));
aoqi@0 2635 return (HeapWord*)res;
aoqi@0 2636 }
aoqi@0 2637
aoqi@0 2638 // Get a chunk of blocks of the right size and update related
aoqi@0 2639 // book-keeping stats
aoqi@0 2640 void CFLS_LAB::get_from_global_pool(size_t word_sz, AdaptiveFreeList<FreeChunk>* fl) {
aoqi@0 2641 // Get the #blocks we want to claim
aoqi@0 2642 size_t n_blks = (size_t)_blocks_to_claim[word_sz].average();
aoqi@0 2643 assert(n_blks > 0, "Error");
jcoomes@7345 2644 assert(ResizeOldPLAB || n_blks == OldPLABSize, "Error");
aoqi@0 2645 // In some cases, when the application has a phase change,
aoqi@0 2646 // there may be a sudden and sharp shift in the object survival
aoqi@0 2647 // profile, and updating the counts at the end of a scavenge
aoqi@0 2648 // may not be quick enough, giving rise to large scavenge pauses
aoqi@0 2649 // during these phase changes. It is beneficial to detect such
aoqi@0 2650 // changes on-the-fly during a scavenge and avoid such a phase-change
aoqi@0 2651 // pothole. The following code is a heuristic attempt to do that.
aoqi@0 2652 // It is protected by a product flag until we have gained
aoqi@0 2653 // enough experience with this heuristic and fine-tuned its behaviour.
aoqi@0 2654 // WARNING: This might increase fragmentation if we overreact to
aoqi@0 2655 // small spikes, so some kind of historical smoothing based on
aoqi@0 2656 // previous experience with the greater reactivity might be useful.
aoqi@0 2657 // Lacking sufficient experience, CMSOldPLABResizeQuicker is disabled by
aoqi@0 2658 // default.
aoqi@0 2659 if (ResizeOldPLAB && CMSOldPLABResizeQuicker) {
aoqi@0 2660 size_t multiple = _num_blocks[word_sz]/(CMSOldPLABToleranceFactor*CMSOldPLABNumRefills*n_blks);
aoqi@0 2661 n_blks += CMSOldPLABReactivityFactor*multiple*n_blks;
aoqi@0 2662 n_blks = MIN2(n_blks, CMSOldPLABMax);
aoqi@0 2663 }
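  // For illustration only (hypothetical numbers): with
  // CMSOldPLABToleranceFactor = 4, CMSOldPLABNumRefills = 4,
  // CMSOldPLABReactivityFactor = 2 and n_blks = 50, a thread that has already
  // consumed _num_blocks[word_sz] = 1600 blocks this scavenge gets
  // multiple = 1600 / (4 * 4 * 50) = 2, so the claim above grows to
  // 50 + 2 * 2 * 50 = 250 blocks, subject to the CMSOldPLABMax cap.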
aoqi@0 2664 assert(n_blks > 0, "Error");
aoqi@0 2665 _cfls->par_get_chunk_of_blocks(word_sz, n_blks, fl);
aoqi@0 2666 // Update stats table entry for this block size
aoqi@0 2667 _num_blocks[word_sz] += fl->count();
aoqi@0 2668 }
aoqi@0 2669
aoqi@0 2670 void CFLS_LAB::compute_desired_plab_size() {
aoqi@0 2671 for (size_t i = CompactibleFreeListSpace::IndexSetStart;
aoqi@0 2672 i < CompactibleFreeListSpace::IndexSetSize;
aoqi@0 2673 i += CompactibleFreeListSpace::IndexSetStride) {
aoqi@0 2674 assert((_global_num_workers[i] == 0) == (_global_num_blocks[i] == 0),
aoqi@0 2675 "Counter inconsistency");
aoqi@0 2676 if (_global_num_workers[i] > 0) {
aoqi@0 2677 // Need to smooth wrt historical average
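      // The new sample is the number of blocks of this size consumed per
      // worker per PLAB refill during the last scavenge, i.e.
      // _global_num_blocks[i] / (_global_num_workers[i] * CMSOldPLABNumRefills),
      // clamped to [CMSOldPLABMin, CMSOldPLABMax]. For example (hypothetical
      // numbers): 4 workers that together used 3200 blocks with
      // CMSOldPLABNumRefills = 4 would sample 3200 / (4 * 4) = 200.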
aoqi@0 2678 if (ResizeOldPLAB) {
aoqi@0 2679 _blocks_to_claim[i].sample(
aoqi@0 2680 MAX2((size_t)CMSOldPLABMin,
aoqi@0 2681 MIN2((size_t)CMSOldPLABMax,
aoqi@0 2682 _global_num_blocks[i]/(_global_num_workers[i]*CMSOldPLABNumRefills))));
aoqi@0 2683 }
aoqi@0 2684 // Reset counters for next round
aoqi@0 2685 _global_num_workers[i] = 0;
aoqi@0 2686 _global_num_blocks[i] = 0;
aoqi@0 2687 if (PrintOldPLAB) {
aoqi@0 2688 gclog_or_tty->print_cr("[" SIZE_FORMAT "]: " SIZE_FORMAT, i, (size_t)_blocks_to_claim[i].average());
aoqi@0 2689 }
aoqi@0 2690 }
aoqi@0 2691 }
aoqi@0 2692 }
aoqi@0 2693
aoqi@0 2694 // If this is changed in the future to allow parallel
aoqi@0 2695 // access, one would need to take the FL locks and,
aoqi@0 2696 // depending on how it is used, stagger access from
aoqi@0 2697 // parallel threads to reduce contention.
aoqi@0 2698 void CFLS_LAB::retire(int tid) {
aoqi@0 2699   // We run this single-threaded with the world stopped,
aoqi@0 2700   // so there is no need for locks and such.
aoqi@0 2701 NOT_PRODUCT(Thread* t = Thread::current();)
aoqi@0 2702 assert(Thread::current()->is_VM_thread(), "Error");
aoqi@0 2703 for (size_t i = CompactibleFreeListSpace::IndexSetStart;
aoqi@0 2704 i < CompactibleFreeListSpace::IndexSetSize;
aoqi@0 2705 i += CompactibleFreeListSpace::IndexSetStride) {
aoqi@0 2706 assert(_num_blocks[i] >= (size_t)_indexedFreeList[i].count(),
aoqi@0 2707 "Can't retire more than what we obtained");
aoqi@0 2708 if (_num_blocks[i] > 0) {
aoqi@0 2709 size_t num_retire = _indexedFreeList[i].count();
aoqi@0 2710 assert(_num_blocks[i] > num_retire, "Should have used at least one");
aoqi@0 2711 {
aoqi@0 2712 // MutexLockerEx x(_cfls->_indexedFreeListParLocks[i],
aoqi@0 2713 // Mutex::_no_safepoint_check_flag);
aoqi@0 2714
aoqi@0 2715 // Update globals stats for num_blocks used
aoqi@0 2716 _global_num_blocks[i] += (_num_blocks[i] - num_retire);
aoqi@0 2717 _global_num_workers[i]++;
aoqi@0 2718 assert(_global_num_workers[i] <= ParallelGCThreads, "Too big");
aoqi@0 2719 if (num_retire > 0) {
aoqi@0 2720 _cfls->_indexedFreeList[i].prepend(&_indexedFreeList[i]);
aoqi@0 2721 // Reset this list.
aoqi@0 2722 _indexedFreeList[i] = AdaptiveFreeList<FreeChunk>();
aoqi@0 2723 _indexedFreeList[i].set_size(i);
aoqi@0 2724 }
aoqi@0 2725 }
aoqi@0 2726 if (PrintOldPLAB) {
aoqi@0 2727 gclog_or_tty->print_cr("%d[" SIZE_FORMAT "]: " SIZE_FORMAT "/" SIZE_FORMAT "/" SIZE_FORMAT,
aoqi@0 2728 tid, i, num_retire, _num_blocks[i], (size_t)_blocks_to_claim[i].average());
aoqi@0 2729 }
aoqi@0 2730 // Reset stats for next round
aoqi@0 2731 _num_blocks[i] = 0;
aoqi@0 2732 }
aoqi@0 2733 }
aoqi@0 2734 }
aoqi@0 2735
jmasa@7234 2736 // Used by par_get_chunk_of_blocks() for the chunks from the
jmasa@7234 2737 // indexed_free_lists. Looks for a chunk whose size is a multiple
jmasa@7234 2738 // of "word_sz" and, if found, splits it into "word_sz"-sized chunks,
jmasa@7234 2739 // adding them to the free list "fl". "n" is the maximum number of
jmasa@7234 2740 // chunks to be added to "fl".
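// Returns true if at least one block was placed on "fl", false otherwise.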
jmasa@7234 2741 bool CompactibleFreeListSpace::par_get_chunk_of_blocks_IFL(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl) {
aoqi@0 2742
aoqi@0 2743   // We'll try all multiples of word_sz in the indexed set, starting with
aoqi@0 2744   // word_sz itself and, if CMSSplitIndexedFreeListBlocks, larger multiples;
aoqi@0 2745   // failing that, the caller falls back to splitting a big chunk from the dictionary.
aoqi@0 2746 {
aoqi@0 2747 bool found;
aoqi@0 2748 int k;
aoqi@0 2749 size_t cur_sz;
aoqi@0 2750 for (k = 1, cur_sz = k * word_sz, found = false;
aoqi@0 2751 (cur_sz < CompactibleFreeListSpace::IndexSetSize) &&
aoqi@0 2752 (CMSSplitIndexedFreeListBlocks || k <= 1);
aoqi@0 2753 k++, cur_sz = k * word_sz) {
aoqi@0 2754 AdaptiveFreeList<FreeChunk> fl_for_cur_sz; // Empty.
aoqi@0 2755 fl_for_cur_sz.set_size(cur_sz);
aoqi@0 2756 {
aoqi@0 2757 MutexLockerEx x(_indexedFreeListParLocks[cur_sz],
aoqi@0 2758 Mutex::_no_safepoint_check_flag);
aoqi@0 2759 AdaptiveFreeList<FreeChunk>* gfl = &_indexedFreeList[cur_sz];
aoqi@0 2760 if (gfl->count() != 0) {
aoqi@0 2761 // nn is the number of chunks of size cur_sz that
aoqi@0 2762 // we'd need to split k-ways each, in order to create
aoqi@0 2763 // "n" chunks of size word_sz each.
aoqi@0 2764 const size_t nn = MAX2(n/k, (size_t)1);
aoqi@0 2765 gfl->getFirstNChunksFromList(nn, &fl_for_cur_sz);
aoqi@0 2766 found = true;
aoqi@0 2767 if (k > 1) {
aoqi@0 2768 // Update split death stats for the cur_sz-size blocks list:
aoqi@0 2769 // we increment the split death count by the number of blocks
aoqi@0 2770 // we just took from the cur_sz-size blocks list and which
aoqi@0 2771 // we will be splitting below.
aoqi@0 2772 ssize_t deaths = gfl->split_deaths() +
aoqi@0 2773 fl_for_cur_sz.count();
aoqi@0 2774 gfl->set_split_deaths(deaths);
aoqi@0 2775 }
aoqi@0 2776 }
aoqi@0 2777 }
aoqi@0 2778 // Now transfer fl_for_cur_sz to fl. Common case, we hope, is k = 1.
aoqi@0 2779 if (found) {
aoqi@0 2780 if (k == 1) {
aoqi@0 2781 fl->prepend(&fl_for_cur_sz);
aoqi@0 2782 } else {
aoqi@0 2783 // Divide each block on fl_for_cur_sz up k ways.
aoqi@0 2784 FreeChunk* fc;
aoqi@0 2785 while ((fc = fl_for_cur_sz.get_chunk_at_head()) != NULL) {
aoqi@0 2786 // Must do this in reverse order, so that anybody attempting to
aoqi@0 2787 // access the main chunk sees it as a single free block until we
aoqi@0 2788 // change it.
aoqi@0 2789 size_t fc_size = fc->size();
aoqi@0 2790 assert(fc->is_free(), "Error");
aoqi@0 2791 for (int i = k-1; i >= 0; i--) {
aoqi@0 2792 FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz);
aoqi@0 2793 assert((i != 0) ||
aoqi@0 2794 ((fc == ffc) && ffc->is_free() &&
aoqi@0 2795 (ffc->size() == k*word_sz) && (fc_size == word_sz)),
aoqi@0 2796 "Counting error");
aoqi@0 2797 ffc->set_size(word_sz);
aoqi@0 2798 ffc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
aoqi@0 2799 ffc->link_next(NULL);
aoqi@0 2800 // Above must occur before BOT is updated below.
aoqi@0 2801 OrderAccess::storestore();
aoqi@0 2802             // Splitting from the right: after the update below, fc_size == i * word_sz.
aoqi@0 2803 _bt.mark_block((HeapWord*)ffc, word_sz, true /* reducing */);
aoqi@0 2804 fc_size -= word_sz;
aoqi@0 2805 assert(fc_size == i*word_sz, "Error");
aoqi@0 2806 _bt.verify_not_unallocated((HeapWord*)ffc, word_sz);
aoqi@0 2807 _bt.verify_single_block((HeapWord*)fc, fc_size);
aoqi@0 2808 _bt.verify_single_block((HeapWord*)ffc, word_sz);
aoqi@0 2809 // Push this on "fl".
aoqi@0 2810 fl->return_chunk_at_head(ffc);
aoqi@0 2811 }
aoqi@0 2812 // TRAP
aoqi@0 2813 assert(fl->tail()->next() == NULL, "List invariant.");
aoqi@0 2814 }
aoqi@0 2815 }
aoqi@0 2816 // Update birth stats for this block size.
aoqi@0 2817 size_t num = fl->count();
aoqi@0 2818 MutexLockerEx x(_indexedFreeListParLocks[word_sz],
aoqi@0 2819 Mutex::_no_safepoint_check_flag);
aoqi@0 2820 ssize_t births = _indexedFreeList[word_sz].split_births() + num;
aoqi@0 2821 _indexedFreeList[word_sz].set_split_births(births);
jmasa@7234 2822 return true;
aoqi@0 2823 }
aoqi@0 2824 }
jmasa@7234 2825 return found;
aoqi@0 2826 }
jmasa@7234 2827 }
jmasa@7234 2828
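// Helper for par_get_chunk_of_blocks_dictionary(): takes from the dictionary
// a chunk large enough to be carved into blocks of "word_sz" words (reducing
// the requested count "n" if necessary), returns any unusable remainder to the
// dictionary or to the matching indexed free list, and hands back the prefix
// chunk, whose size is then an exact multiple of "word_sz". Returns NULL if no
// suitable chunk could be found.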
jmasa@7234 2829 FreeChunk* CompactibleFreeListSpace::get_n_way_chunk_to_split(size_t word_sz, size_t n) {
jmasa@7234 2830
aoqi@0 2831 FreeChunk* fc = NULL;
aoqi@0 2832 FreeChunk* rem_fc = NULL;
aoqi@0 2833 size_t rem;
aoqi@0 2834 {
aoqi@0 2835 MutexLockerEx x(parDictionaryAllocLock(),
aoqi@0 2836 Mutex::_no_safepoint_check_flag);
aoqi@0 2837 while (n > 0) {
aoqi@0 2838 fc = dictionary()->get_chunk(MAX2(n * word_sz, _dictionary->min_size()),
aoqi@0 2839 FreeBlockDictionary<FreeChunk>::atLeast);
aoqi@0 2840 if (fc != NULL) {
aoqi@0 2841 break;
aoqi@0 2842 } else {
aoqi@0 2843 n--;
aoqi@0 2844 }
aoqi@0 2845 }
jmasa@7234 2846 if (fc == NULL) return NULL;
aoqi@0 2847 // Otherwise, split up that block.
aoqi@0 2848 assert((ssize_t)n >= 1, "Control point invariant");
aoqi@0 2849 assert(fc->is_free(), "Error: should be a free block");
aoqi@0 2850 _bt.verify_single_block((HeapWord*)fc, fc->size());
aoqi@0 2851 const size_t nn = fc->size() / word_sz;
aoqi@0 2852 n = MIN2(nn, n);
aoqi@0 2853 assert((ssize_t)n >= 1, "Control point invariant");
aoqi@0 2854 rem = fc->size() - n * word_sz;
aoqi@0 2855 // If there is a remainder, and it's too small, allocate one fewer.
aoqi@0 2856 if (rem > 0 && rem < MinChunkSize) {
aoqi@0 2857 n--; rem += word_sz;
aoqi@0 2858 }
aoqi@0 2859 // Note that at this point we may have n == 0.
aoqi@0 2860 assert((ssize_t)n >= 0, "Control point invariant");
aoqi@0 2861
aoqi@0 2862     // If n is 0, the chunk fc that was found is not large
aoqi@0 2863     // enough to leave a viable remainder. We are unable to
aoqi@0 2864     // allocate even one block. Return fc to the
aoqi@0 2865     // dictionary and return NULL; the caller's free list stays empty.
aoqi@0 2866 if (n == 0) {
aoqi@0 2867 returnChunkToDictionary(fc);
jmasa@7234 2868 return NULL;
aoqi@0 2869 }
aoqi@0 2870
jmasa@7234 2871 _bt.allocated((HeapWord*)fc, fc->size(), true /* reducing */); // update _unallocated_blk
jmasa@7234 2872 dictionary()->dict_census_update(fc->size(),
jmasa@7234 2873 true /*split*/,
jmasa@7234 2874 false /*birth*/);
jmasa@7234 2875
aoqi@0 2876 // First return the remainder, if any.
aoqi@0 2877 // Note that we hold the lock until we decide if we're going to give
aoqi@0 2878 // back the remainder to the dictionary, since a concurrent allocation
aoqi@0 2879 // may otherwise see the heap as empty. (We're willing to take that
aoqi@0 2880 // hit if the block is a small block.)
aoqi@0 2881 if (rem > 0) {
aoqi@0 2882 size_t prefix_size = n * word_sz;
aoqi@0 2883 rem_fc = (FreeChunk*)((HeapWord*)fc + prefix_size);
aoqi@0 2884 rem_fc->set_size(rem);
aoqi@0 2885 rem_fc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
aoqi@0 2886 rem_fc->link_next(NULL);
aoqi@0 2887 // Above must occur before BOT is updated below.
aoqi@0 2888 assert((ssize_t)n > 0 && prefix_size > 0 && rem_fc > fc, "Error");
aoqi@0 2889 OrderAccess::storestore();
aoqi@0 2890 _bt.split_block((HeapWord*)fc, fc->size(), prefix_size);
aoqi@0 2891 assert(fc->is_free(), "Error");
aoqi@0 2892 fc->set_size(prefix_size);
aoqi@0 2893 if (rem >= IndexSetSize) {
aoqi@0 2894 returnChunkToDictionary(rem_fc);
aoqi@0 2895 dictionary()->dict_census_update(rem, true /*split*/, true /*birth*/);
aoqi@0 2896 rem_fc = NULL;
aoqi@0 2897 }
aoqi@0 2898 // Otherwise, return it to the small list below.
aoqi@0 2899 }
aoqi@0 2900 }
aoqi@0 2901 if (rem_fc != NULL) {
aoqi@0 2902 MutexLockerEx x(_indexedFreeListParLocks[rem],
aoqi@0 2903 Mutex::_no_safepoint_check_flag);
aoqi@0 2904 _bt.verify_not_unallocated((HeapWord*)rem_fc, rem_fc->size());
aoqi@0 2905 _indexedFreeList[rem].return_chunk_at_head(rem_fc);
aoqi@0 2906 smallSplitBirth(rem);
aoqi@0 2907 }
jmasa@7234 2908 assert(n * word_sz == fc->size(),
jmasa@7234 2909 err_msg("Chunk size " SIZE_FORMAT " is not exactly splittable by "
jmasa@7234 2910 SIZE_FORMAT " sized chunks of size " SIZE_FORMAT,
jmasa@7234 2911 fc->size(), n, word_sz));
jmasa@7234 2912 return fc;
jmasa@7234 2913 }
jmasa@7234 2914
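// Splits a chunk obtained from the dictionary into "word_sz"-sized blocks and
// adds them to "fl". Fewer than the targeted number of chunks may be produced
// if only a smaller chunk was available in the dictionary.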
jmasa@7234 2915 void CompactibleFreeListSpace::par_get_chunk_of_blocks_dictionary(size_t word_sz, size_t targeted_number_of_chunks, AdaptiveFreeList<FreeChunk>* fl) {
jmasa@7234 2916
jmasa@7234 2917   FreeChunk* fc = get_n_way_chunk_to_split(word_sz, targeted_number_of_chunks);
jmasa@7234 2918
jmasa@7234 2919 if (fc == NULL) {
jmasa@7234 2920 return;
jmasa@7234 2921 }
jmasa@7234 2922
jmasa@7234 2923 size_t n = fc->size() / word_sz;
jmasa@7234 2924
jmasa@7234 2925 assert((ssize_t)n > 0, "Consistency");
aoqi@0 2926 // Now do the splitting up.
aoqi@0 2927 // Must do this in reverse order, so that anybody attempting to
aoqi@0 2928 // access the main chunk sees it as a single free block until we
aoqi@0 2929 // change it.
aoqi@0 2930 size_t fc_size = n * word_sz;
aoqi@0 2931 // All but first chunk in this loop
aoqi@0 2932 for (ssize_t i = n-1; i > 0; i--) {
aoqi@0 2933 FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz);
aoqi@0 2934 ffc->set_size(word_sz);
aoqi@0 2935 ffc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
aoqi@0 2936 ffc->link_next(NULL);
aoqi@0 2937 // Above must occur before BOT is updated below.
aoqi@0 2938 OrderAccess::storestore();
aoqi@0 2939     // Splitting from the right: fc_size == (i + 1) * word_sz here, reduced to i * word_sz below.
aoqi@0 2940 _bt.mark_block((HeapWord*)ffc, word_sz, true /* reducing */);
aoqi@0 2941 fc_size -= word_sz;
aoqi@0 2942 _bt.verify_not_unallocated((HeapWord*)ffc, ffc->size());
aoqi@0 2943 _bt.verify_single_block((HeapWord*)ffc, ffc->size());
aoqi@0 2944 _bt.verify_single_block((HeapWord*)fc, fc_size);
aoqi@0 2945 // Push this on "fl".
aoqi@0 2946 fl->return_chunk_at_head(ffc);
aoqi@0 2947 }
aoqi@0 2948 // First chunk
aoqi@0 2949 assert(fc->is_free() && fc->size() == n*word_sz, "Error: should still be a free block");
aoqi@0 2950 // The blocks above should show their new sizes before the first block below
aoqi@0 2951 fc->set_size(word_sz);
aoqi@0 2952 fc->link_prev(NULL); // idempotent wrt free-ness, see assert above
aoqi@0 2953 fc->link_next(NULL);
aoqi@0 2954 _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
aoqi@0 2955 _bt.verify_single_block((HeapWord*)fc, fc->size());
aoqi@0 2956 fl->return_chunk_at_head(fc);
aoqi@0 2957
aoqi@0 2958 assert((ssize_t)n > 0 && (ssize_t)n == fl->count(), "Incorrect number of blocks");
aoqi@0 2959 {
aoqi@0 2960 // Update the stats for this block size.
aoqi@0 2961 MutexLockerEx x(_indexedFreeListParLocks[word_sz],
aoqi@0 2962 Mutex::_no_safepoint_check_flag);
aoqi@0 2963 const ssize_t births = _indexedFreeList[word_sz].split_births() + n;
aoqi@0 2964 _indexedFreeList[word_sz].set_split_births(births);
aoqi@0 2965 // ssize_t new_surplus = _indexedFreeList[word_sz].surplus() + n;
aoqi@0 2966 // _indexedFreeList[word_sz].set_surplus(new_surplus);
aoqi@0 2967 }
aoqi@0 2968
aoqi@0 2969 // TRAP
aoqi@0 2970 assert(fl->tail()->next() == NULL, "List invariant.");
aoqi@0 2971 }
aoqi@0 2972
jmasa@7234 2973 void CompactibleFreeListSpace::par_get_chunk_of_blocks(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl) {
jmasa@7234 2974 assert(fl->count() == 0, "Precondition.");
jmasa@7234 2975 assert(word_sz < CompactibleFreeListSpace::IndexSetSize,
jmasa@7234 2976 "Precondition");
jmasa@7234 2977
jmasa@7234 2978 if (par_get_chunk_of_blocks_IFL(word_sz, n, fl)) {
jmasa@7234 2979 // Got it
jmasa@7234 2980 return;
jmasa@7234 2981 }
jmasa@7234 2982
jmasa@7234 2983 // Otherwise, we'll split a block from the dictionary.
jmasa@7234 2984 par_get_chunk_of_blocks_dictionary(word_sz, n, fl);
jmasa@7234 2985 }
jmasa@7234 2986
aoqi@0 2987 // Set up the space's par_seq_tasks structure for work claiming
aoqi@0 2988 // for parallel rescan. See CMSParRemarkTask where this is currently used.
aoqi@0 2989 // XXX Need to suitably abstract and generalize this and the next
aoqi@0 2990 // method into one.
aoqi@0 2991 void
aoqi@0 2992 CompactibleFreeListSpace::
aoqi@0 2993 initialize_sequential_subtasks_for_rescan(int n_threads) {
aoqi@0 2994 // The "size" of each task is fixed according to rescan_task_size.
aoqi@0 2995 assert(n_threads > 0, "Unexpected n_threads argument");
aoqi@0 2996 const size_t task_size = rescan_task_size();
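  // Number of tasks: the ceiling of the used-region size divided by the task
  // size, so that every word of the used region is covered by some task.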
aoqi@0 2997 size_t n_tasks = (used_region().word_size() + task_size - 1)/task_size;
aoqi@0 2998 assert((n_tasks == 0) == used_region().is_empty(), "n_tasks incorrect");
aoqi@0 2999 assert(n_tasks == 0 ||
aoqi@0 3000 ((used_region().start() + (n_tasks - 1)*task_size < used_region().end()) &&
aoqi@0 3001 (used_region().start() + n_tasks*task_size >= used_region().end())),
aoqi@0 3002 "n_tasks calculation incorrect");
aoqi@0 3003 SequentialSubTasksDone* pst = conc_par_seq_tasks();
aoqi@0 3004 assert(!pst->valid(), "Clobbering existing data?");
aoqi@0 3005 // Sets the condition for completion of the subtask (how many threads
aoqi@0 3006 // need to finish in order to be done).
aoqi@0 3007 pst->set_n_threads(n_threads);
aoqi@0 3008 pst->set_n_tasks((int)n_tasks);
aoqi@0 3009 }
aoqi@0 3010
aoqi@0 3011 // Set up the space's par_seq_tasks structure for work claiming
aoqi@0 3012 // for parallel concurrent marking. See CMSConcMarkTask where this is currently used.
aoqi@0 3013 void
aoqi@0 3014 CompactibleFreeListSpace::
aoqi@0 3015 initialize_sequential_subtasks_for_marking(int n_threads,
aoqi@0 3016 HeapWord* low) {
aoqi@0 3017 // The "size" of each task is fixed according to rescan_task_size.
aoqi@0 3018 assert(n_threads > 0, "Unexpected n_threads argument");
aoqi@0 3019 const size_t task_size = marking_task_size();
aoqi@0 3020 assert(task_size > CardTableModRefBS::card_size_in_words &&
aoqi@0 3021 (task_size % CardTableModRefBS::card_size_in_words == 0),
aoqi@0 3022 "Otherwise arithmetic below would be incorrect");
aoqi@0 3023 MemRegion span = _gen->reserved();
aoqi@0 3024 if (low != NULL) {
aoqi@0 3025 if (span.contains(low)) {
aoqi@0 3026 // Align low down to a card boundary so that
aoqi@0 3027 // we can use block_offset_careful() on span boundaries.
aoqi@0 3028 HeapWord* aligned_low = (HeapWord*)align_size_down((uintptr_t)low,
aoqi@0 3029 CardTableModRefBS::card_size);
aoqi@0 3030 // Clip span prefix at aligned_low
aoqi@0 3031 span = span.intersection(MemRegion(aligned_low, span.end()));
aoqi@0 3032 } else if (low > span.end()) {
aoqi@0 3033 span = MemRegion(low, low); // Null region
aoqi@0 3034 } // else use entire span
aoqi@0 3035 }
aoqi@0 3036 assert(span.is_empty() ||
aoqi@0 3037 ((uintptr_t)span.start() % CardTableModRefBS::card_size == 0),
aoqi@0 3038 "span should start at a card boundary");
aoqi@0 3039 size_t n_tasks = (span.word_size() + task_size - 1)/task_size;
aoqi@0 3040 assert((n_tasks == 0) == span.is_empty(), "Inconsistency");
aoqi@0 3041 assert(n_tasks == 0 ||
aoqi@0 3042 ((span.start() + (n_tasks - 1)*task_size < span.end()) &&
aoqi@0 3043 (span.start() + n_tasks*task_size >= span.end())),
aoqi@0 3044 "n_tasks calculation incorrect");
aoqi@0 3045 SequentialSubTasksDone* pst = conc_par_seq_tasks();
aoqi@0 3046 assert(!pst->valid(), "Clobbering existing data?");
aoqi@0 3047 // Sets the condition for completion of the subtask (how many threads
aoqi@0 3048 // need to finish in order to be done).
aoqi@0 3049 pst->set_n_threads(n_threads);
aoqi@0 3050 pst->set_n_tasks((int)n_tasks);
aoqi@0 3051 }
