src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp

author       aoqi
date         Tue, 08 Aug 2017 15:57:29 +0800
changeset    6876:710a3c8b516e
parents      6680:78bbf4d43a14, 0:f90c822e73f8
child        7535:7ae4e26cb1e0
permissions  -rw-r--r--
merge

aoqi@0 1 /*
aoqi@0 2 * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
aoqi@0 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
aoqi@0 4 *
aoqi@0 5 * This code is free software; you can redistribute it and/or modify it
aoqi@0 6 * under the terms of the GNU General Public License version 2 only, as
aoqi@0 7 * published by the Free Software Foundation.
aoqi@0 8 *
aoqi@0 9 * This code is distributed in the hope that it will be useful, but WITHOUT
aoqi@0 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
aoqi@0 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
aoqi@0 12 * version 2 for more details (a copy is included in the LICENSE file that
aoqi@0 13 * accompanied this code).
aoqi@0 14 *
aoqi@0 15 * You should have received a copy of the GNU General Public License version
aoqi@0 16 * 2 along with this work; if not, write to the Free Software Foundation,
aoqi@0 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
aoqi@0 18 *
aoqi@0 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
aoqi@0 20 * or visit www.oracle.com if you need additional information or have any
aoqi@0 21 * questions.
aoqi@0 22 *
aoqi@0 23 */
aoqi@0 24
aoqi@0 25 #include "precompiled.hpp"
aoqi@0 26 #include "gc_implementation/concurrentMarkSweep/cmsLockVerifier.hpp"
aoqi@0 27 #include "gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp"
aoqi@0 28 #include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.inline.hpp"
aoqi@0 29 #include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.hpp"
aoqi@0 30 #include "gc_implementation/shared/liveRange.hpp"
aoqi@0 31 #include "gc_implementation/shared/spaceDecorator.hpp"
aoqi@0 32 #include "gc_interface/collectedHeap.inline.hpp"
aoqi@0 33 #include "memory/allocation.inline.hpp"
aoqi@0 34 #include "memory/blockOffsetTable.inline.hpp"
aoqi@0 35 #include "memory/resourceArea.hpp"
aoqi@0 36 #include "memory/universe.inline.hpp"
aoqi@0 37 #include "oops/oop.inline.hpp"
aoqi@0 38 #include "runtime/globals.hpp"
aoqi@0 39 #include "runtime/handles.inline.hpp"
aoqi@0 40 #include "runtime/init.hpp"
aoqi@0 41 #include "runtime/java.hpp"
aoqi@0 42 #include "runtime/vmThread.hpp"
aoqi@0 43 #include "utilities/copy.hpp"
aoqi@0 44
aoqi@0 45 /////////////////////////////////////////////////////////////////////////
aoqi@0 46 //// CompactibleFreeListSpace
aoqi@0 47 /////////////////////////////////////////////////////////////////////////
aoqi@0 48
aoqi@0 49 // highest ranked free list lock rank
aoqi@0 50 int CompactibleFreeListSpace::_lockRank = Mutex::leaf + 3;
aoqi@0 51
aoqi@0 52 // Defaults are 0 so things will break badly if incorrectly initialized.
aoqi@0 53 size_t CompactibleFreeListSpace::IndexSetStart = 0;
aoqi@0 54 size_t CompactibleFreeListSpace::IndexSetStride = 0;
aoqi@0 55
aoqi@0 56 size_t MinChunkSize = 0;
aoqi@0 57
aoqi@0 58 void CompactibleFreeListSpace::set_cms_values() {
aoqi@0 59 // Set CMS global values
aoqi@0 60 assert(MinChunkSize == 0, "already set");
aoqi@0 61
aoqi@0 62 // MinChunkSize should be a multiple of MinObjAlignment and be large enough
aoqi@0 63 // for chunks to contain a FreeChunk.
aoqi@0 64 size_t min_chunk_size_in_bytes = align_size_up(sizeof(FreeChunk), MinObjAlignmentInBytes);
aoqi@0 65 MinChunkSize = min_chunk_size_in_bytes / BytesPerWord;
aoqi@0 66
aoqi@0 67 assert(IndexSetStart == 0 && IndexSetStride == 0, "already set");
aoqi@0 68 IndexSetStart = MinChunkSize;
aoqi@0 69 IndexSetStride = MinObjAlignment;
aoqi@0 70 }
aoqi@0 71
aoqi@0 72 // Constructor
aoqi@0 73 CompactibleFreeListSpace::CompactibleFreeListSpace(BlockOffsetSharedArray* bs,
aoqi@0 74 MemRegion mr, bool use_adaptive_freelists,
aoqi@0 75 FreeBlockDictionary<FreeChunk>::DictionaryChoice dictionaryChoice) :
aoqi@0 76 _dictionaryChoice(dictionaryChoice),
aoqi@0 77 _adaptive_freelists(use_adaptive_freelists),
aoqi@0 78 _bt(bs, mr),
aoqi@0 79 // free list locks are in the range of values taken by _lockRank
aoqi@0 80 // This range currently is [_leaf+2, _leaf+3]
aoqi@0 81 // Note: this requires that CFLspace c'tors
aoqi@0 82 // are called serially in the order in which the locks
aoqi@0 83 // are acquired in the program text. This is true today.
aoqi@0 84 _freelistLock(_lockRank--, "CompactibleFreeListSpace._lock", true),
aoqi@0 85 _parDictionaryAllocLock(Mutex::leaf - 1, // == rank(ExpandHeap_lock) - 1
aoqi@0 86 "CompactibleFreeListSpace._dict_par_lock", true),
aoqi@0 87 _rescan_task_size(CardTableModRefBS::card_size_in_words * BitsPerWord *
aoqi@0 88 CMSRescanMultiple),
aoqi@0 89 _marking_task_size(CardTableModRefBS::card_size_in_words * BitsPerWord *
aoqi@0 90 CMSConcMarkMultiple),
aoqi@0 91 _collector(NULL)
aoqi@0 92 {
aoqi@0 93 assert(sizeof(FreeChunk) / BytesPerWord <= MinChunkSize,
aoqi@0 94 "FreeChunk is larger than expected");
aoqi@0 95 _bt.set_space(this);
aoqi@0 96 initialize(mr, SpaceDecorator::Clear, SpaceDecorator::Mangle);
aoqi@0 97 // We have all of "mr", all of which we place in the dictionary
aoqi@0 98 // as one big chunk. We'll need to decide here which of several
aoqi@0 99 // possible alternative dictionary implementations to use. For
aoqi@0 100 // now the choice is easy, since we have only one working
aoqi@0 101 // implementation, namely, the simple binary tree (splaying
aoqi@0 102 // temporarily disabled).
aoqi@0 103 switch (dictionaryChoice) {
aoqi@0 104 case FreeBlockDictionary<FreeChunk>::dictionaryBinaryTree:
aoqi@0 105 _dictionary = new AFLBinaryTreeDictionary(mr);
aoqi@0 106 break;
aoqi@0 107 case FreeBlockDictionary<FreeChunk>::dictionarySplayTree:
aoqi@0 108 case FreeBlockDictionary<FreeChunk>::dictionarySkipList:
aoqi@0 109 default:
aoqi@0 110 warning("dictionaryChoice: selected option not understood; using"
aoqi@0 111 " default BinaryTreeDictionary implementation instead.");
aoqi@0 112 }
aoqi@0 113 assert(_dictionary != NULL, "CMS dictionary initialization");
aoqi@0 114 // The indexed free lists are initially all empty and are lazily
aoqi@0 115 // filled in on demand. Initialize the array elements to NULL.
aoqi@0 116 initializeIndexedFreeListArray();
aoqi@0 117
aoqi@0 118 // Not using adaptive free lists assumes that allocation is first
aoqi@0 119 // from the linAB's. Also a cms perm gen which can be compacted
aoqi@0 120 // has to have the klass's klassKlass allocated at a lower
aoqi@0 121 // address in the heap than the klass so that the klassKlass is
aoqi@0 122 // moved to its new location before the klass is moved.
aoqi@0 123 // Set the _refillSize for the linear allocation blocks
aoqi@0 124 if (!use_adaptive_freelists) {
aoqi@0 125 FreeChunk* fc = _dictionary->get_chunk(mr.word_size(),
aoqi@0 126 FreeBlockDictionary<FreeChunk>::atLeast);
aoqi@0 127 // The small linAB initially has all the space and will allocate
aoqi@0 128 // a chunk of any size.
aoqi@0 129 HeapWord* addr = (HeapWord*) fc;
aoqi@0 130 _smallLinearAllocBlock.set(addr, fc->size() ,
aoqi@0 131 1024*SmallForLinearAlloc, fc->size());
aoqi@0 132 // Note that _unallocated_block is not updated here.
aoqi@0 133 // Allocations from the linear allocation block should
aoqi@0 134 // update it.
aoqi@0 135 } else {
aoqi@0 136 _smallLinearAllocBlock.set(0, 0, 1024*SmallForLinearAlloc,
aoqi@0 137 SmallForLinearAlloc);
aoqi@0 138 }
aoqi@0 139 // CMSIndexedFreeListReplenish should be at least 1
aoqi@0 140 CMSIndexedFreeListReplenish = MAX2((uintx)1, CMSIndexedFreeListReplenish);
aoqi@0 141 _promoInfo.setSpace(this);
aoqi@0 142 if (UseCMSBestFit) {
aoqi@0 143 _fitStrategy = FreeBlockBestFitFirst;
aoqi@0 144 } else {
aoqi@0 145 _fitStrategy = FreeBlockStrategyNone;
aoqi@0 146 }
aoqi@0 147 check_free_list_consistency();
aoqi@0 148
aoqi@0 149 // Initialize locks for parallel case.
aoqi@0 150
aoqi@0 151 if (CollectedHeap::use_parallel_gc_threads()) {
aoqi@0 152 for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
aoqi@0 153 _indexedFreeListParLocks[i] = new Mutex(Mutex::leaf - 1, // == ExpandHeap_lock - 1
aoqi@0 154 "a freelist par lock",
aoqi@0 155 true);
aoqi@0 156 DEBUG_ONLY(
aoqi@0 157 _indexedFreeList[i].set_protecting_lock(_indexedFreeListParLocks[i]);
aoqi@0 158 )
aoqi@0 159 }
aoqi@0 160 _dictionary->set_par_lock(&_parDictionaryAllocLock);
aoqi@0 161 }
aoqi@0 162 }
aoqi@0 163
aoqi@0 164 // Like CompactibleSpace forward() but always calls cross_threshold() to
aoqi@0 165 // update the block offset table. Removed initialize_threshold call because
aoqi@0 166 // CFLS does not use a block offset array for contiguous spaces.
aoqi@0 167 HeapWord* CompactibleFreeListSpace::forward(oop q, size_t size,
aoqi@0 168 CompactPoint* cp, HeapWord* compact_top) {
aoqi@0 169 // q is alive
aoqi@0 170 // First check if we should switch compaction space
aoqi@0 171 assert(this == cp->space, "'this' should be current compaction space.");
aoqi@0 172 size_t compaction_max_size = pointer_delta(end(), compact_top);
aoqi@0 173 assert(adjustObjectSize(size) == cp->space->adjust_object_size_v(size),
aoqi@0 174 "virtual adjustObjectSize_v() method is not correct");
aoqi@0 175 size_t adjusted_size = adjustObjectSize(size);
aoqi@0 176 assert(compaction_max_size >= MinChunkSize || compaction_max_size == 0,
aoqi@0 177 "no small fragments allowed");
aoqi@0 178 assert(minimum_free_block_size() == MinChunkSize,
aoqi@0 179 "for de-virtualized reference below");
aoqi@0 180 // Can't leave a nonzero-size residual fragment smaller than MinChunkSize
aoqi@0 181 if (adjusted_size + MinChunkSize > compaction_max_size &&
aoqi@0 182 adjusted_size != compaction_max_size) {
aoqi@0 183 do {
aoqi@0 184 // switch to next compaction space
aoqi@0 185 cp->space->set_compaction_top(compact_top);
aoqi@0 186 cp->space = cp->space->next_compaction_space();
aoqi@0 187 if (cp->space == NULL) {
aoqi@0 188 cp->gen = GenCollectedHeap::heap()->prev_gen(cp->gen);
aoqi@0 189 assert(cp->gen != NULL, "compaction must succeed");
aoqi@0 190 cp->space = cp->gen->first_compaction_space();
aoqi@0 191 assert(cp->space != NULL, "generation must have a first compaction space");
aoqi@0 192 }
aoqi@0 193 compact_top = cp->space->bottom();
aoqi@0 194 cp->space->set_compaction_top(compact_top);
aoqi@0 195 // The correct adjusted_size may not be the same as that for this method
aoqi@0 196 // (i.e., cp->space may no longer be "this"), so adjust the size again.
aoqi@0 197 // Use the virtual method here; the non-virtual version was used above to
aoqi@0 198 // save a virtual dispatch.
aoqi@0 199 adjusted_size = cp->space->adjust_object_size_v(size);
aoqi@0 200 compaction_max_size = pointer_delta(cp->space->end(), compact_top);
aoqi@0 201 assert(cp->space->minimum_free_block_size() == 0, "just checking");
aoqi@0 202 } while (adjusted_size > compaction_max_size);
aoqi@0 203 }
aoqi@0 204
aoqi@0 205 // store the forwarding pointer into the mark word
aoqi@0 206 if ((HeapWord*)q != compact_top) {
aoqi@0 207 q->forward_to(oop(compact_top));
aoqi@0 208 assert(q->is_gc_marked(), "encoding the pointer should preserve the mark");
aoqi@0 209 } else {
aoqi@0 210 // if the object isn't moving we can just set the mark to the default
aoqi@0 211 // mark and handle it specially later on.
aoqi@0 212 q->init_mark();
aoqi@0 213 assert(q->forwardee() == NULL, "should be forwarded to NULL");
aoqi@0 214 }
aoqi@0 215
aoqi@0 216 compact_top += adjusted_size;
aoqi@0 217
aoqi@0 218 // we need to update the offset table so that the beginnings of objects can be
aoqi@0 219 // found during scavenge. Note that we are updating the offset table based on
aoqi@0 220 // where the object will be once the compaction phase finishes.
aoqi@0 221
aoqi@0 222 // Always call cross_threshold(). A contiguous space can only call it when
aoqi@0 223 // the compaction_top exceeds the current threshold but not for a
aoqi@0 224 // non-contiguous space.
aoqi@0 225 cp->threshold =
aoqi@0 226 cp->space->cross_threshold(compact_top - adjusted_size, compact_top);
aoqi@0 227 return compact_top;
aoqi@0 228 }
aoqi@0 229
aoqi@0 230 // A modified copy of OffsetTableContigSpace::cross_threshold() with _offsets -> _bt
aoqi@0 231 // and use of single_block instead of alloc_block. The name here is not really
aoqi@0 232 // appropriate - maybe a more general name could be invented for both the
aoqi@0 233 // contiguous and noncontiguous spaces.
aoqi@0 234
aoqi@0 235 HeapWord* CompactibleFreeListSpace::cross_threshold(HeapWord* start, HeapWord* the_end) {
aoqi@0 236 _bt.single_block(start, the_end);
aoqi@0 237 return end();
aoqi@0 238 }
aoqi@0 239
aoqi@0 240 // Initialize them to NULL.
aoqi@0 241 void CompactibleFreeListSpace::initializeIndexedFreeListArray() {
aoqi@0 242 for (size_t i = 0; i < IndexSetSize; i++) {
aoqi@0 243 // Note that on platforms where objects are double word aligned,
aoqi@0 244 // the odd array elements are not used. It is convenient, however,
aoqi@0 245 // to map directly from the object size to the array element.
aoqi@0 246 _indexedFreeList[i].reset(IndexSetSize);
aoqi@0 247 _indexedFreeList[i].set_size(i);
aoqi@0 248 assert(_indexedFreeList[i].count() == 0, "reset check failed");
aoqi@0 249 assert(_indexedFreeList[i].head() == NULL, "reset check failed");
aoqi@0 250 assert(_indexedFreeList[i].tail() == NULL, "reset check failed");
aoqi@0 251 assert(_indexedFreeList[i].hint() == IndexSetSize, "reset check failed");
aoqi@0 252 }
aoqi@0 253 }
aoqi@0 254
aoqi@0 255 void CompactibleFreeListSpace::resetIndexedFreeListArray() {
aoqi@0 256 for (size_t i = 1; i < IndexSetSize; i++) {
aoqi@0 257 assert(_indexedFreeList[i].size() == (size_t) i,
aoqi@0 258 "Indexed free list sizes are incorrect");
aoqi@0 259 _indexedFreeList[i].reset(IndexSetSize);
aoqi@0 260 assert(_indexedFreeList[i].count() == 0, "reset check failed");
aoqi@0 261 assert(_indexedFreeList[i].head() == NULL, "reset check failed");
aoqi@0 262 assert(_indexedFreeList[i].tail() == NULL, "reset check failed");
aoqi@0 263 assert(_indexedFreeList[i].hint() == IndexSetSize, "reset check failed");
aoqi@0 264 }
aoqi@0 265 }
aoqi@0 266
aoqi@0 267 void CompactibleFreeListSpace::reset(MemRegion mr) {
aoqi@0 268 resetIndexedFreeListArray();
aoqi@0 269 dictionary()->reset();
aoqi@0 270 if (BlockOffsetArrayUseUnallocatedBlock) {
aoqi@0 271 assert(end() == mr.end(), "We are compacting to the bottom of CMS gen");
aoqi@0 272 // Everything's allocated until proven otherwise.
aoqi@0 273 _bt.set_unallocated_block(end());
aoqi@0 274 }
aoqi@0 275 if (!mr.is_empty()) {
aoqi@0 276 assert(mr.word_size() >= MinChunkSize, "Chunk size is too small");
aoqi@0 277 _bt.single_block(mr.start(), mr.word_size());
aoqi@0 278 FreeChunk* fc = (FreeChunk*) mr.start();
aoqi@0 279 fc->set_size(mr.word_size());
aoqi@0 280 if (mr.word_size() >= IndexSetSize ) {
aoqi@0 281 returnChunkToDictionary(fc);
aoqi@0 282 } else {
aoqi@0 283 _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
aoqi@0 284 _indexedFreeList[mr.word_size()].return_chunk_at_head(fc);
aoqi@0 285 }
aoqi@0 286 coalBirth(mr.word_size());
aoqi@0 287 }
aoqi@0 288 _promoInfo.reset();
aoqi@0 289 _smallLinearAllocBlock._ptr = NULL;
aoqi@0 290 _smallLinearAllocBlock._word_size = 0;
aoqi@0 291 }
aoqi@0 292
aoqi@0 293 void CompactibleFreeListSpace::reset_after_compaction() {
aoqi@0 294 // Reset the space to the new reality - one free chunk.
aoqi@0 295 MemRegion mr(compaction_top(), end());
aoqi@0 296 reset(mr);
aoqi@0 297 // Now refill the linear allocation block(s) if possible.
aoqi@0 298 if (_adaptive_freelists) {
aoqi@0 299 refillLinearAllocBlocksIfNeeded();
aoqi@0 300 } else {
aoqi@0 301 // Place as much of mr in the linAB as we can get,
aoqi@0 302 // provided it was big enough to go into the dictionary.
aoqi@0 303 FreeChunk* fc = dictionary()->find_largest_dict();
aoqi@0 304 if (fc != NULL) {
aoqi@0 305 assert(fc->size() == mr.word_size(),
aoqi@0 306 "Why was the chunk broken up?");
aoqi@0 307 removeChunkFromDictionary(fc);
aoqi@0 308 HeapWord* addr = (HeapWord*) fc;
aoqi@0 309 _smallLinearAllocBlock.set(addr, fc->size() ,
aoqi@0 310 1024*SmallForLinearAlloc, fc->size());
aoqi@0 311 // Note that _unallocated_block is not updated here.
aoqi@0 312 }
aoqi@0 313 }
aoqi@0 314 }
aoqi@0 315
aoqi@0 316 // Walks the entire dictionary, returning a coterminal
aoqi@0 317 // chunk, if it exists. Use with caution since it involves
aoqi@0 318 // a potentially complete walk of a potentially large tree.
aoqi@0 319 FreeChunk* CompactibleFreeListSpace::find_chunk_at_end() {
aoqi@0 320
aoqi@0 321 assert_lock_strong(&_freelistLock);
aoqi@0 322
aoqi@0 323 return dictionary()->find_chunk_ends_at(end());
aoqi@0 324 }
aoqi@0 325
aoqi@0 326
aoqi@0 327 #ifndef PRODUCT
aoqi@0 328 void CompactibleFreeListSpace::initializeIndexedFreeListArrayReturnedBytes() {
aoqi@0 329 for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
aoqi@0 330 _indexedFreeList[i].allocation_stats()->set_returned_bytes(0);
aoqi@0 331 }
aoqi@0 332 }
aoqi@0 333
aoqi@0 334 size_t CompactibleFreeListSpace::sumIndexedFreeListArrayReturnedBytes() {
aoqi@0 335 size_t sum = 0;
aoqi@0 336 for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
aoqi@0 337 sum += _indexedFreeList[i].allocation_stats()->returned_bytes();
aoqi@0 338 }
aoqi@0 339 return sum;
aoqi@0 340 }
aoqi@0 341
aoqi@0 342 size_t CompactibleFreeListSpace::totalCountInIndexedFreeLists() const {
aoqi@0 343 size_t count = 0;
aoqi@0 344 for (size_t i = IndexSetStart; i < IndexSetSize; i++) {
aoqi@0 345 debug_only(
aoqi@0 346 ssize_t total_list_count = 0;
aoqi@0 347 for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
aoqi@0 348 fc = fc->next()) {
aoqi@0 349 total_list_count++;
aoqi@0 350 }
aoqi@0 351 assert(total_list_count == _indexedFreeList[i].count(),
aoqi@0 352 "Count in list is incorrect");
aoqi@0 353 )
aoqi@0 354 count += _indexedFreeList[i].count();
aoqi@0 355 }
aoqi@0 356 return count;
aoqi@0 357 }
aoqi@0 358
aoqi@0 359 size_t CompactibleFreeListSpace::totalCount() {
aoqi@0 360 size_t num = totalCountInIndexedFreeLists();
aoqi@0 361 num += dictionary()->total_count();
aoqi@0 362 if (_smallLinearAllocBlock._word_size != 0) {
aoqi@0 363 num++;
aoqi@0 364 }
aoqi@0 365 return num;
aoqi@0 366 }
aoqi@0 367 #endif
aoqi@0 368
aoqi@0 369 bool CompactibleFreeListSpace::is_free_block(const HeapWord* p) const {
aoqi@0 370 FreeChunk* fc = (FreeChunk*) p;
aoqi@0 371 return fc->is_free();
aoqi@0 372 }
aoqi@0 373
aoqi@0 374 size_t CompactibleFreeListSpace::used() const {
aoqi@0 375 return capacity() - free();
aoqi@0 376 }
aoqi@0 377
aoqi@0 378 size_t CompactibleFreeListSpace::free() const {
aoqi@0 379 // "MT-safe, but not MT-precise"(TM), if you will: i.e.
aoqi@0 380 // if you do this while the structures are in flux you
aoqi@0 381 // may get an approximate answer only; for instance
aoqi@0 382 // because there is concurrent allocation either
aoqi@0 383 // directly by mutators or for promotion during a GC.
aoqi@0 384 // It's "MT-safe", however, in the sense that you are guaranteed
aoqi@0 385 // not to crash and burn, for instance, because of walking
aoqi@0 386 // pointers that could disappear as you were walking them.
aoqi@0 387 // The approximation is because the various components
aoqi@0 388 // that are read below are not read atomically (and
aoqi@0 389 // further the computation of totalSizeInIndexedFreeLists()
aoqi@0 390 // is itself a non-atomic computation). The normal use of
aoqi@0 391 // this is during a resize operation at the end of GC
aoqi@0 392 // and at that time you are guaranteed to get the
aoqi@0 393 // correct actual value. However, for instance, this is
aoqi@0 394 // also read completely asynchronously by the "perf-sampler"
aoqi@0 395 // that supports jvmstat, and you are apt to see the values
aoqi@0 396 // flicker in such cases.
aoqi@0 397 assert(_dictionary != NULL, "No _dictionary?");
aoqi@0 398 return (_dictionary->total_chunk_size(DEBUG_ONLY(freelistLock())) +
aoqi@0 399 totalSizeInIndexedFreeLists() +
aoqi@0 400 _smallLinearAllocBlock._word_size) * HeapWordSize;
aoqi@0 401 }
aoqi@0 402
aoqi@0 403 size_t CompactibleFreeListSpace::max_alloc_in_words() const {
aoqi@0 404 assert(_dictionary != NULL, "No _dictionary?");
aoqi@0 405 assert_locked();
aoqi@0 406 size_t res = _dictionary->max_chunk_size();
aoqi@0 407 res = MAX2(res, MIN2(_smallLinearAllocBlock._word_size,
aoqi@0 408 (size_t) SmallForLinearAlloc - 1));
aoqi@0 409 // XXX the following could potentially be pretty slow;
aoqi@0 410 // should one, pessimally for the rare cases when res
aoqi@0 411 // calculated above is less than IndexSetSize,
aoqi@0 412 // just return res calculated above? My reasoning was that
aoqi@0 413 // those cases will be so rare that the extra time spent doesn't
aoqi@0 414 // really matter....
aoqi@0 415 // Note: do not change the loop test i >= res + IndexSetStride
aoqi@0 416 // to i > res below, because i is unsigned and res may be zero.
aoqi@0 417 for (size_t i = IndexSetSize - 1; i >= res + IndexSetStride;
aoqi@0 418 i -= IndexSetStride) {
aoqi@0 419 if (_indexedFreeList[i].head() != NULL) {
aoqi@0 420 assert(_indexedFreeList[i].count() != 0, "Inconsistent FreeList");
aoqi@0 421 return i;
aoqi@0 422 }
aoqi@0 423 }
aoqi@0 424 return res;
aoqi@0 425 }
aoqi@0 426
aoqi@0 427 void LinearAllocBlock::print_on(outputStream* st) const {
aoqi@0 428 st->print_cr(" LinearAllocBlock: ptr = " PTR_FORMAT ", word_size = " SIZE_FORMAT
aoqi@0 429 ", refillsize = " SIZE_FORMAT ", allocation_size_limit = " SIZE_FORMAT,
aoqi@0 430 p2i(_ptr), _word_size, _refillSize, _allocation_size_limit);
aoqi@0 431 }
aoqi@0 432
aoqi@0 433 void CompactibleFreeListSpace::print_on(outputStream* st) const {
aoqi@0 434 st->print_cr("COMPACTIBLE FREELIST SPACE");
aoqi@0 435 st->print_cr(" Space:");
aoqi@0 436 Space::print_on(st);
aoqi@0 437
aoqi@0 438 st->print_cr("promoInfo:");
aoqi@0 439 _promoInfo.print_on(st);
aoqi@0 440
aoqi@0 441 st->print_cr("_smallLinearAllocBlock");
aoqi@0 442 _smallLinearAllocBlock.print_on(st);
aoqi@0 443
aoqi@0 444 // dump_memory_block(_smallLinearAllocBlock->_ptr, 128);
aoqi@0 445
aoqi@0 446 st->print_cr(" _fitStrategy = %s, _adaptive_freelists = %s",
aoqi@0 447 _fitStrategy?"true":"false", _adaptive_freelists?"true":"false");
aoqi@0 448 }
aoqi@0 449
aoqi@0 450 void CompactibleFreeListSpace::print_indexed_free_lists(outputStream* st)
aoqi@0 451 const {
aoqi@0 452 reportIndexedFreeListStatistics();
aoqi@0 453 gclog_or_tty->print_cr("Layout of Indexed Freelists");
aoqi@0 454 gclog_or_tty->print_cr("---------------------------");
aoqi@0 455 AdaptiveFreeList<FreeChunk>::print_labels_on(st, "size");
aoqi@0 456 for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
aoqi@0 457 _indexedFreeList[i].print_on(gclog_or_tty);
aoqi@0 458 for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
aoqi@0 459 fc = fc->next()) {
aoqi@0 460 gclog_or_tty->print_cr("\t[" PTR_FORMAT "," PTR_FORMAT ") %s",
aoqi@0 461 p2i(fc), p2i((HeapWord*)fc + i),
aoqi@0 462 fc->cantCoalesce() ? "\t CC" : "");
aoqi@0 463 }
aoqi@0 464 }
aoqi@0 465 }
aoqi@0 466
aoqi@0 467 void CompactibleFreeListSpace::print_promo_info_blocks(outputStream* st)
aoqi@0 468 const {
aoqi@0 469 _promoInfo.print_on(st);
aoqi@0 470 }
aoqi@0 471
aoqi@0 472 void CompactibleFreeListSpace::print_dictionary_free_lists(outputStream* st)
aoqi@0 473 const {
aoqi@0 474 _dictionary->report_statistics();
aoqi@0 475 st->print_cr("Layout of Freelists in Tree");
aoqi@0 476 st->print_cr("---------------------------");
aoqi@0 477 _dictionary->print_free_lists(st);
aoqi@0 478 }
aoqi@0 479
aoqi@0 480 class BlkPrintingClosure: public BlkClosure {
aoqi@0 481 const CMSCollector* _collector;
aoqi@0 482 const CompactibleFreeListSpace* _sp;
aoqi@0 483 const CMSBitMap* _live_bit_map;
aoqi@0 484 const bool _post_remark;
aoqi@0 485 outputStream* _st;
aoqi@0 486 public:
aoqi@0 487 BlkPrintingClosure(const CMSCollector* collector,
aoqi@0 488 const CompactibleFreeListSpace* sp,
aoqi@0 489 const CMSBitMap* live_bit_map,
aoqi@0 490 outputStream* st):
aoqi@0 491 _collector(collector),
aoqi@0 492 _sp(sp),
aoqi@0 493 _live_bit_map(live_bit_map),
aoqi@0 494 _post_remark(collector->abstract_state() > CMSCollector::FinalMarking),
aoqi@0 495 _st(st) { }
aoqi@0 496 size_t do_blk(HeapWord* addr);
aoqi@0 497 };
aoqi@0 498
aoqi@0 499 size_t BlkPrintingClosure::do_blk(HeapWord* addr) {
aoqi@0 500 size_t sz = _sp->block_size_no_stall(addr, _collector);
aoqi@0 501 assert(sz != 0, "Should always be able to compute a size");
aoqi@0 502 if (_sp->block_is_obj(addr)) {
aoqi@0 503 const bool dead = _post_remark && !_live_bit_map->isMarked(addr);
aoqi@0 504 _st->print_cr(PTR_FORMAT ": %s object of size " SIZE_FORMAT "%s",
aoqi@0 505 p2i(addr),
aoqi@0 506 dead ? "dead" : "live",
aoqi@0 507 sz,
aoqi@0 508 (!dead && CMSPrintObjectsInDump) ? ":" : ".");
aoqi@0 509 if (CMSPrintObjectsInDump && !dead) {
aoqi@0 510 oop(addr)->print_on(_st);
aoqi@0 511 _st->print_cr("--------------------------------------");
aoqi@0 512 }
aoqi@0 513 } else { // free block
aoqi@0 514 _st->print_cr(PTR_FORMAT ": free block of size " SIZE_FORMAT "%s",
aoqi@0 515 p2i(addr), sz, CMSPrintChunksInDump ? ":" : ".");
aoqi@0 516 if (CMSPrintChunksInDump) {
aoqi@0 517 ((FreeChunk*)addr)->print_on(_st);
aoqi@0 518 _st->print_cr("--------------------------------------");
aoqi@0 519 }
aoqi@0 520 }
aoqi@0 521 return sz;
aoqi@0 522 }
aoqi@0 523
aoqi@0 524 void CompactibleFreeListSpace::dump_at_safepoint_with_locks(CMSCollector* c,
aoqi@0 525 outputStream* st) {
aoqi@0 526 st->print_cr("\n=========================");
aoqi@0 527 st->print_cr("Block layout in CMS Heap:");
aoqi@0 528 st->print_cr("=========================");
aoqi@0 529 BlkPrintingClosure bpcl(c, this, c->markBitMap(), st);
aoqi@0 530 blk_iterate(&bpcl);
aoqi@0 531
aoqi@0 532 st->print_cr("\n=======================================");
aoqi@0 533 st->print_cr("Order & Layout of Promotion Info Blocks");
aoqi@0 534 st->print_cr("=======================================");
aoqi@0 535 print_promo_info_blocks(st);
aoqi@0 536
aoqi@0 537 st->print_cr("\n===========================");
aoqi@0 538 st->print_cr("Order of Indexed Free Lists");
aoqi@0 539 st->print_cr("===========================");
aoqi@0 540 print_indexed_free_lists(st);
aoqi@0 541
aoqi@0 542 st->print_cr("\n=================================");
aoqi@0 543 st->print_cr("Order of Free Lists in Dictionary");
aoqi@0 544 st->print_cr("=================================");
aoqi@0 545 print_dictionary_free_lists(st);
aoqi@0 546 }
aoqi@0 547
aoqi@0 548
aoqi@0 549 void CompactibleFreeListSpace::reportFreeListStatistics() const {
aoqi@0 550 assert_lock_strong(&_freelistLock);
aoqi@0 551 assert(PrintFLSStatistics != 0, "Reporting error");
aoqi@0 552 _dictionary->report_statistics();
aoqi@0 553 if (PrintFLSStatistics > 1) {
aoqi@0 554 reportIndexedFreeListStatistics();
aoqi@0 555 size_t total_size = totalSizeInIndexedFreeLists() +
aoqi@0 556 _dictionary->total_chunk_size(DEBUG_ONLY(freelistLock()));
aoqi@0 557 gclog_or_tty->print(" free=" SIZE_FORMAT " frag=%1.4f\n", total_size, flsFrag());
aoqi@0 558 }
aoqi@0 559 }
aoqi@0 560
aoqi@0 561 void CompactibleFreeListSpace::reportIndexedFreeListStatistics() const {
aoqi@0 562 assert_lock_strong(&_freelistLock);
aoqi@0 563 gclog_or_tty->print("Statistics for IndexedFreeLists:\n"
aoqi@0 564 "--------------------------------\n");
aoqi@0 565 size_t total_size = totalSizeInIndexedFreeLists();
aoqi@0 566 size_t free_blocks = numFreeBlocksInIndexedFreeLists();
aoqi@0 567 gclog_or_tty->print("Total Free Space: " SIZE_FORMAT "\n", total_size);
aoqi@0 568 gclog_or_tty->print("Max Chunk Size: " SIZE_FORMAT "\n", maxChunkSizeInIndexedFreeLists());
aoqi@0 569 gclog_or_tty->print("Number of Blocks: " SIZE_FORMAT "\n", free_blocks);
aoqi@0 570 if (free_blocks != 0) {
aoqi@0 571 gclog_or_tty->print("Av. Block Size: " SIZE_FORMAT "\n", total_size/free_blocks);
aoqi@0 572 }
aoqi@0 573 }
aoqi@0 574
aoqi@0 575 size_t CompactibleFreeListSpace::numFreeBlocksInIndexedFreeLists() const {
aoqi@0 576 size_t res = 0;
aoqi@0 577 for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
aoqi@0 578 debug_only(
aoqi@0 579 ssize_t recount = 0;
aoqi@0 580 for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
aoqi@0 581 fc = fc->next()) {
aoqi@0 582 recount += 1;
aoqi@0 583 }
aoqi@0 584 assert(recount == _indexedFreeList[i].count(),
aoqi@0 585 "Incorrect count in list");
aoqi@0 586 )
aoqi@0 587 res += _indexedFreeList[i].count();
aoqi@0 588 }
aoqi@0 589 return res;
aoqi@0 590 }
aoqi@0 591
aoqi@0 592 size_t CompactibleFreeListSpace::maxChunkSizeInIndexedFreeLists() const {
aoqi@0 593 for (size_t i = IndexSetSize - 1; i != 0; i -= IndexSetStride) {
aoqi@0 594 if (_indexedFreeList[i].head() != NULL) {
aoqi@0 595 assert(_indexedFreeList[i].count() != 0, "Inconsistent FreeList");
aoqi@0 596 return (size_t)i;
aoqi@0 597 }
aoqi@0 598 }
aoqi@0 599 return 0;
aoqi@0 600 }
aoqi@0 601
aoqi@0 602 void CompactibleFreeListSpace::set_end(HeapWord* value) {
aoqi@0 603 HeapWord* prevEnd = end();
aoqi@0 604 assert(prevEnd != value, "unnecessary set_end call");
aoqi@0 605 assert(prevEnd == NULL || !BlockOffsetArrayUseUnallocatedBlock || value >= unallocated_block(),
aoqi@0 606 "New end is below unallocated block");
aoqi@0 607 _end = value;
aoqi@0 608 if (prevEnd != NULL) {
aoqi@0 609 // Resize the underlying block offset table.
aoqi@0 610 _bt.resize(pointer_delta(value, bottom()));
aoqi@0 611 if (value <= prevEnd) {
aoqi@0 612 assert(!BlockOffsetArrayUseUnallocatedBlock || value >= unallocated_block(),
aoqi@0 613 "New end is below unallocated block");
aoqi@0 614 } else {
aoqi@0 615 // Now, take this new chunk and add it to the free blocks.
aoqi@0 616 // Note that the BOT has not yet been updated for this block.
aoqi@0 617 size_t newFcSize = pointer_delta(value, prevEnd);
aoqi@0 618 // XXX This is REALLY UGLY and should be fixed up. XXX
aoqi@0 619 if (!_adaptive_freelists && _smallLinearAllocBlock._ptr == NULL) {
aoqi@0 620 // Mark the boundary of the new block in BOT
aoqi@0 621 _bt.mark_block(prevEnd, value);
aoqi@0 622 // put it all in the linAB
aoqi@0 623 if (ParallelGCThreads == 0) {
aoqi@0 624 _smallLinearAllocBlock._ptr = prevEnd;
aoqi@0 625 _smallLinearAllocBlock._word_size = newFcSize;
aoqi@0 626 repairLinearAllocBlock(&_smallLinearAllocBlock);
aoqi@0 627 } else { // ParallelGCThreads > 0
aoqi@0 628 MutexLockerEx x(parDictionaryAllocLock(),
aoqi@0 629 Mutex::_no_safepoint_check_flag);
aoqi@0 630 _smallLinearAllocBlock._ptr = prevEnd;
aoqi@0 631 _smallLinearAllocBlock._word_size = newFcSize;
aoqi@0 632 repairLinearAllocBlock(&_smallLinearAllocBlock);
aoqi@0 633 }
aoqi@0 634 // Births of chunks put into a LinAB are not recorded. Births
aoqi@0 635 // of chunks as they are allocated out of a LinAB are.
aoqi@0 636 } else {
aoqi@0 637 // Add the block to the free lists, if possible coalescing it
aoqi@0 638 // with the last free block, and update the BOT and census data.
aoqi@0 639 addChunkToFreeListsAtEndRecordingStats(prevEnd, newFcSize);
aoqi@0 640 }
aoqi@0 641 }
aoqi@0 642 }
aoqi@0 643 }
aoqi@0 644
aoqi@0 645 class FreeListSpace_DCTOC : public Filtering_DCTOC {
aoqi@0 646 CompactibleFreeListSpace* _cfls;
aoqi@0 647 CMSCollector* _collector;
aoqi@0 648 protected:
aoqi@0 649 // Override.
aoqi@0 650 #define walk_mem_region_with_cl_DECL(ClosureType) \
aoqi@0 651 virtual void walk_mem_region_with_cl(MemRegion mr, \
aoqi@0 652 HeapWord* bottom, HeapWord* top, \
aoqi@0 653 ClosureType* cl); \
aoqi@0 654 void walk_mem_region_with_cl_par(MemRegion mr, \
aoqi@0 655 HeapWord* bottom, HeapWord* top, \
aoqi@0 656 ClosureType* cl); \
aoqi@0 657 void walk_mem_region_with_cl_nopar(MemRegion mr, \
aoqi@0 658 HeapWord* bottom, HeapWord* top, \
aoqi@0 659 ClosureType* cl)
aoqi@0 660 walk_mem_region_with_cl_DECL(ExtendedOopClosure);
aoqi@0 661 walk_mem_region_with_cl_DECL(FilteringClosure);
aoqi@0 662
aoqi@0 663 public:
aoqi@0 664 FreeListSpace_DCTOC(CompactibleFreeListSpace* sp,
aoqi@0 665 CMSCollector* collector,
aoqi@0 666 ExtendedOopClosure* cl,
aoqi@0 667 CardTableModRefBS::PrecisionStyle precision,
aoqi@0 668 HeapWord* boundary) :
aoqi@0 669 Filtering_DCTOC(sp, cl, precision, boundary),
aoqi@0 670 _cfls(sp), _collector(collector) {}
aoqi@0 671 };
aoqi@0 672
aoqi@0 673 // We de-virtualize the block-related calls below, since we know that our
aoqi@0 674 // space is a CompactibleFreeListSpace.
aoqi@0 675
aoqi@0 676 #define FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(ClosureType) \
aoqi@0 677 void FreeListSpace_DCTOC::walk_mem_region_with_cl(MemRegion mr, \
aoqi@0 678 HeapWord* bottom, \
aoqi@0 679 HeapWord* top, \
aoqi@0 680 ClosureType* cl) { \
aoqi@0 681 bool is_par = SharedHeap::heap()->n_par_threads() > 0; \
aoqi@0 682 if (is_par) { \
aoqi@0 683 assert(SharedHeap::heap()->n_par_threads() == \
aoqi@0 684 SharedHeap::heap()->workers()->active_workers(), "Mismatch"); \
aoqi@0 685 walk_mem_region_with_cl_par(mr, bottom, top, cl); \
aoqi@0 686 } else { \
aoqi@0 687 walk_mem_region_with_cl_nopar(mr, bottom, top, cl); \
aoqi@0 688 } \
aoqi@0 689 } \
aoqi@0 690 void FreeListSpace_DCTOC::walk_mem_region_with_cl_par(MemRegion mr, \
aoqi@0 691 HeapWord* bottom, \
aoqi@0 692 HeapWord* top, \
aoqi@0 693 ClosureType* cl) { \
aoqi@0 694 /* Skip parts that are before "mr", in case "block_start" sent us \
aoqi@0 695 back too far. */ \
aoqi@0 696 HeapWord* mr_start = mr.start(); \
aoqi@0 697 size_t bot_size = _cfls->CompactibleFreeListSpace::block_size(bottom); \
aoqi@0 698 HeapWord* next = bottom + bot_size; \
aoqi@0 699 while (next < mr_start) { \
aoqi@0 700 bottom = next; \
aoqi@0 701 bot_size = _cfls->CompactibleFreeListSpace::block_size(bottom); \
aoqi@0 702 next = bottom + bot_size; \
aoqi@0 703 } \
aoqi@0 704 \
aoqi@0 705 while (bottom < top) { \
aoqi@0 706 if (_cfls->CompactibleFreeListSpace::block_is_obj(bottom) && \
aoqi@0 707 !_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks( \
aoqi@0 708 oop(bottom)) && \
aoqi@0 709 !_collector->CMSCollector::is_dead_obj(oop(bottom))) { \
aoqi@0 710 size_t word_sz = oop(bottom)->oop_iterate(cl, mr); \
aoqi@0 711 bottom += _cfls->adjustObjectSize(word_sz); \
aoqi@0 712 } else { \
aoqi@0 713 bottom += _cfls->CompactibleFreeListSpace::block_size(bottom); \
aoqi@0 714 } \
aoqi@0 715 } \
aoqi@0 716 } \
aoqi@0 717 void FreeListSpace_DCTOC::walk_mem_region_with_cl_nopar(MemRegion mr, \
aoqi@0 718 HeapWord* bottom, \
aoqi@0 719 HeapWord* top, \
aoqi@0 720 ClosureType* cl) { \
aoqi@0 721 /* Skip parts that are before "mr", in case "block_start" sent us \
aoqi@0 722 back too far. */ \
aoqi@0 723 HeapWord* mr_start = mr.start(); \
aoqi@0 724 size_t bot_size = _cfls->CompactibleFreeListSpace::block_size_nopar(bottom); \
aoqi@0 725 HeapWord* next = bottom + bot_size; \
aoqi@0 726 while (next < mr_start) { \
aoqi@0 727 bottom = next; \
aoqi@0 728 bot_size = _cfls->CompactibleFreeListSpace::block_size_nopar(bottom); \
aoqi@0 729 next = bottom + bot_size; \
aoqi@0 730 } \
aoqi@0 731 \
aoqi@0 732 while (bottom < top) { \
aoqi@0 733 if (_cfls->CompactibleFreeListSpace::block_is_obj_nopar(bottom) && \
aoqi@0 734 !_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks( \
aoqi@0 735 oop(bottom)) && \
aoqi@0 736 !_collector->CMSCollector::is_dead_obj(oop(bottom))) { \
aoqi@0 737 size_t word_sz = oop(bottom)->oop_iterate(cl, mr); \
aoqi@0 738 bottom += _cfls->adjustObjectSize(word_sz); \
aoqi@0 739 } else { \
aoqi@0 740 bottom += _cfls->CompactibleFreeListSpace::block_size_nopar(bottom); \
aoqi@0 741 } \
aoqi@0 742 } \
aoqi@0 743 }
aoqi@0 744
aoqi@0 745 // (There are only two of these, rather than N, because the split is due
aoqi@0 746 // only to the introduction of the FilteringClosure, a local part of the
aoqi@0 747 // impl of this abstraction.)
aoqi@0 748 FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(ExtendedOopClosure)
aoqi@0 749 FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(FilteringClosure)
aoqi@0 750
aoqi@0 751 DirtyCardToOopClosure*
aoqi@0 752 CompactibleFreeListSpace::new_dcto_cl(ExtendedOopClosure* cl,
aoqi@0 753 CardTableModRefBS::PrecisionStyle precision,
aoqi@0 754 HeapWord* boundary) {
aoqi@0 755 return new FreeListSpace_DCTOC(this, _collector, cl, precision, boundary);
aoqi@0 756 }
aoqi@0 757
aoqi@0 758
aoqi@0 759 // Note on locking for the space iteration functions:
aoqi@0 760 // since the collector's iteration activities are concurrent with
aoqi@0 761 // allocation activities by mutators, absent a suitable mutual exclusion
aoqi@0 762 // mechanism the iterators may go awry. For instance a block being iterated
aoqi@0 763 // may suddenly be allocated or divided up and part of it allocated and
aoqi@0 764 // so on.
aoqi@0 765
aoqi@0 766 // Apply the given closure to each block in the space.
aoqi@0 767 void CompactibleFreeListSpace::blk_iterate_careful(BlkClosureCareful* cl) {
aoqi@0 768 assert_lock_strong(freelistLock());
aoqi@0 769 HeapWord *cur, *limit;
aoqi@0 770 for (cur = bottom(), limit = end(); cur < limit;
aoqi@0 771 cur += cl->do_blk_careful(cur));
aoqi@0 772 }
aoqi@0 773
aoqi@0 774 // Apply the given closure to each block in the space.
aoqi@0 775 void CompactibleFreeListSpace::blk_iterate(BlkClosure* cl) {
aoqi@0 776 assert_lock_strong(freelistLock());
aoqi@0 777 HeapWord *cur, *limit;
aoqi@0 778 for (cur = bottom(), limit = end(); cur < limit;
aoqi@0 779 cur += cl->do_blk(cur));
aoqi@0 780 }
aoqi@0 781
aoqi@0 782 // Apply the given closure to each oop in the space.
aoqi@0 783 void CompactibleFreeListSpace::oop_iterate(ExtendedOopClosure* cl) {
aoqi@0 784 assert_lock_strong(freelistLock());
aoqi@0 785 HeapWord *cur, *limit;
aoqi@0 786 size_t curSize;
aoqi@0 787 for (cur = bottom(), limit = end(); cur < limit;
aoqi@0 788 cur += curSize) {
aoqi@0 789 curSize = block_size(cur);
aoqi@0 790 if (block_is_obj(cur)) {
aoqi@0 791 oop(cur)->oop_iterate(cl);
aoqi@0 792 }
aoqi@0 793 }
aoqi@0 794 }
aoqi@0 795
aoqi@0 796 // Apply the given closure to each oop in the intersection of the space and the given memory region.
aoqi@0 797 void CompactibleFreeListSpace::oop_iterate(MemRegion mr, ExtendedOopClosure* cl) {
aoqi@0 798 assert_lock_strong(freelistLock());
aoqi@0 799 if (is_empty()) {
aoqi@0 800 return;
aoqi@0 801 }
aoqi@0 802 MemRegion cur = MemRegion(bottom(), end());
aoqi@0 803 mr = mr.intersection(cur);
aoqi@0 804 if (mr.is_empty()) {
aoqi@0 805 return;
aoqi@0 806 }
aoqi@0 807 if (mr.equals(cur)) {
aoqi@0 808 oop_iterate(cl);
aoqi@0 809 return;
aoqi@0 810 }
aoqi@0 811 assert(mr.end() <= end(), "just took an intersection above");
aoqi@0 812 HeapWord* obj_addr = block_start(mr.start());
aoqi@0 813 HeapWord* t = mr.end();
aoqi@0 814
aoqi@0 815 SpaceMemRegionOopsIterClosure smr_blk(cl, mr);
aoqi@0 816 if (block_is_obj(obj_addr)) {
aoqi@0 817 // Handle first object specially.
aoqi@0 818 oop obj = oop(obj_addr);
aoqi@0 819 obj_addr += adjustObjectSize(obj->oop_iterate(&smr_blk));
aoqi@0 820 } else {
aoqi@0 821 FreeChunk* fc = (FreeChunk*)obj_addr;
aoqi@0 822 obj_addr += fc->size();
aoqi@0 823 }
aoqi@0 824 while (obj_addr < t) {
aoqi@0 825 HeapWord* obj = obj_addr;
aoqi@0 826 obj_addr += block_size(obj_addr);
aoqi@0 827 // If "obj_addr" is not greater than top, then the
aoqi@0 828 // entire object "obj" is within the region.
aoqi@0 829 if (obj_addr <= t) {
aoqi@0 830 if (block_is_obj(obj)) {
aoqi@0 831 oop(obj)->oop_iterate(cl);
aoqi@0 832 }
aoqi@0 833 } else {
aoqi@0 834 // "obj" extends beyond end of region
aoqi@0 835 if (block_is_obj(obj)) {
aoqi@0 836 oop(obj)->oop_iterate(&smr_blk);
aoqi@0 837 }
aoqi@0 838 break;
aoqi@0 839 }
aoqi@0 840 }
aoqi@0 841 }
aoqi@0 842
aoqi@0 843 // NOTE: In the following methods, in order to safely be able to
aoqi@0 844 // apply the closure to an object, we need to be sure that the
aoqi@0 845 // object has been initialized. We are guaranteed that an object
aoqi@0 846 // is initialized if we are holding the Heap_lock with the
aoqi@0 847 // world stopped.
aoqi@0 848 void CompactibleFreeListSpace::verify_objects_initialized() const {
aoqi@0 849 if (is_init_completed()) {
aoqi@0 850 assert_locked_or_safepoint(Heap_lock);
aoqi@0 851 if (Universe::is_fully_initialized()) {
aoqi@0 852 guarantee(SafepointSynchronize::is_at_safepoint(),
aoqi@0 853 "Required for objects to be initialized");
aoqi@0 854 }
aoqi@0 855 } // else make a concession at vm start-up
aoqi@0 856 }
aoqi@0 857
aoqi@0 858 // Apply the given closure to each object in the space
aoqi@0 859 void CompactibleFreeListSpace::object_iterate(ObjectClosure* blk) {
aoqi@0 860 assert_lock_strong(freelistLock());
aoqi@0 861 NOT_PRODUCT(verify_objects_initialized());
aoqi@0 862 HeapWord *cur, *limit;
aoqi@0 863 size_t curSize;
aoqi@0 864 for (cur = bottom(), limit = end(); cur < limit;
aoqi@0 865 cur += curSize) {
aoqi@0 866 curSize = block_size(cur);
aoqi@0 867 if (block_is_obj(cur)) {
aoqi@0 868 blk->do_object(oop(cur));
aoqi@0 869 }
aoqi@0 870 }
aoqi@0 871 }
aoqi@0 872
aoqi@0 873 // Apply the given closure to each live object in the space.
aoqi@0 874 // The use of CompactibleFreeListSpace
aoqi@0 875 // by the ConcurrentMarkSweepGeneration for concurrent GCs allows
aoqi@0 876 // objects in the space to have references to objects that are no longer
aoqi@0 877 // valid. For example, an object may reference another object
aoqi@0 878 // that has already been swept up (collected). This method uses
aoqi@0 879 // obj_is_alive() to determine whether it is safe to apply the closure to
aoqi@0 880 // an object. See obj_is_alive() for details on how liveness of an
aoqi@0 881 // object is decided.
aoqi@0 882
aoqi@0 883 void CompactibleFreeListSpace::safe_object_iterate(ObjectClosure* blk) {
aoqi@0 884 assert_lock_strong(freelistLock());
aoqi@0 885 NOT_PRODUCT(verify_objects_initialized());
aoqi@0 886 HeapWord *cur, *limit;
aoqi@0 887 size_t curSize;
aoqi@0 888 for (cur = bottom(), limit = end(); cur < limit;
aoqi@0 889 cur += curSize) {
aoqi@0 890 curSize = block_size(cur);
aoqi@0 891 if (block_is_obj(cur) && obj_is_alive(cur)) {
aoqi@0 892 blk->do_object(oop(cur));
aoqi@0 893 }
aoqi@0 894 }
aoqi@0 895 }
aoqi@0 896
aoqi@0 897 void CompactibleFreeListSpace::object_iterate_mem(MemRegion mr,
aoqi@0 898 UpwardsObjectClosure* cl) {
aoqi@0 899 assert_locked(freelistLock());
aoqi@0 900 NOT_PRODUCT(verify_objects_initialized());
aoqi@0 901 Space::object_iterate_mem(mr, cl);
aoqi@0 902 }
aoqi@0 903
aoqi@0 904 // Callers of this iterator beware: The closure application should
aoqi@0 905 // be robust in the face of uninitialized objects and should (always)
aoqi@0 906 // return a correct size so that the next addr + size below gives us a
aoqi@0 907 // valid block boundary. [See for instance,
aoqi@0 908 // ScanMarkedObjectsAgainCarefullyClosure::do_object_careful()
aoqi@0 909 // in ConcurrentMarkSweepGeneration.cpp.]
aoqi@0 910 HeapWord*
aoqi@0 911 CompactibleFreeListSpace::object_iterate_careful(ObjectClosureCareful* cl) {
aoqi@0 912 assert_lock_strong(freelistLock());
aoqi@0 913 HeapWord *addr, *last;
aoqi@0 914 size_t size;
aoqi@0 915 for (addr = bottom(), last = end();
aoqi@0 916 addr < last; addr += size) {
aoqi@0 917 FreeChunk* fc = (FreeChunk*)addr;
aoqi@0 918 if (fc->is_free()) {
aoqi@0 919 // Since we hold the free list lock, which protects direct
aoqi@0 920 // allocation in this generation by mutators, a free object
aoqi@0 921 // will remain free throughout this iteration code.
aoqi@0 922 size = fc->size();
aoqi@0 923 } else {
aoqi@0 924 // Note that the object need not necessarily be initialized,
aoqi@0 925 // because (for instance) the free list lock does NOT protect
aoqi@0 926 // object initialization. The closure application below must
aoqi@0 927 // therefore be correct in the face of uninitialized objects.
aoqi@0 928 size = cl->do_object_careful(oop(addr));
aoqi@0 929 if (size == 0) {
aoqi@0 930 // An unparsable object found. Signal early termination.
aoqi@0 931 return addr;
aoqi@0 932 }
aoqi@0 933 }
aoqi@0 934 }
aoqi@0 935 return NULL;
aoqi@0 936 }
aoqi@0 937
aoqi@0 938 // Callers of this iterator beware: The closure application should
aoqi@0 939 // be robust in the face of uninitialized objects and should (always)
aoqi@0 940 // return a correct size so that the next addr + size below gives us a
aoqi@0 941 // valid block boundary. [See for instance,
aoqi@0 942 // ScanMarkedObjectsAgainCarefullyClosure::do_object_careful()
aoqi@0 943 // in ConcurrentMarkSweepGeneration.cpp.]
aoqi@0 944 HeapWord*
aoqi@0 945 CompactibleFreeListSpace::object_iterate_careful_m(MemRegion mr,
aoqi@0 946 ObjectClosureCareful* cl) {
aoqi@0 947 assert_lock_strong(freelistLock());
aoqi@0 948 // Can't use used_region() below because it may not necessarily
aoqi@0 949 // be the same as [bottom(),end()); although we could
aoqi@0 950 // use [used_region().start(),round_to(used_region().end(),CardSize)),
aoqi@0 951 // that appears too cumbersome, so we just do the simpler check
aoqi@0 952 // in the assertion below.
aoqi@0 953 assert(!mr.is_empty() && MemRegion(bottom(),end()).contains(mr),
aoqi@0 954 "mr should be non-empty and within used space");
aoqi@0 955 HeapWord *addr, *end;
aoqi@0 956 size_t size;
aoqi@0 957 for (addr = block_start_careful(mr.start()), end = mr.end();
aoqi@0 958 addr < end; addr += size) {
aoqi@0 959 FreeChunk* fc = (FreeChunk*)addr;
aoqi@0 960 if (fc->is_free()) {
aoqi@0 961 // Since we hold the free list lock, which protects direct
aoqi@0 962 // allocation in this generation by mutators, a free object
aoqi@0 963 // will remain free throughout this iteration code.
aoqi@0 964 size = fc->size();
aoqi@0 965 } else {
aoqi@0 966 // Note that the object need not necessarily be initialized,
aoqi@0 967 // because (for instance) the free list lock does NOT protect
aoqi@0 968 // object initialization. The closure application below must
aoqi@0 969 // therefore be correct in the face of uninitialized objects.
aoqi@0 970 size = cl->do_object_careful_m(oop(addr), mr);
aoqi@0 971 if (size == 0) {
aoqi@0 972 // An unparsable object found. Signal early termination.
aoqi@0 973 return addr;
aoqi@0 974 }
aoqi@0 975 }
aoqi@0 976 }
aoqi@0 977 return NULL;
aoqi@0 978 }
aoqi@0 979
aoqi@0 980
aoqi@0 981 HeapWord* CompactibleFreeListSpace::block_start_const(const void* p) const {
aoqi@0 982 NOT_PRODUCT(verify_objects_initialized());
aoqi@0 983 return _bt.block_start(p);
aoqi@0 984 }
aoqi@0 985
aoqi@0 986 HeapWord* CompactibleFreeListSpace::block_start_careful(const void* p) const {
aoqi@0 987 return _bt.block_start_careful(p);
aoqi@0 988 }
aoqi@0 989
aoqi@0 990 size_t CompactibleFreeListSpace::block_size(const HeapWord* p) const {
aoqi@0 991 NOT_PRODUCT(verify_objects_initialized());
aoqi@0 992 // This must be volatile, or else there is a danger that the compiler
aoqi@0 993 // will compile the code below into a sometimes-infinite loop, by keeping
aoqi@0 994 // the value read the first time in a register.
aoqi@0 995 while (true) {
aoqi@0 996 // We must do this until we get a consistent view of the object.
aoqi@0 997 if (FreeChunk::indicatesFreeChunk(p)) {
aoqi@0 998 volatile FreeChunk* fc = (volatile FreeChunk*)p;
aoqi@0 999 size_t res = fc->size();
aoqi@0 1000
aoqi@0 1001 // Bugfix for systems with weak memory model (PPC64/IA64). The
aoqi@0 1002 // block's free bit was set and we have read the size of the
aoqi@0 1003 // block. Acquire and check the free bit again. If the block is
aoqi@0 1004 // still free, the read size is correct.
aoqi@0 1005 OrderAccess::acquire();
aoqi@0 1006
aoqi@0 1007 // If the object is still a free chunk, return the size, else it
aoqi@0 1008 // has been allocated so try again.
aoqi@0 1009 if (FreeChunk::indicatesFreeChunk(p)) {
aoqi@0 1010 assert(res != 0, "Block size should not be 0");
aoqi@0 1011 return res;
aoqi@0 1012 }
aoqi@0 1013 } else {
aoqi@0 1014 // must read from what 'p' points to in each loop.
aoqi@0 1015 Klass* k = ((volatile oopDesc*)p)->klass_or_null();
aoqi@0 1016 if (k != NULL) {
aoqi@0 1017 assert(k->is_klass(), "Should really be klass oop.");
aoqi@0 1018 oop o = (oop)p;
aoqi@0 1019 assert(o->is_oop(true /* ignore mark word */), "Should be an oop.");
aoqi@0 1020
aoqi@0 1021 // Bugfix for systems with weak memory model (PPC64/IA64).
aoqi@0 1022 // The object o may be an array. Acquire to make sure that the array
aoqi@0 1023 // size (third word) is consistent.
aoqi@0 1024 OrderAccess::acquire();
aoqi@0 1025
aoqi@0 1026 size_t res = o->size_given_klass(k);
aoqi@0 1027 res = adjustObjectSize(res);
aoqi@0 1028 assert(res != 0, "Block size should not be 0");
aoqi@0 1029 return res;
aoqi@0 1030 }
aoqi@0 1031 }
aoqi@0 1032 }
aoqi@0 1033 }
aoqi@0 1034
aoqi@0 1035 // TODO: Now that is_parsable is gone, we should combine these two functions.
aoqi@0 1036 // A variant of the above that uses the Printezis bits for
aoqi@0 1037 // unparsable but allocated objects. This avoids any possible
aoqi@0 1038 // stalls waiting for mutators to initialize objects, and is
aoqi@0 1039 // thus potentially faster than the variant above. However,
aoqi@0 1040 // this variant may return a zero size for a block that is
aoqi@0 1041 // under mutation and for which a consistent size cannot be
aoqi@0 1042 // inferred without stalling; see CMSCollector::block_size_if_printezis_bits().
aoqi@0 1043 size_t CompactibleFreeListSpace::block_size_no_stall(HeapWord* p,
aoqi@0 1044 const CMSCollector* c)
aoqi@0 1045 const {
aoqi@0 1046 assert(MemRegion(bottom(), end()).contains(p), "p not in space");
aoqi@0 1047 // This must be volatile, or else there is a danger that the compiler
aoqi@0 1048 // will compile the code below into a sometimes-infinite loop, by keeping
aoqi@0 1049 // the value read the first time in a register.
aoqi@0 1050 DEBUG_ONLY(uint loops = 0;)
aoqi@0 1051 while (true) {
aoqi@0 1052 // We must do this until we get a consistent view of the object.
aoqi@0 1053 if (FreeChunk::indicatesFreeChunk(p)) {
aoqi@0 1054 volatile FreeChunk* fc = (volatile FreeChunk*)p;
aoqi@0 1055 size_t res = fc->size();
aoqi@0 1056
aoqi@0 1057 // Bugfix for systems with weak memory model (PPC64/IA64). The
aoqi@0 1058 // free bit of the block was set and we have read the size of
aoqi@0 1059 // the block. Acquire and check the free bit again. If the
aoqi@0 1060 // block is still free, the read size is correct.
aoqi@0 1061 OrderAccess::acquire();
aoqi@0 1062
aoqi@0 1063 if (FreeChunk::indicatesFreeChunk(p)) {
aoqi@0 1064 assert(res != 0, "Block size should not be 0");
aoqi@0 1065 assert(loops == 0, "Should be 0");
aoqi@0 1066 return res;
aoqi@0 1067 }
aoqi@0 1068 } else {
aoqi@0 1069 // must read from what 'p' points to in each loop.
aoqi@0 1070 Klass* k = ((volatile oopDesc*)p)->klass_or_null();
aoqi@0 1071 // We trust the size of any object that has a non-NULL
aoqi@0 1072 // klass and (for those in the perm gen) is parsable
aoqi@0 1073 // -- irrespective of its conc_safe-ty.
aoqi@0 1074 if (k != NULL) {
aoqi@0 1075 assert(k->is_klass(), "Should really be klass oop.");
aoqi@0 1076 oop o = (oop)p;
aoqi@0 1077 assert(o->is_oop(), "Should be an oop");
aoqi@0 1078
aoqi@0 1079 // Bugfix for systems with weak memory model (PPC64/IA64).
aoqi@0 1080 // The object o may be an array. Acquire to make sure that the array
aoqi@0 1081 // size (third word) is consistent.
aoqi@0 1082 OrderAccess::acquire();
aoqi@0 1083
aoqi@0 1084 size_t res = o->size_given_klass(k);
aoqi@0 1085 res = adjustObjectSize(res);
aoqi@0 1086 assert(res != 0, "Block size should not be 0");
aoqi@0 1087 return res;
aoqi@0 1088 } else {
aoqi@0 1089 // May return 0 if P-bits not present.
aoqi@0 1090 return c->block_size_if_printezis_bits(p);
aoqi@0 1091 }
aoqi@0 1092 }
aoqi@0 1093 assert(loops == 0, "Can loop at most once");
aoqi@0 1094 DEBUG_ONLY(loops++;)
aoqi@0 1095 }
aoqi@0 1096 }
aoqi@0 1097
aoqi@0 1098 size_t CompactibleFreeListSpace::block_size_nopar(const HeapWord* p) const {
aoqi@0 1099 NOT_PRODUCT(verify_objects_initialized());
aoqi@0 1100 assert(MemRegion(bottom(), end()).contains(p), "p not in space");
aoqi@0 1101 FreeChunk* fc = (FreeChunk*)p;
aoqi@0 1102 if (fc->is_free()) {
aoqi@0 1103 return fc->size();
aoqi@0 1104 } else {
aoqi@0 1105 // Ignore mark word because this may be a recently promoted
aoqi@0 1106 // object whose mark word is used to chain together grey
aoqi@0 1107 // objects (the last one would have a null value).
aoqi@0 1108 assert(oop(p)->is_oop(true), "Should be an oop");
aoqi@0 1109 return adjustObjectSize(oop(p)->size());
aoqi@0 1110 }
aoqi@0 1111 }
aoqi@0 1112
aoqi@0 1113 // This implementation assumes that the property of "being an object" is
aoqi@0 1114 // stable. But being a free chunk may not be (because of parallel
aoqi@0 1115 // promotion.)
aoqi@0 1116 bool CompactibleFreeListSpace::block_is_obj(const HeapWord* p) const {
aoqi@0 1117 FreeChunk* fc = (FreeChunk*)p;
aoqi@0 1118 assert(is_in_reserved(p), "Should be in space");
aoqi@0 1119 // When doing a mark-sweep-compact of the CMS generation, this
aoqi@0 1120 // assertion may fail because prepare_for_compaction() uses
aoqi@0 1121 // space that is garbage to maintain information on ranges of
aoqi@0 1122 // live objects so that these live ranges can be moved as a whole.
aoqi@0 1123 // Comment out this assertion until that problem can be solved
aoqi@0 1124 // (i.e., that the block start calculation may look at objects
aoqi@0 1125 // at addresses below "p" in finding the object that contains "p",
aoqi@0 1126 // and those objects (if garbage) may have been modified to hold
aoqi@0 1127 // live range information).
aoqi@0 1128 // assert(CollectedHeap::use_parallel_gc_threads() || _bt.block_start(p) == p,
aoqi@0 1129 // "Should be a block boundary");
aoqi@0 1130 if (FreeChunk::indicatesFreeChunk(p)) return false;
aoqi@0 1131 Klass* k = oop(p)->klass_or_null();
aoqi@0 1132 if (k != NULL) {
aoqi@0 1133 // Ignore mark word because it may have been used to
aoqi@0 1134 // chain together promoted objects (the last one
aoqi@0 1135 // would have a null value).
aoqi@0 1136 assert(oop(p)->is_oop(true), "Should be an oop");
aoqi@0 1137 return true;
aoqi@0 1138 } else {
aoqi@0 1139 return false; // Was not an object at the start of collection.
aoqi@0 1140 }
aoqi@0 1141 }
aoqi@0 1142
aoqi@0 1143 // Check if the object is alive. This fact is checked either by consulting
aoqi@0 1144 // the main marking bitmap in the sweeping phase or, if it's a permanent
aoqi@0 1145 // generation and we're not in the sweeping phase, by checking the
aoqi@0 1146 // perm_gen_verify_bit_map where we store the "deadness" information if
aoqi@0 1147 // we did not sweep the perm gen in the most recent previous GC cycle.
aoqi@0 1148 bool CompactibleFreeListSpace::obj_is_alive(const HeapWord* p) const {
aoqi@0 1149 assert(SafepointSynchronize::is_at_safepoint() || !is_init_completed(),
aoqi@0 1150 "Else races are possible");
aoqi@0 1151 assert(block_is_obj(p), "The address should point to an object");
aoqi@0 1152
aoqi@0 1153 // If we're sweeping, we use object liveness information from the main bit map
aoqi@0 1154 // for both perm gen and old gen.
aoqi@0 1155 // We don't need to lock the bitmap (live_map or dead_map below), because
aoqi@0 1156 // EITHER we are in the middle of the sweeping phase, and the
aoqi@0 1157 // main marking bit map (live_map below) is locked,
aoqi@0 1158 // OR we're in other phases and perm_gen_verify_bit_map (dead_map below)
aoqi@0 1159 // is stable, because it's mutated only in the sweeping phase.
aoqi@0 1160 // NOTE: This method is also used by jmap where, if class unloading is
aoqi@0 1161 // off, the results can return "false" for legitimate perm objects,
aoqi@0 1162 // when we are not in the midst of a sweeping phase, which can result
aoqi@0 1163 // in jmap not reporting certain perm gen objects. This will be moot
aoqi@0 1164 // if/when the perm gen goes away in the future.
aoqi@0 1165 if (_collector->abstract_state() == CMSCollector::Sweeping) {
aoqi@0 1166 CMSBitMap* live_map = _collector->markBitMap();
aoqi@0 1167 return live_map->par_isMarked((HeapWord*) p);
aoqi@0 1168 }
aoqi@0 1169 return true;
aoqi@0 1170 }
aoqi@0 1171
aoqi@0 1172 bool CompactibleFreeListSpace::block_is_obj_nopar(const HeapWord* p) const {
aoqi@0 1173 FreeChunk* fc = (FreeChunk*)p;
aoqi@0 1174 assert(is_in_reserved(p), "Should be in space");
aoqi@0 1175 assert(_bt.block_start(p) == p, "Should be a block boundary");
aoqi@0 1176 if (!fc->is_free()) {
aoqi@0 1177 // Ignore mark word because it may have been used to
aoqi@0 1178 // chain together promoted objects (the last one
aoqi@0 1179 // would have a null value).
aoqi@0 1180 assert(oop(p)->is_oop(true), "Should be an oop");
aoqi@0 1181 return true;
aoqi@0 1182 }
aoqi@0 1183 return false;
aoqi@0 1184 }
aoqi@0 1185
aoqi@0 1186 // "MT-safe but not guaranteed MT-precise" (TM); you may get an
aoqi@0 1187 // approximate answer if you don't hold the freelistLock when you call this.
aoqi@0 1188 size_t CompactibleFreeListSpace::totalSizeInIndexedFreeLists() const {
aoqi@0 1189 size_t size = 0;
aoqi@0 1190 for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
aoqi@0 1191 debug_only(
aoqi@0 1192 // We may be calling here without the lock in which case we
aoqi@0 1193 // won't do this modest sanity check.
aoqi@0 1194 if (freelistLock()->owned_by_self()) {
aoqi@0 1195 size_t total_list_size = 0;
aoqi@0 1196 for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
aoqi@0 1197 fc = fc->next()) {
aoqi@0 1198 total_list_size += i;
aoqi@0 1199 }
aoqi@0 1200 assert(total_list_size == i * _indexedFreeList[i].count(),
aoqi@0 1201 "Count in list is incorrect");
aoqi@0 1202 }
aoqi@0 1203 )
aoqi@0 1204 size += i * _indexedFreeList[i].count();
aoqi@0 1205 }
aoqi@0 1206 return size;
aoqi@0 1207 }
aoqi@0 1208
aoqi@0 1209 HeapWord* CompactibleFreeListSpace::par_allocate(size_t size) {
aoqi@0 1210 MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
aoqi@0 1211 return allocate(size);
aoqi@0 1212 }
aoqi@0 1213
aoqi@0 1214 HeapWord*
aoqi@0 1215 CompactibleFreeListSpace::getChunkFromSmallLinearAllocBlockRemainder(size_t size) {
aoqi@0 1216 return getChunkFromLinearAllocBlockRemainder(&_smallLinearAllocBlock, size);
aoqi@0 1217 }
aoqi@0 1218
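// Allocate 'size' heap words from the free data structures; returns NULL on
// failure. The caller must hold the free list lock and must pass a size
// already adjusted with adjustObjectSize(). A sketch of a hypothetical call:
//   size_t adjusted = adjustObjectSize(word_size);
//   HeapWord* p = allocate(adjusted);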
aoqi@0 1219 HeapWord* CompactibleFreeListSpace::allocate(size_t size) {
aoqi@0 1220 assert_lock_strong(freelistLock());
aoqi@0 1221 HeapWord* res = NULL;
aoqi@0 1222 assert(size == adjustObjectSize(size),
aoqi@0 1223 "use adjustObjectSize() before calling into allocate()");
aoqi@0 1224
aoqi@0 1225 if (_adaptive_freelists) {
aoqi@0 1226 res = allocate_adaptive_freelists(size);
aoqi@0 1227 } else { // non-adaptive free lists
aoqi@0 1228 res = allocate_non_adaptive_freelists(size);
aoqi@0 1229 }
aoqi@0 1230
aoqi@0 1231 if (res != NULL) {
aoqi@0 1232 // check that res does lie in this space!
aoqi@0 1233 assert(is_in_reserved(res), "Not in this space!");
aoqi@0 1234 assert(is_aligned((void*)res), "alignment check");
aoqi@0 1235
aoqi@0 1236 FreeChunk* fc = (FreeChunk*)res;
aoqi@0 1237 fc->markNotFree();
aoqi@0 1238 assert(!fc->is_free(), "shouldn't be marked free");
aoqi@0 1239 assert(oop(fc)->klass_or_null() == NULL, "should look uninitialized");
aoqi@0 1240 // Verify that the block offset table shows this to
aoqi@0 1241 // be a single block, but not one which is unallocated.
aoqi@0 1242 _bt.verify_single_block(res, size);
aoqi@0 1243 _bt.verify_not_unallocated(res, size);
aoqi@0 1244 // mangle a just allocated object with a distinct pattern.
aoqi@0 1245 debug_only(fc->mangleAllocated(size));
aoqi@0 1246 }
aoqi@0 1247
aoqi@0 1248 return res;
aoqi@0 1249 }
aoqi@0 1250
aoqi@0 1251 HeapWord* CompactibleFreeListSpace::allocate_non_adaptive_freelists(size_t size) {
aoqi@0 1252 HeapWord* res = NULL;
aoqi@0 1253 // try and use linear allocation for smaller blocks
aoqi@0 1254 if (size < _smallLinearAllocBlock._allocation_size_limit) {
aoqi@0 1255 // if successful, the following also adjusts block offset table
aoqi@0 1256 res = getChunkFromSmallLinearAllocBlock(size);
aoqi@0 1257 }
aoqi@0 1258 // Else triage to indexed lists for smaller sizes
aoqi@0 1259 if (res == NULL) {
aoqi@0 1260 if (size < SmallForDictionary) {
aoqi@0 1261 res = (HeapWord*) getChunkFromIndexedFreeList(size);
aoqi@0 1262 } else {
aoqi@0 1263 // else get it from the big dictionary; if even this doesn't
aoqi@0 1264 // work, we are out of luck.
aoqi@0 1265 res = (HeapWord*)getChunkFromDictionaryExact(size);
aoqi@0 1266 }
aoqi@0 1267 }
aoqi@0 1268
aoqi@0 1269 return res;
aoqi@0 1270 }
aoqi@0 1271
aoqi@0 1272 HeapWord* CompactibleFreeListSpace::allocate_adaptive_freelists(size_t size) {
aoqi@0 1273 assert_lock_strong(freelistLock());
aoqi@0 1274 HeapWord* res = NULL;
aoqi@0 1275 assert(size == adjustObjectSize(size),
aoqi@0 1276 "use adjustObjectSize() before calling into allocate()");
aoqi@0 1277
aoqi@0 1278 // Strategy
aoqi@0 1279 // if small
aoqi@0 1280 // exact size from small object indexed list if small
aoqi@0 1281 // small or large linear allocation block (linAB) as appropriate
aoqi@0 1282 // take from lists of greater sized chunks
aoqi@0 1283 // else
aoqi@0 1284 // dictionary
aoqi@0 1285 // small or large linear allocation block if it has the space
aoqi@0 1286 // Try allocating exact size from indexTable first
aoqi@0 1287 if (size < IndexSetSize) {
aoqi@0 1288 res = (HeapWord*) getChunkFromIndexedFreeList(size);
aoqi@0 1289 if(res != NULL) {
aoqi@0 1290 assert(res != (HeapWord*)_indexedFreeList[size].head(),
aoqi@0 1291 "Not removed from free list");
aoqi@0 1292 // no block offset table adjustment is necessary on blocks in
aoqi@0 1293 // the indexed lists.
aoqi@0 1294
aoqi@0 1295 // Try allocating from the small LinAB
aoqi@0 1296 } else if (size < _smallLinearAllocBlock._allocation_size_limit &&
aoqi@0 1297 (res = getChunkFromSmallLinearAllocBlock(size)) != NULL) {
aoqi@0 1298 // if successful, the above also adjusts block offset table
aoqi@0 1299 // Note that this call will refill the LinAB to
aoqi@0 1300 // satisfy the request. This is different than
aoqi@0 1301 // evm.
aoqi@0 1302 // Don't record chunk off a LinAB? smallSplitBirth(size);
aoqi@0 1303 } else {
aoqi@0 1304 // Raid the exact free lists larger than size, even if they are not
aoqi@0 1305 // overpopulated.
aoqi@0 1306 res = (HeapWord*) getChunkFromGreater(size);
aoqi@0 1307 }
aoqi@0 1308 } else {
aoqi@0 1309 // Big objects get allocated directly from the dictionary.
aoqi@0 1310 res = (HeapWord*) getChunkFromDictionaryExact(size);
aoqi@0 1311 if (res == NULL) {
aoqi@0 1312 // Try hard not to fail since an allocation failure will likely
aoqi@0 1313 // trigger a synchronous GC. Try to get the space from the
aoqi@0 1314 // allocation blocks.
aoqi@0 1315 res = getChunkFromSmallLinearAllocBlockRemainder(size);
aoqi@0 1316 }
aoqi@0 1317 }
aoqi@0 1318
aoqi@0 1319 return res;
aoqi@0 1320 }
aoqi@0 1321
aoqi@0 1322 // A worst-case estimate of the space required (in HeapWords) to expand the heap
aoqi@0 1323 // when promoting obj.
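// For example (illustrative numbers only): with obj_size = 100 words, a
// non-adaptive configuration whose _smallLinearAllocBlock._refillSize is 256,
// a _promoInfo.refillSize() of 64 and MinChunkSize = 4, the estimate is
// MAX2(100, 256) + 64 + 2*4 = 328 words.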
aoqi@0 1324 size_t CompactibleFreeListSpace::expansionSpaceRequired(size_t obj_size) const {
aoqi@0 1325 // Depending on the object size, expansion may require refilling either a
aoqi@0 1326 // bigLAB or a smallLAB plus refilling a PromotionInfo object. MinChunkSize
aoqi@0 1327 // is added because the dictionary may over-allocate to avoid fragmentation.
aoqi@0 1328 size_t space = obj_size;
aoqi@0 1329 if (!_adaptive_freelists) {
aoqi@0 1330 space = MAX2(space, _smallLinearAllocBlock._refillSize);
aoqi@0 1331 }
aoqi@0 1332 space += _promoInfo.refillSize() + 2 * MinChunkSize;
aoqi@0 1333 return space;
aoqi@0 1334 }
aoqi@0 1335
aoqi@0 1336 FreeChunk* CompactibleFreeListSpace::getChunkFromGreater(size_t numWords) {
aoqi@0 1337 FreeChunk* ret;
aoqi@0 1338
aoqi@0 1339 assert(numWords >= MinChunkSize, "Size is less than minimum");
aoqi@0 1340 assert(linearAllocationWouldFail() || bestFitFirst(),
aoqi@0 1341 "Should not be here");
aoqi@0 1342
aoqi@0 1343 size_t i;
aoqi@0 1344 size_t currSize = numWords + MinChunkSize;
aoqi@0 1345 assert(currSize % MinObjAlignment == 0, "currSize should be aligned");
aoqi@0 1346 for (i = currSize; i < IndexSetSize; i += IndexSetStride) {
aoqi@0 1347 AdaptiveFreeList<FreeChunk>* fl = &_indexedFreeList[i];
aoqi@0 1348 if (fl->head()) {
aoqi@0 1349 ret = getFromListGreater(fl, numWords);
aoqi@0 1350 assert(ret == NULL || ret->is_free(), "Should be returning a free chunk");
aoqi@0 1351 return ret;
aoqi@0 1352 }
aoqi@0 1353 }
aoqi@0 1354
aoqi@0 1355 currSize = MAX2((size_t)SmallForDictionary,
aoqi@0 1356 (size_t)(numWords + MinChunkSize));
aoqi@0 1357
aoqi@0 1358 /* Try to get a chunk that satisfies request, while avoiding
aoqi@0 1359 fragmentation that can't be handled. */
aoqi@0 1360 {
aoqi@0 1361 ret = dictionary()->get_chunk(currSize);
aoqi@0 1362 if (ret != NULL) {
aoqi@0 1363 assert(ret->size() - numWords >= MinChunkSize,
aoqi@0 1364 "Chunk is too small");
aoqi@0 1365 _bt.allocated((HeapWord*)ret, ret->size());
aoqi@0 1366 /* Carve returned chunk. */
aoqi@0 1367 (void) splitChunkAndReturnRemainder(ret, numWords);
aoqi@0 1368 /* Label this as no longer a free chunk. */
aoqi@0 1369 assert(ret->is_free(), "This chunk should be free");
aoqi@0 1370 ret->link_prev(NULL);
aoqi@0 1371 }
aoqi@0 1372 assert(ret == NULL || ret->is_free(), "Should be returning a free chunk");
aoqi@0 1373 return ret;
aoqi@0 1374 }
aoqi@0 1375 ShouldNotReachHere();
aoqi@0 1376 }
aoqi@0 1377
aoqi@0 1378 bool CompactibleFreeListSpace::verifyChunkInIndexedFreeLists(FreeChunk* fc) const {
aoqi@0 1379 assert(fc->size() < IndexSetSize, "Size of chunk is too large");
aoqi@0 1380 return _indexedFreeList[fc->size()].verify_chunk_in_free_list(fc);
aoqi@0 1381 }
aoqi@0 1382
aoqi@0 1383 bool CompactibleFreeListSpace::verify_chunk_is_linear_alloc_block(FreeChunk* fc) const {
aoqi@0 1384 assert((_smallLinearAllocBlock._ptr != (HeapWord*)fc) ||
aoqi@0 1385 (_smallLinearAllocBlock._word_size == fc->size()),
aoqi@0 1386 "Linear allocation block shows incorrect size");
aoqi@0 1387 return ((_smallLinearAllocBlock._ptr == (HeapWord*)fc) &&
aoqi@0 1388 (_smallLinearAllocBlock._word_size == fc->size()));
aoqi@0 1389 }
aoqi@0 1390
aoqi@0 1391 // Check if the purported free chunk is present either as a linear
aoqi@0 1392 // allocation block, the size-indexed table of (smaller) free blocks,
aoqi@0 1393 // or the larger free blocks kept in the binary tree dictionary.
aoqi@0 1394 bool CompactibleFreeListSpace::verify_chunk_in_free_list(FreeChunk* fc) const {
aoqi@0 1395 if (verify_chunk_is_linear_alloc_block(fc)) {
aoqi@0 1396 return true;
aoqi@0 1397 } else if (fc->size() < IndexSetSize) {
aoqi@0 1398 return verifyChunkInIndexedFreeLists(fc);
aoqi@0 1399 } else {
aoqi@0 1400 return dictionary()->verify_chunk_in_free_list(fc);
aoqi@0 1401 }
aoqi@0 1402 }
aoqi@0 1403
aoqi@0 1404 #ifndef PRODUCT
aoqi@0 1405 void CompactibleFreeListSpace::assert_locked() const {
aoqi@0 1406 CMSLockVerifier::assert_locked(freelistLock(), parDictionaryAllocLock());
aoqi@0 1407 }
aoqi@0 1408
aoqi@0 1409 void CompactibleFreeListSpace::assert_locked(const Mutex* lock) const {
aoqi@0 1410 CMSLockVerifier::assert_locked(lock);
aoqi@0 1411 }
aoqi@0 1412 #endif
aoqi@0 1413
aoqi@0 1414 FreeChunk* CompactibleFreeListSpace::allocateScratch(size_t size) {
aoqi@0 1415 // In the parallel case, the main thread holds the free list lock
aoqi@0 1416 // on behalf of the parallel threads.
aoqi@0 1417 FreeChunk* fc;
aoqi@0 1418 {
aoqi@0 1419 // If GC is parallel, this might be called by several threads.
aoqi@0 1420 // This should be rare enough that the locking overhead won't affect
aoqi@0 1421 // the sequential code.
aoqi@0 1422 MutexLockerEx x(parDictionaryAllocLock(),
aoqi@0 1423 Mutex::_no_safepoint_check_flag);
aoqi@0 1424 fc = getChunkFromDictionary(size);
aoqi@0 1425 }
aoqi@0 1426 if (fc != NULL) {
aoqi@0 1427 fc->dontCoalesce();
aoqi@0 1428 assert(fc->is_free(), "Should be free, but not coalescable");
aoqi@0 1429 // Verify that the block offset table shows this to
aoqi@0 1430 // be a single block, but not one which is unallocated.
aoqi@0 1431 _bt.verify_single_block((HeapWord*)fc, fc->size());
aoqi@0 1432 _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
aoqi@0 1433 }
aoqi@0 1434 return fc;
aoqi@0 1435 }
aoqi@0 1436
aoqi@0 1437 oop CompactibleFreeListSpace::promote(oop obj, size_t obj_size) {
aoqi@0 1438 assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
aoqi@0 1439 assert_locked();
aoqi@0 1440
aoqi@0 1441 // if we are tracking promotions, then first ensure space for
aoqi@0 1442 // promotion (including spooling space for saving header if necessary).
aoqi@0 1443 // then allocate and copy, then track promoted info if needed.
aoqi@0 1444 // When tracking (see PromotionInfo::track()), the mark word may
aoqi@0 1445 // be displaced and in this case restoration of the mark word
aoqi@0 1446 // occurs in the (oop_since_save_marks_)iterate phase.
aoqi@0 1447 if (_promoInfo.tracking() && !_promoInfo.ensure_spooling_space()) {
aoqi@0 1448 return NULL;
aoqi@0 1449 }
aoqi@0 1450 // Call the allocate(size_t, bool) form directly to avoid the
aoqi@0 1451 // additional call through the allocate(size_t) form. Having
aoqi@0 1452 // the compiler inline the call is problematic because allocate(size_t)
aoqi@0 1453 // is a virtual method.
aoqi@0 1454 HeapWord* res = allocate(adjustObjectSize(obj_size));
aoqi@0 1455 if (res != NULL) {
aoqi@0 1456 Copy::aligned_disjoint_words((HeapWord*)obj, res, obj_size);
aoqi@0 1457 // if we should be tracking promotions, do so.
aoqi@0 1458 if (_promoInfo.tracking()) {
aoqi@0 1459 _promoInfo.track((PromotedObject*)res);
aoqi@0 1460 }
aoqi@0 1461 }
aoqi@0 1462 return oop(res);
aoqi@0 1463 }
aoqi@0 1464
aoqi@0 1465 HeapWord*
aoqi@0 1466 CompactibleFreeListSpace::getChunkFromSmallLinearAllocBlock(size_t size) {
aoqi@0 1467 assert_locked();
aoqi@0 1468 assert(size >= MinChunkSize, "minimum chunk size");
aoqi@0 1469 assert(size < _smallLinearAllocBlock._allocation_size_limit,
aoqi@0 1470 "maximum from smallLinearAllocBlock");
aoqi@0 1471 return getChunkFromLinearAllocBlock(&_smallLinearAllocBlock, size);
aoqi@0 1472 }
aoqi@0 1473
aoqi@0 1474 HeapWord*
aoqi@0 1475 CompactibleFreeListSpace::getChunkFromLinearAllocBlock(LinearAllocBlock *blk,
aoqi@0 1476 size_t size) {
aoqi@0 1477 assert_locked();
aoqi@0 1478 assert(size >= MinChunkSize, "too small");
aoqi@0 1479 HeapWord* res = NULL;
aoqi@0 1480 // Try to do linear allocation from blk, first making sure that the block is not empty.
aoqi@0 1481 if (blk->_word_size == 0) {
aoqi@0 1482 // We have probably been unable to fill this either in the prologue or
aoqi@0 1483 // when it was exhausted at the last linear allocation. Bail out until
aoqi@0 1484 // next time.
aoqi@0 1485 assert(blk->_ptr == NULL, "consistency check");
aoqi@0 1486 return NULL;
aoqi@0 1487 }
aoqi@0 1488 assert(blk->_word_size != 0 && blk->_ptr != NULL, "consistency check");
aoqi@0 1489 res = getChunkFromLinearAllocBlockRemainder(blk, size);
aoqi@0 1490 if (res != NULL) return res;
aoqi@0 1491
aoqi@0 1492 // about to exhaust this linear allocation block
aoqi@0 1493 if (blk->_word_size == size) { // exactly satisfied
aoqi@0 1494 res = blk->_ptr;
aoqi@0 1495 _bt.allocated(res, blk->_word_size);
aoqi@0 1496 } else if (size + MinChunkSize <= blk->_refillSize) {
aoqi@0 1497 size_t sz = blk->_word_size;
aoqi@0 1498 // Update _unallocated_block if the size is such that chunk would be
aoqi@0 1499 // returned to the indexed free list. All other chunks in the indexed
aoqi@0 1500 // free lists are allocated from the dictionary so that _unallocated_block
aoqi@0 1501 // has already been adjusted for them. Do it here so that the cost
aoqi@0 1502 // is incurred here for all chunks added back to the indexed free lists.
aoqi@0 1503 if (sz < SmallForDictionary) {
aoqi@0 1504 _bt.allocated(blk->_ptr, sz);
aoqi@0 1505 }
aoqi@0 1506 // Return the chunk that isn't big enough, and then refill below.
aoqi@0 1507 addChunkToFreeLists(blk->_ptr, sz);
aoqi@0 1508 split_birth(sz);
aoqi@0 1509 // Don't keep statistics on adding back chunk from a LinAB.
aoqi@0 1510 } else {
aoqi@0 1511 // A refilled block would not satisfy the request.
aoqi@0 1512 return NULL;
aoqi@0 1513 }
aoqi@0 1514
aoqi@0 1515 blk->_ptr = NULL; blk->_word_size = 0;
aoqi@0 1516 refillLinearAllocBlock(blk);
aoqi@0 1517 assert(blk->_ptr == NULL || blk->_word_size >= size + MinChunkSize,
aoqi@0 1518 "block was replenished");
aoqi@0 1519 if (res != NULL) {
aoqi@0 1520 split_birth(size);
aoqi@0 1521 repairLinearAllocBlock(blk);
aoqi@0 1522 } else if (blk->_ptr != NULL) {
aoqi@0 1523 res = blk->_ptr;
aoqi@0 1524 size_t blk_size = blk->_word_size;
aoqi@0 1525 blk->_word_size -= size;
aoqi@0 1526 blk->_ptr += size;
aoqi@0 1527 split_birth(size);
aoqi@0 1528 repairLinearAllocBlock(blk);
aoqi@0 1529 // Update BOT last so that other (parallel) GC threads see a consistent
aoqi@0 1530 // view of the BOT and free blocks.
aoqi@0 1531 // Above must occur before BOT is updated below.
aoqi@0 1532 OrderAccess::storestore();
aoqi@0 1533 _bt.split_block(res, blk_size, size); // adjust block offset table
aoqi@0 1534 }
aoqi@0 1535 return res;
aoqi@0 1536 }
aoqi@0 1537
aoqi@0 1538 HeapWord* CompactibleFreeListSpace::getChunkFromLinearAllocBlockRemainder(
aoqi@0 1539 LinearAllocBlock* blk,
aoqi@0 1540 size_t size) {
aoqi@0 1541 assert_locked();
aoqi@0 1542 assert(size >= MinChunkSize, "too small");
aoqi@0 1543
aoqi@0 1544 HeapWord* res = NULL;
aoqi@0 1545 // This is the common case. Keep it simple.
aoqi@0 1546 if (blk->_word_size >= size + MinChunkSize) {
aoqi@0 1547 assert(blk->_ptr != NULL, "consistency check");
aoqi@0 1548 res = blk->_ptr;
aoqi@0 1549 // Note that the BOT is up-to-date for the linAB before allocation. It
aoqi@0 1550 // indicates the start of the linAB. The split_block() updates the
aoqi@0 1551 // BOT for the linAB after the allocation (indicates the start of the
aoqi@0 1552 // next chunk to be allocated).
aoqi@0 1553 size_t blk_size = blk->_word_size;
aoqi@0 1554 blk->_word_size -= size;
aoqi@0 1555 blk->_ptr += size;
aoqi@0 1556 split_birth(size);
aoqi@0 1557 repairLinearAllocBlock(blk);
aoqi@0 1558 // Update BOT last so that other (parallel) GC threads see a consistent
aoqi@0 1559 // view of the BOT and free blocks.
aoqi@0 1560 // Above must occur before BOT is updated below.
aoqi@0 1561 OrderAccess::storestore();
aoqi@0 1562 _bt.split_block(res, blk_size, size); // adjust block offset table
aoqi@0 1563 _bt.allocated(res, size);
aoqi@0 1564 }
aoqi@0 1565 return res;
aoqi@0 1566 }
aoqi@0 1567
aoqi@0 1568 FreeChunk*
aoqi@0 1569 CompactibleFreeListSpace::getChunkFromIndexedFreeList(size_t size) {
aoqi@0 1570 assert_locked();
aoqi@0 1571 assert(size < SmallForDictionary, "just checking");
aoqi@0 1572 FreeChunk* res;
aoqi@0 1573 res = _indexedFreeList[size].get_chunk_at_head();
aoqi@0 1574 if (res == NULL) {
aoqi@0 1575 res = getChunkFromIndexedFreeListHelper(size);
aoqi@0 1576 }
aoqi@0 1577 _bt.verify_not_unallocated((HeapWord*) res, size);
aoqi@0 1578 assert(res == NULL || res->size() == size, "Incorrect block size");
aoqi@0 1579 return res;
aoqi@0 1580 }
aoqi@0 1581
aoqi@0 1582 FreeChunk*
aoqi@0 1583 CompactibleFreeListSpace::getChunkFromIndexedFreeListHelper(size_t size,
aoqi@0 1584 bool replenish) {
aoqi@0 1585 assert_locked();
aoqi@0 1586 FreeChunk* fc = NULL;
aoqi@0 1587 if (size < SmallForDictionary) {
aoqi@0 1588 assert(_indexedFreeList[size].head() == NULL ||
aoqi@0 1589 _indexedFreeList[size].surplus() <= 0,
aoqi@0 1590 "List for this size should be empty or under populated");
aoqi@0 1591 // Try best fit in exact lists before replenishing the list
aoqi@0 1592 if (!bestFitFirst() || (fc = bestFitSmall(size)) == NULL) {
aoqi@0 1593 // Replenish list.
aoqi@0 1594 //
aoqi@0 1595 // Things tried that failed.
aoqi@0 1596 // Tried allocating out of the two LinAB's first before
aoqi@0 1597 // replenishing lists.
aoqi@0 1598 // Tried small linAB of size 256 (size in indexed list)
aoqi@0 1599 // and replenishing indexed lists from the small linAB.
aoqi@0 1600 //
aoqi@0 1601 FreeChunk* newFc = NULL;
aoqi@0 1602 const size_t replenish_size = CMSIndexedFreeListReplenish * size;
aoqi@0 1603 if (replenish_size < SmallForDictionary) {
aoqi@0 1604 // Do not replenish from an underpopulated size.
aoqi@0 1605 if (_indexedFreeList[replenish_size].surplus() > 0 &&
aoqi@0 1606 _indexedFreeList[replenish_size].head() != NULL) {
aoqi@0 1607 newFc = _indexedFreeList[replenish_size].get_chunk_at_head();
aoqi@0 1608 } else if (bestFitFirst()) {
aoqi@0 1609 newFc = bestFitSmall(replenish_size);
aoqi@0 1610 }
aoqi@0 1611 }
aoqi@0 1612 if (newFc == NULL && replenish_size > size) {
aoqi@0 1613 assert(CMSIndexedFreeListReplenish > 1, "ctl pt invariant");
aoqi@0 1614 newFc = getChunkFromIndexedFreeListHelper(replenish_size, false);
aoqi@0 1615 }
aoqi@0 1616 // Note: The stats update re split-death of block obtained above
aoqi@0 1617 // will be recorded below precisely when we know we are going to
aoqi@0 1618 // be actually splitting it into more than one piece.
aoqi@0 1619 if (newFc != NULL) {
aoqi@0 1620 if (replenish || CMSReplenishIntermediate) {
aoqi@0 1621 // Replenish this list and return one block to caller.
aoqi@0 1622 size_t i;
aoqi@0 1623 FreeChunk *curFc, *nextFc;
aoqi@0 1624 size_t num_blk = newFc->size() / size;
aoqi@0 1625 assert(num_blk >= 1, "Smaller than requested?");
aoqi@0 1626 assert(newFc->size() % size == 0, "Should be integral multiple of request");
aoqi@0 1627 if (num_blk > 1) {
aoqi@0 1628 // we are sure we will be splitting the block just obtained
aoqi@0 1629 // into multiple pieces; record the split-death of the original
aoqi@0 1630 splitDeath(replenish_size);
aoqi@0 1631 }
aoqi@0 1632 // carve up and link blocks 0, ..., num_blk - 2
aoqi@0 1633 // The last chunk is not added to the lists but is returned as the
aoqi@0 1634 // free chunk.
aoqi@0 1635 for (curFc = newFc, nextFc = (FreeChunk*)((HeapWord*)curFc + size),
aoqi@0 1636 i = 0;
aoqi@0 1637 i < (num_blk - 1);
aoqi@0 1638 curFc = nextFc, nextFc = (FreeChunk*)((HeapWord*)nextFc + size),
aoqi@0 1639 i++) {
aoqi@0 1640 curFc->set_size(size);
aoqi@0 1641 // Don't record this as a return in order to try and
aoqi@0 1642 // determine the "returns" from a GC.
aoqi@0 1643 _bt.verify_not_unallocated((HeapWord*) fc, size);
aoqi@0 1644 _indexedFreeList[size].return_chunk_at_tail(curFc, false);
aoqi@0 1645 _bt.mark_block((HeapWord*)curFc, size);
aoqi@0 1646 split_birth(size);
aoqi@0 1647 // Don't record the initial population of the indexed list
aoqi@0 1648 // as a split birth.
aoqi@0 1649 }
aoqi@0 1650
aoqi@0 1651 // check that the arithmetic was OK above
aoqi@0 1652 assert((HeapWord*)nextFc == (HeapWord*)newFc + num_blk*size,
aoqi@0 1653 "inconsistency in carving newFc");
aoqi@0 1654 curFc->set_size(size);
aoqi@0 1655 _bt.mark_block((HeapWord*)curFc, size);
aoqi@0 1656 split_birth(size);
aoqi@0 1657 fc = curFc;
aoqi@0 1658 } else {
aoqi@0 1659 // Return entire block to caller
aoqi@0 1660 fc = newFc;
aoqi@0 1661 }
aoqi@0 1662 }
aoqi@0 1663 }
aoqi@0 1664 } else {
aoqi@0 1665 // Get a free chunk from the free chunk dictionary to be returned to
aoqi@0 1666 // replenish the indexed free list.
aoqi@0 1667 fc = getChunkFromDictionaryExact(size);
aoqi@0 1668 }
aoqi@0 1669 // assert(fc == NULL || fc->is_free(), "Should be returning a free chunk");
aoqi@0 1670 return fc;
aoqi@0 1671 }
aoqi@0 1672
aoqi@0 1673 FreeChunk*
aoqi@0 1674 CompactibleFreeListSpace::getChunkFromDictionary(size_t size) {
aoqi@0 1675 assert_locked();
aoqi@0 1676 FreeChunk* fc = _dictionary->get_chunk(size,
aoqi@0 1677 FreeBlockDictionary<FreeChunk>::atLeast);
aoqi@0 1678 if (fc == NULL) {
aoqi@0 1679 return NULL;
aoqi@0 1680 }
aoqi@0 1681 _bt.allocated((HeapWord*)fc, fc->size());
aoqi@0 1682 if (fc->size() >= size + MinChunkSize) {
aoqi@0 1683 fc = splitChunkAndReturnRemainder(fc, size);
aoqi@0 1684 }
aoqi@0 1685 assert(fc->size() >= size, "chunk too small");
aoqi@0 1686 assert(fc->size() < size + MinChunkSize, "chunk too big");
aoqi@0 1687 _bt.verify_single_block((HeapWord*)fc, fc->size());
aoqi@0 1688 return fc;
aoqi@0 1689 }
aoqi@0 1690
aoqi@0 1691 FreeChunk*
aoqi@0 1692 CompactibleFreeListSpace::getChunkFromDictionaryExact(size_t size) {
aoqi@0 1693 assert_locked();
aoqi@0 1694 FreeChunk* fc = _dictionary->get_chunk(size,
aoqi@0 1695 FreeBlockDictionary<FreeChunk>::atLeast);
aoqi@0 1696 if (fc == NULL) {
aoqi@0 1697 return fc;
aoqi@0 1698 }
aoqi@0 1699 _bt.allocated((HeapWord*)fc, fc->size());
aoqi@0 1700 if (fc->size() == size) {
aoqi@0 1701 _bt.verify_single_block((HeapWord*)fc, size);
aoqi@0 1702 return fc;
aoqi@0 1703 }
aoqi@0 1704 assert(fc->size() > size, "get_chunk() guarantee");
aoqi@0 1705 if (fc->size() < size + MinChunkSize) {
aoqi@0 1706 // Return the chunk to the dictionary and go get a bigger one.
aoqi@0 1707 returnChunkToDictionary(fc);
aoqi@0 1708 fc = _dictionary->get_chunk(size + MinChunkSize,
aoqi@0 1709 FreeBlockDictionary<FreeChunk>::atLeast);
aoqi@0 1710 if (fc == NULL) {
aoqi@0 1711 return NULL;
aoqi@0 1712 }
aoqi@0 1713 _bt.allocated((HeapWord*)fc, fc->size());
aoqi@0 1714 }
aoqi@0 1715 assert(fc->size() >= size + MinChunkSize, "tautology");
aoqi@0 1716 fc = splitChunkAndReturnRemainder(fc, size);
aoqi@0 1717 assert(fc->size() == size, "chunk is wrong size");
aoqi@0 1718 _bt.verify_single_block((HeapWord*)fc, size);
aoqi@0 1719 return fc;
aoqi@0 1720 }
aoqi@0 1721
aoqi@0 1722 void
aoqi@0 1723 CompactibleFreeListSpace::returnChunkToDictionary(FreeChunk* chunk) {
aoqi@0 1724 assert_locked();
aoqi@0 1725
aoqi@0 1726 size_t size = chunk->size();
aoqi@0 1727 _bt.verify_single_block((HeapWord*)chunk, size);
aoqi@0 1728 // adjust _unallocated_block downward, as necessary
aoqi@0 1729 _bt.freed((HeapWord*)chunk, size);
aoqi@0 1730 _dictionary->return_chunk(chunk);
aoqi@0 1731 #ifndef PRODUCT
aoqi@0 1732 if (CMSCollector::abstract_state() != CMSCollector::Sweeping) {
aoqi@0 1733 TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >* tc = TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >::as_TreeChunk(chunk);
aoqi@0 1734 TreeList<FreeChunk, AdaptiveFreeList<FreeChunk> >* tl = tc->list();
aoqi@0 1735 tl->verify_stats();
aoqi@0 1736 }
aoqi@0 1737 #endif // PRODUCT
aoqi@0 1738 }
aoqi@0 1739
aoqi@0 1740 void
aoqi@0 1741 CompactibleFreeListSpace::returnChunkToFreeList(FreeChunk* fc) {
aoqi@0 1742 assert_locked();
aoqi@0 1743 size_t size = fc->size();
aoqi@0 1744 _bt.verify_single_block((HeapWord*) fc, size);
aoqi@0 1745 _bt.verify_not_unallocated((HeapWord*) fc, size);
aoqi@0 1746 if (_adaptive_freelists) {
aoqi@0 1747 _indexedFreeList[size].return_chunk_at_tail(fc);
aoqi@0 1748 } else {
aoqi@0 1749 _indexedFreeList[size].return_chunk_at_head(fc);
aoqi@0 1750 }
aoqi@0 1751 #ifndef PRODUCT
aoqi@0 1752 if (CMSCollector::abstract_state() != CMSCollector::Sweeping) {
aoqi@0 1753 _indexedFreeList[size].verify_stats();
aoqi@0 1754 }
aoqi@0 1755 #endif // PRODUCT
aoqi@0 1756 }
aoqi@0 1757
aoqi@0 1758 // Add chunk to end of last block -- if it's the largest
aoqi@0 1759 // block -- and update BOT and census data. We would
aoqi@0 1760 // of course have preferred to coalesce it with the
aoqi@0 1761 // last block, but it's currently less expensive to find the
aoqi@0 1762 // largest block than it is to find the last.
aoqi@0 1763 void
aoqi@0 1764 CompactibleFreeListSpace::addChunkToFreeListsAtEndRecordingStats(
aoqi@0 1765 HeapWord* chunk, size_t size) {
aoqi@0 1766 // check that the chunk does lie in this space!
aoqi@0 1767 assert(chunk != NULL && is_in_reserved(chunk), "Not in this space!");
aoqi@0 1768 // One of the parallel gc task threads may be here
aoqi@0 1769 // whilst others are allocating.
aoqi@0 1770 Mutex* lock = NULL;
aoqi@0 1771 if (ParallelGCThreads != 0) {
aoqi@0 1772 lock = &_parDictionaryAllocLock;
aoqi@0 1773 }
aoqi@0 1774 FreeChunk* ec;
aoqi@0 1775 {
aoqi@0 1776 MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
aoqi@0 1777 ec = dictionary()->find_largest_dict(); // get largest block
aoqi@0 1778 if (ec != NULL && ec->end() == (uintptr_t*) chunk) {
aoqi@0 1779 // It's a coterminal block - we can coalesce.
aoqi@0 1780 size_t old_size = ec->size();
aoqi@0 1781 coalDeath(old_size);
aoqi@0 1782 removeChunkFromDictionary(ec);
aoqi@0 1783 size += old_size;
aoqi@0 1784 } else {
aoqi@0 1785 ec = (FreeChunk*)chunk;
aoqi@0 1786 }
aoqi@0 1787 }
aoqi@0 1788 ec->set_size(size);
aoqi@0 1789 debug_only(ec->mangleFreed(size));
aoqi@0 1790 if (size < SmallForDictionary && ParallelGCThreads != 0) {
aoqi@0 1791 lock = _indexedFreeListParLocks[size];
aoqi@0 1792 }
aoqi@0 1793 MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
aoqi@0 1794 addChunkAndRepairOffsetTable((HeapWord*)ec, size, true);
aoqi@0 1795 // Record the birth under the lock, since the recording involves
aoqi@0 1796 // manipulation of the list on which the chunk lives and,
aoqi@0 1797 // if the chunk is allocated and is the last one on the list,
aoqi@0 1798 // the list can go away.
aoqi@0 1799 coalBirth(size);
aoqi@0 1800 }
aoqi@0 1801
aoqi@0 1802 void
aoqi@0 1803 CompactibleFreeListSpace::addChunkToFreeLists(HeapWord* chunk,
aoqi@0 1804 size_t size) {
aoqi@0 1805 // check that the chunk does lie in this space!
aoqi@0 1806 assert(chunk != NULL && is_in_reserved(chunk), "Not in this space!");
aoqi@0 1807 assert_locked();
aoqi@0 1808 _bt.verify_single_block(chunk, size);
aoqi@0 1809
aoqi@0 1810 FreeChunk* fc = (FreeChunk*) chunk;
aoqi@0 1811 fc->set_size(size);
aoqi@0 1812 debug_only(fc->mangleFreed(size));
aoqi@0 1813 if (size < SmallForDictionary) {
aoqi@0 1814 returnChunkToFreeList(fc);
aoqi@0 1815 } else {
aoqi@0 1816 returnChunkToDictionary(fc);
aoqi@0 1817 }
aoqi@0 1818 }
aoqi@0 1819
aoqi@0 1820 void
aoqi@0 1821 CompactibleFreeListSpace::addChunkAndRepairOffsetTable(HeapWord* chunk,
aoqi@0 1822 size_t size, bool coalesced) {
aoqi@0 1823 assert_locked();
aoqi@0 1824 assert(chunk != NULL, "null chunk");
aoqi@0 1825 if (coalesced) {
aoqi@0 1826 // repair BOT
aoqi@0 1827 _bt.single_block(chunk, size);
aoqi@0 1828 }
aoqi@0 1829 addChunkToFreeLists(chunk, size);
aoqi@0 1830 }
aoqi@0 1831
aoqi@0 1832 // We _must_ find the purported chunk on our free lists;
aoqi@0 1833 // we assert if we don't.
aoqi@0 1834 void
aoqi@0 1835 CompactibleFreeListSpace::removeFreeChunkFromFreeLists(FreeChunk* fc) {
aoqi@0 1836 size_t size = fc->size();
aoqi@0 1837 assert_locked();
aoqi@0 1838 debug_only(verifyFreeLists());
aoqi@0 1839 if (size < SmallForDictionary) {
aoqi@0 1840 removeChunkFromIndexedFreeList(fc);
aoqi@0 1841 } else {
aoqi@0 1842 removeChunkFromDictionary(fc);
aoqi@0 1843 }
aoqi@0 1844 _bt.verify_single_block((HeapWord*)fc, size);
aoqi@0 1845 debug_only(verifyFreeLists());
aoqi@0 1846 }
aoqi@0 1847
aoqi@0 1848 void
aoqi@0 1849 CompactibleFreeListSpace::removeChunkFromDictionary(FreeChunk* fc) {
aoqi@0 1850 size_t size = fc->size();
aoqi@0 1851 assert_locked();
aoqi@0 1852 assert(fc != NULL, "null chunk");
aoqi@0 1853 _bt.verify_single_block((HeapWord*)fc, size);
aoqi@0 1854 _dictionary->remove_chunk(fc);
aoqi@0 1855 // adjust _unallocated_block upward, as necessary
aoqi@0 1856 _bt.allocated((HeapWord*)fc, size);
aoqi@0 1857 }
aoqi@0 1858
aoqi@0 1859 void
aoqi@0 1860 CompactibleFreeListSpace::removeChunkFromIndexedFreeList(FreeChunk* fc) {
aoqi@0 1861 assert_locked();
aoqi@0 1862 size_t size = fc->size();
aoqi@0 1863 _bt.verify_single_block((HeapWord*)fc, size);
aoqi@0 1864 NOT_PRODUCT(
aoqi@0 1865 if (FLSVerifyIndexTable) {
aoqi@0 1866 verifyIndexedFreeList(size);
aoqi@0 1867 }
aoqi@0 1868 )
aoqi@0 1869 _indexedFreeList[size].remove_chunk(fc);
aoqi@0 1870 NOT_PRODUCT(
aoqi@0 1871 if (FLSVerifyIndexTable) {
aoqi@0 1872 verifyIndexedFreeList(size);
aoqi@0 1873 }
aoqi@0 1874 )
aoqi@0 1875 }
aoqi@0 1876
aoqi@0 1877 FreeChunk* CompactibleFreeListSpace::bestFitSmall(size_t numWords) {
aoqi@0 1878 /* A hint is the next larger size that has a surplus.
aoqi@0 1879 Start search at a size large enough to guarantee that
aoqi@0 1880 the excess is >= MIN_CHUNK. */
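/* For example (illustrative): with numWords = 20 and MinChunkSize = 4,
   the search starts at the 24-word list and follows the hint chain
   upward until a list with a surplus chunk is found. */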
aoqi@0 1881 size_t start = align_object_size(numWords + MinChunkSize);
aoqi@0 1882 if (start < IndexSetSize) {
aoqi@0 1883 AdaptiveFreeList<FreeChunk>* it = _indexedFreeList;
aoqi@0 1884 size_t hint = _indexedFreeList[start].hint();
aoqi@0 1885 while (hint < IndexSetSize) {
aoqi@0 1886 assert(hint % MinObjAlignment == 0, "hint should be aligned");
aoqi@0 1887 AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[hint];
aoqi@0 1888 if (fl->surplus() > 0 && fl->head() != NULL) {
aoqi@0 1889 // Found a list with surplus, reset original hint
aoqi@0 1890 // and split out a free chunk which is returned.
aoqi@0 1891 _indexedFreeList[start].set_hint(hint);
aoqi@0 1892 FreeChunk* res = getFromListGreater(fl, numWords);
aoqi@0 1893 assert(res == NULL || res->is_free(),
aoqi@0 1894 "Should be returning a free chunk");
aoqi@0 1895 return res;
aoqi@0 1896 }
aoqi@0 1897 hint = fl->hint(); /* keep looking */
aoqi@0 1898 }
aoqi@0 1899 /* None found. */
aoqi@0 1900 it[start].set_hint(IndexSetSize);
aoqi@0 1901 }
aoqi@0 1902 return NULL;
aoqi@0 1903 }
aoqi@0 1904
aoqi@0 1905 /* Requires fl->size >= numWords + MinChunkSize */
aoqi@0 1906 FreeChunk* CompactibleFreeListSpace::getFromListGreater(AdaptiveFreeList<FreeChunk>* fl,
aoqi@0 1907 size_t numWords) {
aoqi@0 1908 FreeChunk *curr = fl->head();
aoqi@0 1909 assert(curr != NULL, "List is empty");
aoqi@0 1910 size_t oldNumWords = curr->size();
aoqi@0 1911 assert(numWords >= MinChunkSize, "Word size is too small");
aoqi@0 1912 assert(oldNumWords >= numWords + MinChunkSize,
aoqi@0 1913 "Size of chunks in the list is too small");
aoqi@0 1914
aoqi@0 1915 fl->remove_chunk(curr);
aoqi@0 1916 // recorded indirectly by splitChunkAndReturnRemainder -
aoqi@0 1917 // smallSplit(oldNumWords, numWords);
aoqi@0 1918 FreeChunk* new_chunk = splitChunkAndReturnRemainder(curr, numWords);
aoqi@0 1919 // Does anything have to be done for the remainder in terms of
aoqi@0 1920 // fixing the card table?
aoqi@0 1921 assert(new_chunk == NULL || new_chunk->is_free(),
aoqi@0 1922 "Should be returning a free chunk");
aoqi@0 1923 return new_chunk;
aoqi@0 1924 }
aoqi@0 1925
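// Split 'chunk' so that its first 'new_size' words remain in 'chunk' (which
// is returned) while the remainder is carved off, marked free, returned to
// the indexed free lists or the dictionary as appropriate, and recorded in
// the split census.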
aoqi@0 1926 FreeChunk*
aoqi@0 1927 CompactibleFreeListSpace::splitChunkAndReturnRemainder(FreeChunk* chunk,
aoqi@0 1928 size_t new_size) {
aoqi@0 1929 assert_locked();
aoqi@0 1930 size_t size = chunk->size();
aoqi@0 1931 assert(size > new_size, "Split from a smaller block?");
aoqi@0 1932 assert(is_aligned(chunk), "alignment problem");
aoqi@0 1933 assert(size == adjustObjectSize(size), "alignment problem");
aoqi@0 1934 size_t rem_size = size - new_size;
aoqi@0 1935 assert(rem_size == adjustObjectSize(rem_size), "alignment problem");
aoqi@0 1936 assert(rem_size >= MinChunkSize, "Free chunk smaller than minimum");
aoqi@0 1937 FreeChunk* ffc = (FreeChunk*)((HeapWord*)chunk + new_size);
aoqi@0 1938 assert(is_aligned(ffc), "alignment problem");
aoqi@0 1939 ffc->set_size(rem_size);
aoqi@0 1940 ffc->link_next(NULL);
aoqi@0 1941 ffc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
aoqi@0 1942 // Above must occur before BOT is updated below.
aoqi@0 1943 // adjust block offset table
aoqi@0 1944 OrderAccess::storestore();
aoqi@0 1945 assert(chunk->is_free() && ffc->is_free(), "Error");
aoqi@0 1946 _bt.split_block((HeapWord*)chunk, chunk->size(), new_size);
aoqi@0 1947 if (rem_size < SmallForDictionary) {
aoqi@0 1948 bool is_par = (SharedHeap::heap()->n_par_threads() > 0);
aoqi@0 1949 if (is_par) _indexedFreeListParLocks[rem_size]->lock();
aoqi@0 1950 assert(!is_par ||
aoqi@0 1951 (SharedHeap::heap()->n_par_threads() ==
aoqi@0 1952 SharedHeap::heap()->workers()->active_workers()), "Mismatch");
aoqi@0 1953 returnChunkToFreeList(ffc);
aoqi@0 1954 split(size, rem_size);
aoqi@0 1955 if (is_par) _indexedFreeListParLocks[rem_size]->unlock();
aoqi@0 1956 } else {
aoqi@0 1957 returnChunkToDictionary(ffc);
aoqi@0 1958 split(size, rem_size);
aoqi@0 1959 }
aoqi@0 1960 chunk->set_size(new_size);
aoqi@0 1961 return chunk;
aoqi@0 1962 }
aoqi@0 1963
aoqi@0 1964 void
aoqi@0 1965 CompactibleFreeListSpace::sweep_completed() {
aoqi@0 1966 // Now that space is probably plentiful, refill linear
aoqi@0 1967 // allocation blocks as needed.
aoqi@0 1968 refillLinearAllocBlocksIfNeeded();
aoqi@0 1969 }
aoqi@0 1970
aoqi@0 1971 void
aoqi@0 1972 CompactibleFreeListSpace::gc_prologue() {
aoqi@0 1973 assert_locked();
aoqi@0 1974 if (PrintFLSStatistics != 0) {
aoqi@0 1975 gclog_or_tty->print("Before GC:\n");
aoqi@0 1976 reportFreeListStatistics();
aoqi@0 1977 }
aoqi@0 1978 refillLinearAllocBlocksIfNeeded();
aoqi@0 1979 }
aoqi@0 1980
aoqi@0 1981 void
aoqi@0 1982 CompactibleFreeListSpace::gc_epilogue() {
aoqi@0 1983 assert_locked();
aoqi@0 1984 if (PrintGCDetails && Verbose && !_adaptive_freelists) {
aoqi@0 1985 if (_smallLinearAllocBlock._word_size == 0)
aoqi@0 1986 warning("CompactibleFreeListSpace(epilogue):: Linear allocation failure");
aoqi@0 1987 }
aoqi@0 1988 assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");
aoqi@0 1989 _promoInfo.stopTrackingPromotions();
aoqi@0 1990 repairLinearAllocationBlocks();
aoqi@0 1991 // Print Space's stats
aoqi@0 1992 if (PrintFLSStatistics != 0) {
aoqi@0 1993 gclog_or_tty->print("After GC:\n");
aoqi@0 1994 reportFreeListStatistics();
aoqi@0 1995 }
aoqi@0 1996 }
aoqi@0 1997
aoqi@0 1998 // Iteration support, mostly delegated from a CMS generation
aoqi@0 1999
aoqi@0 2000 void CompactibleFreeListSpace::save_marks() {
aoqi@0 2001 assert(Thread::current()->is_VM_thread(),
aoqi@0 2002 "Global variable should only be set when single-threaded");
aoqi@0 2003 // Mark the "end" of the used space at the time of this call;
aoqi@0 2004 // note, however, that promoted objects from this point
aoqi@0 2005 // on are tracked in the _promoInfo below.
aoqi@0 2006 set_saved_mark_word(unallocated_block());
aoqi@0 2007 #ifdef ASSERT
aoqi@0 2008 // Check the sanity of save_marks() etc.
aoqi@0 2009 MemRegion ur = used_region();
aoqi@0 2010 MemRegion urasm = used_region_at_save_marks();
aoqi@0 2011 assert(ur.contains(urasm),
aoqi@0 2012 err_msg(" Error at save_marks(): [" PTR_FORMAT "," PTR_FORMAT ")"
aoqi@0 2013 " should contain [" PTR_FORMAT "," PTR_FORMAT ")",
aoqi@0 2014 p2i(ur.start()), p2i(ur.end()), p2i(urasm.start()), p2i(urasm.end())));
aoqi@0 2015 #endif
aoqi@0 2016 // inform allocator that promotions should be tracked.
aoqi@0 2017 assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");
aoqi@0 2018 _promoInfo.startTrackingPromotions();
aoqi@0 2019 }
aoqi@0 2020
aoqi@0 2021 bool CompactibleFreeListSpace::no_allocs_since_save_marks() {
aoqi@0 2022 assert(_promoInfo.tracking(), "No preceding save_marks?");
aoqi@0 2023 assert(SharedHeap::heap()->n_par_threads() == 0,
aoqi@0 2024 "Shouldn't be called if using parallel gc.");
aoqi@0 2025 return _promoInfo.noPromotions();
aoqi@0 2026 }
aoqi@0 2027
aoqi@0 2028 #define CFLS_OOP_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix) \
aoqi@0 2029 \
aoqi@0 2030 void CompactibleFreeListSpace:: \
aoqi@0 2031 oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk) { \
aoqi@0 2032 assert(SharedHeap::heap()->n_par_threads() == 0, \
aoqi@0 2033 "Shouldn't be called (yet) during parallel part of gc."); \
aoqi@0 2034 _promoInfo.promoted_oops_iterate##nv_suffix(blk); \
aoqi@0 2035 /* \
aoqi@0 2036 * This also restores any displaced headers and removes the elements from \
aoqi@0 2037 * the iteration set as they are processed, so that we have a clean slate \
aoqi@0 2038 * at the end of the iteration. Note, thus, that if new objects are \
aoqi@0 2039 * promoted as a result of the iteration they are iterated over as well. \
aoqi@0 2040 */ \
aoqi@0 2041 assert(_promoInfo.noPromotions(), "_promoInfo inconsistency"); \
aoqi@0 2042 }
aoqi@0 2043
aoqi@0 2044 ALL_SINCE_SAVE_MARKS_CLOSURES(CFLS_OOP_SINCE_SAVE_MARKS_DEFN)
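// The macro application above generates one oop_since_save_marks_iterate
// method per (closure type, suffix) pair; for a hypothetical pair
// (FooClosure, _nv) the generated method is, roughly:
//   void CompactibleFreeListSpace::oop_since_save_marks_iterate_nv(FooClosure* blk) {
//     ...; _promoInfo.promoted_oops_iterate_nv(blk); ...
//   }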
aoqi@0 2045
aoqi@0 2046 bool CompactibleFreeListSpace::linearAllocationWouldFail() const {
aoqi@0 2047 return _smallLinearAllocBlock._word_size == 0;
aoqi@0 2048 }
aoqi@0 2049
aoqi@0 2050 void CompactibleFreeListSpace::repairLinearAllocationBlocks() {
aoqi@0 2051 // Fix up linear allocation blocks to look like free blocks
aoqi@0 2052 repairLinearAllocBlock(&_smallLinearAllocBlock);
aoqi@0 2053 }
aoqi@0 2054
aoqi@0 2055 void CompactibleFreeListSpace::repairLinearAllocBlock(LinearAllocBlock* blk) {
aoqi@0 2056 assert_locked();
aoqi@0 2057 if (blk->_ptr != NULL) {
aoqi@0 2058 assert(blk->_word_size != 0 && blk->_word_size >= MinChunkSize,
aoqi@0 2059 "Minimum block size requirement");
aoqi@0 2060 FreeChunk* fc = (FreeChunk*)(blk->_ptr);
aoqi@0 2061 fc->set_size(blk->_word_size);
aoqi@0 2062 fc->link_prev(NULL); // mark as free
aoqi@0 2063 fc->dontCoalesce();
aoqi@0 2064 assert(fc->is_free(), "just marked it free");
aoqi@0 2065 assert(fc->cantCoalesce(), "just marked it uncoalescable");
aoqi@0 2066 }
aoqi@0 2067 }
aoqi@0 2068
aoqi@0 2069 void CompactibleFreeListSpace::refillLinearAllocBlocksIfNeeded() {
aoqi@0 2070 assert_locked();
aoqi@0 2071 if (_smallLinearAllocBlock._ptr == NULL) {
aoqi@0 2072 assert(_smallLinearAllocBlock._word_size == 0,
aoqi@0 2073 "Size of linAB should be zero if the ptr is NULL");
aoqi@0 2074 // Reset the linAB refill and allocation size limit.
aoqi@0 2075 _smallLinearAllocBlock.set(0, 0, 1024*SmallForLinearAlloc, SmallForLinearAlloc);
aoqi@0 2076 }
aoqi@0 2077 refillLinearAllocBlockIfNeeded(&_smallLinearAllocBlock);
aoqi@0 2078 }
aoqi@0 2079
aoqi@0 2080 void
aoqi@0 2081 CompactibleFreeListSpace::refillLinearAllocBlockIfNeeded(LinearAllocBlock* blk) {
aoqi@0 2082 assert_locked();
aoqi@0 2083 assert((blk->_ptr == NULL && blk->_word_size == 0) ||
aoqi@0 2084 (blk->_ptr != NULL && blk->_word_size >= MinChunkSize),
aoqi@0 2085 "blk invariant");
aoqi@0 2086 if (blk->_ptr == NULL) {
aoqi@0 2087 refillLinearAllocBlock(blk);
aoqi@0 2088 }
aoqi@0 2089 if (PrintMiscellaneous && Verbose) {
aoqi@0 2090 if (blk->_word_size == 0) {
aoqi@0 2091 warning("CompactibleFreeListSpace(prologue):: Linear allocation failure");
aoqi@0 2092 }
aoqi@0 2093 }
aoqi@0 2094 }
aoqi@0 2095
aoqi@0 2096 void
aoqi@0 2097 CompactibleFreeListSpace::refillLinearAllocBlock(LinearAllocBlock* blk) {
aoqi@0 2098 assert_locked();
aoqi@0 2099 assert(blk->_word_size == 0 && blk->_ptr == NULL,
aoqi@0 2100 "linear allocation block should be empty");
aoqi@0 2101 FreeChunk* fc;
aoqi@0 2102 if (blk->_refillSize < SmallForDictionary &&
aoqi@0 2103 (fc = getChunkFromIndexedFreeList(blk->_refillSize)) != NULL) {
aoqi@0 2104 // A linAB's strategy might be to use small sizes to reduce
aoqi@0 2105 // fragmentation but still get the benefits of allocation from a
aoqi@0 2106 // linAB.
aoqi@0 2107 } else {
aoqi@0 2108 fc = getChunkFromDictionary(blk->_refillSize);
aoqi@0 2109 }
aoqi@0 2110 if (fc != NULL) {
aoqi@0 2111 blk->_ptr = (HeapWord*)fc;
aoqi@0 2112 blk->_word_size = fc->size();
aoqi@0 2113 fc->dontCoalesce(); // to prevent sweeper from sweeping us up
aoqi@0 2114 }
aoqi@0 2115 }
aoqi@0 2116
aoqi@0 2117 // Support for concurrent collection policy decisions.
aoqi@0 2118 bool CompactibleFreeListSpace::should_concurrent_collect() const {
aoqi@0 2119 // In the future we might want to add in fragmentation stats --
aoqi@0 2120 // including erosion of the "mountain" into this decision as well.
aoqi@0 2121 return !adaptive_freelists() && linearAllocationWouldFail();
aoqi@0 2122 }
aoqi@0 2123
aoqi@0 2124 // Support for compaction
aoqi@0 2125
aoqi@0 2126 void CompactibleFreeListSpace::prepare_for_compaction(CompactPoint* cp) {
aoqi@0 2127 SCAN_AND_FORWARD(cp,end,block_is_obj,block_size);
aoqi@0 2128 // prepare_for_compaction() uses the space between live objects
aoqi@0 2129 // so that a later phase can skip dead space quickly. So verification
aoqi@0 2130 // of the free lists doesn't work afterwards.
aoqi@0 2131 }
aoqi@0 2132
aoqi@0 2133 #define obj_size(q) adjustObjectSize(oop(q)->size())
aoqi@0 2134 #define adjust_obj_size(s) adjustObjectSize(s)
aoqi@0 2135
aoqi@0 2136 void CompactibleFreeListSpace::adjust_pointers() {
aoqi@0 2137 // In other versions of adjust_pointers(), a bail out
aoqi@0 2138 // based on the amount of live data in the generation
aoqi@0 2139 // (i.e., if 0, bail out) may be used.
aoqi@0 2140 // Cannot test used() == 0 here because the free lists have already
aoqi@0 2141 // been mangled by the compaction.
aoqi@0 2142
aoqi@0 2143 SCAN_AND_ADJUST_POINTERS(adjust_obj_size);
aoqi@0 2144 // See note about verification in prepare_for_compaction().
aoqi@0 2145 }
aoqi@0 2146
aoqi@0 2147 void CompactibleFreeListSpace::compact() {
aoqi@0 2148 SCAN_AND_COMPACT(obj_size);
aoqi@0 2149 }
aoqi@0 2150
aoqi@0 2151 // fragmentation_metric = 1 - [sum of (fbs**2) / (sum of fbs)**2]
aoqi@0 2152 // where fbs are the free block sizes
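// For example (illustrative): a single free block of 100 words gives
//   1 - (100*100)/(100*100) = 0 (no fragmentation), while ten free blocks
//   of 10 words each give 1 - (10*10*10)/(100*100) = 0.9.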
aoqi@0 2153 double CompactibleFreeListSpace::flsFrag() const {
aoqi@0 2154 size_t itabFree = totalSizeInIndexedFreeLists();
aoqi@0 2155 double frag = 0.0;
aoqi@0 2156 size_t i;
aoqi@0 2157
aoqi@0 2158 for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
aoqi@0 2159 double sz = i;
aoqi@0 2160 frag += _indexedFreeList[i].count() * (sz * sz);
aoqi@0 2161 }
aoqi@0 2162
aoqi@0 2163 double totFree = itabFree +
aoqi@0 2164 _dictionary->total_chunk_size(DEBUG_ONLY(freelistLock()));
aoqi@0 2165 if (totFree > 0) {
aoqi@0 2166 frag = ((frag + _dictionary->sum_of_squared_block_sizes()) /
aoqi@0 2167 (totFree * totFree));
aoqi@0 2168 frag = (double)1.0 - frag;
aoqi@0 2169 } else {
aoqi@0 2170 assert(frag == 0.0, "Follows from totFree == 0");
aoqi@0 2171 }
aoqi@0 2172 return frag;
aoqi@0 2173 }
aoqi@0 2174
aoqi@0 2175 void CompactibleFreeListSpace::beginSweepFLCensus(
aoqi@0 2176 float inter_sweep_current,
aoqi@0 2177 float inter_sweep_estimate,
aoqi@0 2178 float intra_sweep_estimate) {
aoqi@0 2179 assert_locked();
aoqi@0 2180 size_t i;
aoqi@0 2181 for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
aoqi@0 2182 AdaptiveFreeList<FreeChunk>* fl = &_indexedFreeList[i];
aoqi@0 2183 if (PrintFLSStatistics > 1) {
aoqi@0 2184 gclog_or_tty->print("size[" SIZE_FORMAT "] : ", i);
aoqi@0 2185 }
aoqi@0 2186 fl->compute_desired(inter_sweep_current, inter_sweep_estimate, intra_sweep_estimate);
aoqi@0 2187 fl->set_coal_desired((ssize_t)((double)fl->desired() * CMSSmallCoalSurplusPercent));
aoqi@0 2188 fl->set_before_sweep(fl->count());
aoqi@0 2189 fl->set_bfr_surp(fl->surplus());
aoqi@0 2190 }
aoqi@0 2191 _dictionary->begin_sweep_dict_census(CMSLargeCoalSurplusPercent,
aoqi@0 2192 inter_sweep_current,
aoqi@0 2193 inter_sweep_estimate,
aoqi@0 2194 intra_sweep_estimate);
aoqi@0 2195 }
aoqi@0 2196
aoqi@0 2197 void CompactibleFreeListSpace::setFLSurplus() {
aoqi@0 2198 assert_locked();
aoqi@0 2199 size_t i;
aoqi@0 2200 for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
aoqi@0 2201 AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
aoqi@0 2202 fl->set_surplus(fl->count() -
aoqi@0 2203 (ssize_t)((double)fl->desired() * CMSSmallSplitSurplusPercent));
aoqi@0 2204 }
aoqi@0 2205 }
aoqi@0 2206
aoqi@0 2207 void CompactibleFreeListSpace::setFLHints() {
aoqi@0 2208 assert_locked();
aoqi@0 2209 size_t i;
aoqi@0 2210 size_t h = IndexSetSize;
aoqi@0 2211 for (i = IndexSetSize - 1; i != 0; i -= IndexSetStride) {
aoqi@0 2212 AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
aoqi@0 2213 fl->set_hint(h);
aoqi@0 2214 if (fl->surplus() > 0) {
aoqi@0 2215 h = i;
aoqi@0 2216 }
aoqi@0 2217 }
aoqi@0 2218 }
aoqi@0 2219
aoqi@0 2220 void CompactibleFreeListSpace::clearFLCensus() {
aoqi@0 2221 assert_locked();
aoqi@0 2222 size_t i;
aoqi@0 2223 for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
aoqi@0 2224 AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
aoqi@0 2225 fl->set_prev_sweep(fl->count());
aoqi@0 2226 fl->set_coal_births(0);
aoqi@0 2227 fl->set_coal_deaths(0);
aoqi@0 2228 fl->set_split_births(0);
aoqi@0 2229 fl->set_split_deaths(0);
aoqi@0 2230 }
aoqi@0 2231 }
aoqi@0 2232
aoqi@0 2233 void CompactibleFreeListSpace::endSweepFLCensus(size_t sweep_count) {
aoqi@0 2234 if (PrintFLSStatistics > 0) {
aoqi@0 2235 HeapWord* largestAddr = (HeapWord*) dictionary()->find_largest_dict();
aoqi@0 2236 gclog_or_tty->print_cr("CMS: Large block " PTR_FORMAT,
aoqi@0 2237 p2i(largestAddr));
aoqi@0 2238 }
aoqi@0 2239 setFLSurplus();
aoqi@0 2240 setFLHints();
aoqi@0 2241 if (PrintGC && PrintFLSCensus > 0) {
aoqi@0 2242 printFLCensus(sweep_count);
aoqi@0 2243 }
aoqi@0 2244 clearFLCensus();
aoqi@0 2245 assert_locked();
aoqi@0 2246 _dictionary->end_sweep_dict_census(CMSLargeSplitSurplusPercent);
aoqi@0 2247 }
aoqi@0 2248
aoqi@0 2249 bool CompactibleFreeListSpace::coalOverPopulated(size_t size) {
aoqi@0 2250 if (size < SmallForDictionary) {
aoqi@0 2251 AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
aoqi@0 2252 return (fl->coal_desired() < 0) ||
aoqi@0 2253 ((int)fl->count() > fl->coal_desired());
aoqi@0 2254 } else {
aoqi@0 2255 return dictionary()->coal_dict_over_populated(size);
aoqi@0 2256 }
aoqi@0 2257 }
aoqi@0 2258
aoqi@0 2259 void CompactibleFreeListSpace::smallCoalBirth(size_t size) {
aoqi@0 2260 assert(size < SmallForDictionary, "Size too large for indexed list");
aoqi@0 2261 AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
aoqi@0 2262 fl->increment_coal_births();
aoqi@0 2263 fl->increment_surplus();
aoqi@0 2264 }
aoqi@0 2265
aoqi@0 2266 void CompactibleFreeListSpace::smallCoalDeath(size_t size) {
aoqi@0 2267 assert(size < SmallForDictionary, "Size too large for indexed list");
aoqi@0 2268 AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
aoqi@0 2269 fl->increment_coal_deaths();
aoqi@0 2270 fl->decrement_surplus();
aoqi@0 2271 }
aoqi@0 2272
aoqi@0 2273 void CompactibleFreeListSpace::coalBirth(size_t size) {
aoqi@0 2274 if (size < SmallForDictionary) {
aoqi@0 2275 smallCoalBirth(size);
aoqi@0 2276 } else {
aoqi@0 2277 dictionary()->dict_census_update(size,
aoqi@0 2278 false /* split */,
aoqi@0 2279 true /* birth */);
aoqi@0 2280 }
aoqi@0 2281 }
aoqi@0 2282
aoqi@0 2283 void CompactibleFreeListSpace::coalDeath(size_t size) {
aoqi@0 2284 if(size < SmallForDictionary) {
aoqi@0 2285 smallCoalDeath(size);
aoqi@0 2286 } else {
aoqi@0 2287 dictionary()->dict_census_update(size,
aoqi@0 2288 false /* split */,
aoqi@0 2289 false /* birth */);
aoqi@0 2290 }
aoqi@0 2291 }
aoqi@0 2292
aoqi@0 2293 void CompactibleFreeListSpace::smallSplitBirth(size_t size) {
aoqi@0 2294 assert(size < SmallForDictionary, "Size too large for indexed list");
aoqi@0 2295 AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
aoqi@0 2296 fl->increment_split_births();
aoqi@0 2297 fl->increment_surplus();
aoqi@0 2298 }
aoqi@0 2299
aoqi@0 2300 void CompactibleFreeListSpace::smallSplitDeath(size_t size) {
aoqi@0 2301 assert(size < SmallForDictionary, "Size too large for indexed list");
aoqi@0 2302 AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
aoqi@0 2303 fl->increment_split_deaths();
aoqi@0 2304 fl->decrement_surplus();
aoqi@0 2305 }
aoqi@0 2306
aoqi@0 2307 void CompactibleFreeListSpace::split_birth(size_t size) {
aoqi@0 2308 if (size < SmallForDictionary) {
aoqi@0 2309 smallSplitBirth(size);
aoqi@0 2310 } else {
aoqi@0 2311 dictionary()->dict_census_update(size,
aoqi@0 2312 true /* split */,
aoqi@0 2313 true /* birth */);
aoqi@0 2314 }
aoqi@0 2315 }
aoqi@0 2316
aoqi@0 2317 void CompactibleFreeListSpace::splitDeath(size_t size) {
aoqi@0 2318 if (size < SmallForDictionary) {
aoqi@0 2319 smallSplitDeath(size);
aoqi@0 2320 } else {
aoqi@0 2321 dictionary()->dict_census_update(size,
aoqi@0 2322 true /* split */,
aoqi@0 2323 false /* birth */);
aoqi@0 2324 }
aoqi@0 2325 }
aoqi@0 2326
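// Record the census effect of splitting a free chunk of size 'from' into two
// pieces of sizes 'to1' and 'from - to1': one split death for the original
// size and one split birth for each resulting piece.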
aoqi@0 2327 void CompactibleFreeListSpace::split(size_t from, size_t to1) {
aoqi@0 2328 size_t to2 = from - to1;
aoqi@0 2329 splitDeath(from);
aoqi@0 2330 split_birth(to1);
aoqi@0 2331 split_birth(to2);
aoqi@0 2332 }
aoqi@0 2333
aoqi@0 2334 void CompactibleFreeListSpace::print() const {
aoqi@0 2335 print_on(tty);
aoqi@0 2336 }
aoqi@0 2337
aoqi@0 2338 void CompactibleFreeListSpace::prepare_for_verify() {
aoqi@0 2339 assert_locked();
aoqi@0 2340 repairLinearAllocationBlocks();
aoqi@0 2341 // Verify that the SpoolBlocks look like free blocks of
aoqi@0 2342 // appropriate sizes... To be done ...
aoqi@0 2343 }
aoqi@0 2344
aoqi@0 2345 class VerifyAllBlksClosure: public BlkClosure {
aoqi@0 2346 private:
aoqi@0 2347 const CompactibleFreeListSpace* _sp;
aoqi@0 2348 const MemRegion _span;
aoqi@0 2349 HeapWord* _last_addr;
aoqi@0 2350 size_t _last_size;
aoqi@0 2351 bool _last_was_obj;
aoqi@0 2352 bool _last_was_live;
aoqi@0 2353
aoqi@0 2354 public:
aoqi@0 2355 VerifyAllBlksClosure(const CompactibleFreeListSpace* sp,
aoqi@0 2356 MemRegion span) : _sp(sp), _span(span),
aoqi@0 2357 _last_addr(NULL), _last_size(0),
aoqi@0 2358 _last_was_obj(false), _last_was_live(false) { }
aoqi@0 2359
aoqi@0 2360 virtual size_t do_blk(HeapWord* addr) {
aoqi@0 2361 size_t res;
aoqi@0 2362 bool was_obj = false;
aoqi@0 2363 bool was_live = false;
aoqi@0 2364 if (_sp->block_is_obj(addr)) {
aoqi@0 2365 was_obj = true;
aoqi@0 2366 oop p = oop(addr);
aoqi@0 2367 guarantee(p->is_oop(), "Should be an oop");
aoqi@0 2368 res = _sp->adjustObjectSize(p->size());
aoqi@0 2369 if (_sp->obj_is_alive(addr)) {
aoqi@0 2370 was_live = true;
aoqi@0 2371 p->verify();
aoqi@0 2372 }
aoqi@0 2373 } else {
aoqi@0 2374 FreeChunk* fc = (FreeChunk*)addr;
aoqi@0 2375 res = fc->size();
aoqi@0 2376 if (FLSVerifyLists && !fc->cantCoalesce()) {
aoqi@0 2377 guarantee(_sp->verify_chunk_in_free_list(fc),
aoqi@0 2378 "Chunk should be on a free list");
aoqi@0 2379 }
aoqi@0 2380 }
aoqi@0 2381 if (res == 0) {
aoqi@0 2382 gclog_or_tty->print_cr("Livelock: no rank reduction!");
aoqi@0 2383 gclog_or_tty->print_cr(
aoqi@0 2384 " Current: addr = " PTR_FORMAT ", size = " SIZE_FORMAT ", obj = %s, live = %s \n"
aoqi@0 2385 " Previous: addr = " PTR_FORMAT ", size = " SIZE_FORMAT ", obj = %s, live = %s \n",
aoqi@0 2386 p2i(addr), res, was_obj ?"true":"false", was_live ?"true":"false",
aoqi@0 2387 p2i(_last_addr), _last_size, _last_was_obj?"true":"false", _last_was_live?"true":"false");
aoqi@0 2388 _sp->print_on(gclog_or_tty);
aoqi@0 2389 guarantee(false, "Seppuku!");
aoqi@0 2390 }
aoqi@0 2391 _last_addr = addr;
aoqi@0 2392 _last_size = res;
aoqi@0 2393 _last_was_obj = was_obj;
aoqi@0 2394 _last_was_live = was_live;
aoqi@0 2395 return res;
aoqi@0 2396 }
aoqi@0 2397 };
aoqi@0 2398
aoqi@0 2399 class VerifyAllOopsClosure: public OopClosure {
aoqi@0 2400 private:
aoqi@0 2401 const CMSCollector* _collector;
aoqi@0 2402 const CompactibleFreeListSpace* _sp;
aoqi@0 2403 const MemRegion _span;
aoqi@0 2404 const bool _past_remark;
aoqi@0 2405 const CMSBitMap* _bit_map;
aoqi@0 2406
aoqi@0 2407 protected:
aoqi@0 2408 void do_oop(void* p, oop obj) {
aoqi@0 2409 if (_span.contains(obj)) { // the interior oop points into CMS heap
aoqi@0 2410 if (!_span.contains(p)) { // reference from outside CMS heap
aoqi@0 2411 // Should be a valid object; the first disjunct below allows
aoqi@0 2412 // us to sidestep an assertion in block_is_obj() that insists
aoqi@0 2413 // that p be in _sp. Note that several generations (and spaces)
aoqi@0 2414 // are spanned by _span (CMS heap) above.
aoqi@0 2415 guarantee(!_sp->is_in_reserved(obj) ||
aoqi@0 2416 _sp->block_is_obj((HeapWord*)obj),
aoqi@0 2417 "Should be an object");
aoqi@0 2418 guarantee(obj->is_oop(), "Should be an oop");
aoqi@0 2419 obj->verify();
aoqi@0 2420 if (_past_remark) {
aoqi@0 2421 // Remark has been completed, the object should be marked
aoqi@0 2422 _bit_map->isMarked((HeapWord*)obj);
aoqi@0 2423 }
aoqi@0 2424 } else { // reference within CMS heap
aoqi@0 2425 if (_past_remark) {
aoqi@0 2426 // Remark has been completed -- so the referent should have
aoqi@0 2427 // been marked, if referring object is.
aoqi@0 2428 if (_bit_map->isMarked(_collector->block_start(p))) {
aoqi@0 2429 guarantee(_bit_map->isMarked((HeapWord*)obj), "Marking error?");
aoqi@0 2430 }
aoqi@0 2431 }
aoqi@0 2432 }
aoqi@0 2433 } else if (_sp->is_in_reserved(p)) {
aoqi@0 2434 // the reference is from FLS, and points out of FLS
aoqi@0 2435 guarantee(obj->is_oop(), "Should be an oop");
aoqi@0 2436 obj->verify();
aoqi@0 2437 }
aoqi@0 2438 }
aoqi@0 2439
aoqi@0 2440 template <class T> void do_oop_work(T* p) {
aoqi@0 2441 T heap_oop = oopDesc::load_heap_oop(p);
aoqi@0 2442 if (!oopDesc::is_null(heap_oop)) {
aoqi@0 2443 oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
aoqi@0 2444 do_oop(p, obj);
aoqi@0 2445 }
aoqi@0 2446 }
aoqi@0 2447
aoqi@0 2448 public:
aoqi@0 2449 VerifyAllOopsClosure(const CMSCollector* collector,
aoqi@0 2450 const CompactibleFreeListSpace* sp, MemRegion span,
aoqi@0 2451 bool past_remark, CMSBitMap* bit_map) :
aoqi@0 2452 _collector(collector), _sp(sp), _span(span),
aoqi@0 2453 _past_remark(past_remark), _bit_map(bit_map) { }
aoqi@0 2454
aoqi@0 2455 virtual void do_oop(oop* p) { VerifyAllOopsClosure::do_oop_work(p); }
aoqi@0 2456 virtual void do_oop(narrowOop* p) { VerifyAllOopsClosure::do_oop_work(p); }
aoqi@0 2457 };
aoqi@0 2458
aoqi@0 2459 void CompactibleFreeListSpace::verify() const {
aoqi@0 2460 assert_lock_strong(&_freelistLock);
aoqi@0 2461 verify_objects_initialized();
aoqi@0 2462 MemRegion span = _collector->_span;
aoqi@0 2463 bool past_remark = (_collector->abstract_state() ==
aoqi@0 2464 CMSCollector::Sweeping);
aoqi@0 2465
aoqi@0 2466 ResourceMark rm;
aoqi@0 2467 HandleMark hm;
aoqi@0 2468
aoqi@0 2469 // Check integrity of CFL data structures
aoqi@0 2470 _promoInfo.verify();
aoqi@0 2471 _dictionary->verify();
aoqi@0 2472 if (FLSVerifyIndexTable) {
aoqi@0 2473 verifyIndexedFreeLists();
aoqi@0 2474 }
aoqi@0 2475 // Check integrity of all objects and free blocks in space
aoqi@0 2476 {
aoqi@0 2477 VerifyAllBlksClosure cl(this, span);
aoqi@0 2478 ((CompactibleFreeListSpace*)this)->blk_iterate(&cl); // cast off const
aoqi@0 2479 }
aoqi@0 2480 // Check that all references in the heap to FLS
aoqi@0 2481 // are to valid objects in FLS or that references in
aoqi@0 2482 // FLS are to valid objects elsewhere in the heap
aoqi@0 2483 if (FLSVerifyAllHeapReferences)
aoqi@0 2484 {
aoqi@0 2485 VerifyAllOopsClosure cl(_collector, this, span, past_remark,
aoqi@0 2486 _collector->markBitMap());
aoqi@0 2487 CollectedHeap* ch = Universe::heap();
aoqi@0 2488
aoqi@0 2489 // Iterate over all oops in the heap. Uses the _no_header version
aoqi@0 2490 // since we are not interested in following the klass pointers.
aoqi@0 2491 ch->oop_iterate_no_header(&cl);
aoqi@0 2492 }
aoqi@0 2493
aoqi@0 2494 if (VerifyObjectStartArray) {
aoqi@0 2495 // Verify the block offset table
aoqi@0 2496 _bt.verify();
aoqi@0 2497 }
aoqi@0 2498 }
aoqi@0 2499
aoqi@0 2500 #ifndef PRODUCT
aoqi@0 2501 void CompactibleFreeListSpace::verifyFreeLists() const {
aoqi@0 2502 if (FLSVerifyLists) {
aoqi@0 2503 _dictionary->verify();
aoqi@0 2504 verifyIndexedFreeLists();
aoqi@0 2505 } else {
aoqi@0 2506 if (FLSVerifyDictionary) {
aoqi@0 2507 _dictionary->verify();
aoqi@0 2508 }
aoqi@0 2509 if (FLSVerifyIndexTable) {
aoqi@0 2510 verifyIndexedFreeLists();
aoqi@0 2511 }
aoqi@0 2512 }
aoqi@0 2513 }
aoqi@0 2514 #endif
aoqi@0 2515
aoqi@0 2516 void CompactibleFreeListSpace::verifyIndexedFreeLists() const {
aoqi@0 2517 size_t i = 0;
aoqi@0 2518 for (; i < IndexSetStart; i++) {
aoqi@0 2519 guarantee(_indexedFreeList[i].head() == NULL, "should be NULL");
aoqi@0 2520 }
aoqi@0 2521 for (; i < IndexSetSize; i++) {
aoqi@0 2522 verifyIndexedFreeList(i);
aoqi@0 2523 }
aoqi@0 2524 }
aoqi@0 2525
aoqi@0 2526 void CompactibleFreeListSpace::verifyIndexedFreeList(size_t size) const {
aoqi@0 2527 FreeChunk* fc = _indexedFreeList[size].head();
aoqi@0 2528 FreeChunk* tail = _indexedFreeList[size].tail();
aoqi@0 2529 size_t num = _indexedFreeList[size].count();
aoqi@0 2530 size_t n = 0;
aoqi@0 2531 guarantee(((size >= IndexSetStart) && (size % IndexSetStride == 0)) || fc == NULL,
aoqi@0 2532 "Slot should have been empty");
aoqi@0 2533 for (; fc != NULL; fc = fc->next(), n++) {
aoqi@0 2534 guarantee(fc->size() == size, "Size inconsistency");
aoqi@0 2535 guarantee(fc->is_free(), "!free?");
aoqi@0 2536 guarantee(fc->next() == NULL || fc->next()->prev() == fc, "Broken list");
aoqi@0 2537 guarantee((fc->next() == NULL) == (fc == tail), "Incorrect tail");
aoqi@0 2538 }
aoqi@0 2539 guarantee(n == num, "Incorrect count");
aoqi@0 2540 }
aoqi@0 2541
aoqi@0 2542 #ifndef PRODUCT
aoqi@0 2543 void CompactibleFreeListSpace::check_free_list_consistency() const {
aoqi@0 2544 assert((TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >::min_size() <= IndexSetSize),
aoqi@0 2545 "Some sizes can't be allocated without recourse to"
aoqi@0 2546 " linear allocation buffers");
aoqi@0 2547 assert((TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >::min_size()*HeapWordSize == sizeof(TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >)),
aoqi@0 2548 "else MIN_TREE_CHUNK_SIZE is wrong");
aoqi@0 2549 assert(IndexSetStart != 0, "IndexSetStart not initialized");
aoqi@0 2550 assert(IndexSetStride != 0, "IndexSetStride not initialized");
aoqi@0 2551 }
aoqi@0 2552 #endif
aoqi@0 2553
aoqi@0 2554 void CompactibleFreeListSpace::printFLCensus(size_t sweep_count) const {
aoqi@0 2555 assert_lock_strong(&_freelistLock);
aoqi@0 2556 AdaptiveFreeList<FreeChunk> total;
aoqi@0 2557 gclog_or_tty->print("end sweep# " SIZE_FORMAT "\n", sweep_count);
aoqi@0 2558 AdaptiveFreeList<FreeChunk>::print_labels_on(gclog_or_tty, "size");
aoqi@0 2559 size_t total_free = 0;
aoqi@0 2560 for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
aoqi@0 2561 const AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
aoqi@0 2562 total_free += fl->count() * fl->size();
aoqi@0 2563 if (i % (40*IndexSetStride) == 0) {
aoqi@0 2564 AdaptiveFreeList<FreeChunk>::print_labels_on(gclog_or_tty, "size");
aoqi@0 2565 }
aoqi@0 2566 fl->print_on(gclog_or_tty);
aoqi@0 2567 total.set_bfr_surp( total.bfr_surp() + fl->bfr_surp() );
aoqi@0 2568 total.set_surplus( total.surplus() + fl->surplus() );
aoqi@0 2569 total.set_desired( total.desired() + fl->desired() );
aoqi@0 2570 total.set_prev_sweep( total.prev_sweep() + fl->prev_sweep() );
aoqi@0 2571 total.set_before_sweep(total.before_sweep() + fl->before_sweep());
aoqi@0 2572 total.set_count( total.count() + fl->count() );
aoqi@0 2573 total.set_coal_births( total.coal_births() + fl->coal_births() );
aoqi@0 2574 total.set_coal_deaths( total.coal_deaths() + fl->coal_deaths() );
aoqi@0 2575 total.set_split_births(total.split_births() + fl->split_births());
aoqi@0 2576 total.set_split_deaths(total.split_deaths() + fl->split_deaths());
aoqi@0 2577 }
aoqi@0 2578 total.print_on(gclog_or_tty, "TOTAL");
aoqi@0 2579 gclog_or_tty->print_cr("Total free in indexed lists "
aoqi@0 2580 SIZE_FORMAT " words", total_free);
aoqi@0 2581 gclog_or_tty->print("growth: %8.5f deficit: %8.5f\n",
aoqi@0 2582 (double)(total.split_births()+total.coal_births()-total.split_deaths()-total.coal_deaths())/
aoqi@0 2583 (total.prev_sweep() != 0 ? (double)total.prev_sweep() : 1.0),
aoqi@0 2584 (double)(total.desired() - total.count())/(total.desired() != 0 ? (double)total.desired() : 1.0));
aoqi@0 2585 _dictionary->print_dict_census();
aoqi@0 2586 }
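
// Illustrative sketch (not part of the original file): the "growth" and
// "deficit" figures printed at the end of printFLCensus() above are plain
// ratios over the census totals. The struct below is a hypothetical stand-in
// for the AdaptiveFreeList totals accumulated in the loop; only the
// arithmetic is being shown.
struct SketchCensusTotals {
  double split_births, coal_births, split_deaths, coal_deaths;
  double prev_sweep, desired, count;
};

static double sketch_census_growth(const SketchCensusTotals& t) {
  // Net births this cycle, normalized by the population at the previous sweep.
  double net_births = t.split_births + t.coal_births - t.split_deaths - t.coal_deaths;
  return net_births / (t.prev_sweep != 0.0 ? t.prev_sweep : 1.0);
}

static double sketch_census_deficit(const SketchCensusTotals& t) {
  // How far the current block count falls short of the desired count.
  return (t.desired - t.count) / (t.desired != 0.0 ? t.desired : 1.0);
}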
aoqi@0 2587
aoqi@0 2588 ///////////////////////////////////////////////////////////////////////////
aoqi@0 2589 // CFLS_LAB
aoqi@0 2590 ///////////////////////////////////////////////////////////////////////////
aoqi@0 2591
aoqi@0 2592 #define VECTOR_257(x) \
aoqi@0 2593 /* 1 2 3 4 5 6 7 8 9 1x 11 12 13 14 15 16 17 18 19 2x 21 22 23 24 25 26 27 28 29 3x 31 32 */ \
aoqi@0 2594 { x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, \
aoqi@0 2595 x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, \
aoqi@0 2596 x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, \
aoqi@0 2597 x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, \
aoqi@0 2598 x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, \
aoqi@0 2599 x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, \
aoqi@0 2600 x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, \
aoqi@0 2601 x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, \
aoqi@0 2602 x }
aoqi@0 2603
aoqi@0 2604 // Initialize with default setting of CMSParPromoteBlocksToClaim, _not_
aoqi@0 2605 // OldPLABSize, whose static default is different; if overridden at the
aoqi@0 2606 // command-line, this will get reinitialized via a call to
aoqi@0 2607 // modify_initialization() below.
aoqi@0 2608 AdaptiveWeightedAverage CFLS_LAB::_blocks_to_claim[] =
aoqi@0 2609 VECTOR_257(AdaptiveWeightedAverage(OldPLABWeight, (float)CMSParPromoteBlocksToClaim));
aoqi@0 2610 size_t CFLS_LAB::_global_num_blocks[] = VECTOR_257(0);
aoqi@0 2611 uint CFLS_LAB::_global_num_workers[] = VECTOR_257(0);
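
// Illustrative sketch (not part of the original file): VECTOR_257(x) expands
// to a braced initializer holding 257 copies of x, one per slot of the
// indexed free list table (IndexSetSize == 257, asserted in the constructor
// below). The hypothetical helper shows the equivalent element-wise effect
// for a plain array of counters; the macro form keeps the whole table in a
// single static initializer.
static void sketch_init_257_slots(size_t table[257], size_t seed_value) {
  for (size_t i = 0; i < 257; i++) {
    table[i] = seed_value;   // every slot starts from the same seed value
  }
}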
aoqi@0 2612
aoqi@0 2613 CFLS_LAB::CFLS_LAB(CompactibleFreeListSpace* cfls) :
aoqi@0 2614 _cfls(cfls)
aoqi@0 2615 {
aoqi@0 2616 assert(CompactibleFreeListSpace::IndexSetSize == 257, "Modify VECTOR_257() macro above");
aoqi@0 2617 for (size_t i = CompactibleFreeListSpace::IndexSetStart;
aoqi@0 2618 i < CompactibleFreeListSpace::IndexSetSize;
aoqi@0 2619 i += CompactibleFreeListSpace::IndexSetStride) {
aoqi@0 2620 _indexedFreeList[i].set_size(i);
aoqi@0 2621 _num_blocks[i] = 0;
aoqi@0 2622 }
aoqi@0 2623 }
aoqi@0 2624
aoqi@0 2625 static bool _CFLS_LAB_modified = false;
aoqi@0 2626
aoqi@0 2627 void CFLS_LAB::modify_initialization(size_t n, unsigned wt) {
aoqi@0 2628 assert(!_CFLS_LAB_modified, "Call only once");
aoqi@0 2629 _CFLS_LAB_modified = true;
aoqi@0 2630 for (size_t i = CompactibleFreeListSpace::IndexSetStart;
aoqi@0 2631 i < CompactibleFreeListSpace::IndexSetSize;
aoqi@0 2632 i += CompactibleFreeListSpace::IndexSetStride) {
aoqi@0 2633 _blocks_to_claim[i].modify(n, wt, true /* force */);
aoqi@0 2634 }
aoqi@0 2635 }
aoqi@0 2636
aoqi@0 2637 HeapWord* CFLS_LAB::alloc(size_t word_sz) {
aoqi@0 2638 FreeChunk* res;
aoqi@0 2639 assert(word_sz == _cfls->adjustObjectSize(word_sz), "Error");
aoqi@0 2640 if (word_sz >= CompactibleFreeListSpace::IndexSetSize) {
aoqi@0 2641 // This locking manages sync with other large object allocations.
aoqi@0 2642 MutexLockerEx x(_cfls->parDictionaryAllocLock(),
aoqi@0 2643 Mutex::_no_safepoint_check_flag);
aoqi@0 2644 res = _cfls->getChunkFromDictionaryExact(word_sz);
aoqi@0 2645 if (res == NULL) return NULL;
aoqi@0 2646 } else {
aoqi@0 2647 AdaptiveFreeList<FreeChunk>* fl = &_indexedFreeList[word_sz];
aoqi@0 2648 if (fl->count() == 0) {
aoqi@0 2649 // Attempt to refill this local free list.
aoqi@0 2650 get_from_global_pool(word_sz, fl);
aoqi@0 2651 // If it didn't work, give up.
aoqi@0 2652 if (fl->count() == 0) return NULL;
aoqi@0 2653 }
aoqi@0 2654 res = fl->get_chunk_at_head();
aoqi@0 2655 assert(res != NULL, "Why was count non-zero?");
aoqi@0 2656 }
aoqi@0 2657 res->markNotFree();
aoqi@0 2658 assert(!res->is_free(), "shouldn't be marked free");
aoqi@0 2659 assert(oop(res)->klass_or_null() == NULL, "should look uninitialized");
aoqi@0 2660 // mangle a just allocated object with a distinct pattern.
aoqi@0 2661 debug_only(res->mangleAllocated(word_sz));
aoqi@0 2662 return (HeapWord*)res;
aoqi@0 2663 }
aoqi@0 2664
aoqi@0 2665 // Get a chunk of blocks of the right size and update related
aoqi@0 2666 // book-keeping stats
aoqi@0 2667 void CFLS_LAB::get_from_global_pool(size_t word_sz, AdaptiveFreeList<FreeChunk>* fl) {
aoqi@0 2668 // Get the #blocks we want to claim
aoqi@0 2669 size_t n_blks = (size_t)_blocks_to_claim[word_sz].average();
aoqi@0 2670 assert(n_blks > 0, "Error");
aoqi@0 2671 assert(ResizeOldPLAB || n_blks == OldPLABSize, "Error");
aoqi@0 2672 // In some cases, when the application has a phase change,
aoqi@0 2673 // there may be a sudden and sharp shift in the object survival
aoqi@0 2674 // profile, and updating the counts at the end of a scavenge
aoqi@0 2675 // may not be quick enough, giving rise to large scavenge pauses
aoqi@0 2676 // during these phase changes. It is beneficial to detect such
aoqi@0 2677 // changes on-the-fly during a scavenge and avoid such a phase-change
aoqi@0 2678 // pothole. The following code is a heuristic attempt to do that.
aoqi@0 2679 // It is protected by a product flag until we have gained
aoqi@0 2680 // enough experience with this heuristic and fine-tuned its behaviour.
aoqi@0 2681 // WARNING: This might increase fragmentation if we overreact to
aoqi@0 2682 // small spikes, so some kind of historical smoothing based on
aoqi@0 2683 // previous experience with the greater reactivity might be useful.
aoqi@0 2684 // Lacking sufficient experience, CMSOldPLABResizeQuicker is disabled by
aoqi@0 2685 // default.
aoqi@0 2686 if (ResizeOldPLAB && CMSOldPLABResizeQuicker) {
aoqi@0 2687 size_t multiple = _num_blocks[word_sz]/(CMSOldPLABToleranceFactor*CMSOldPLABNumRefills*n_blks);
aoqi@0 2688 n_blks += CMSOldPLABReactivityFactor*multiple*n_blks;
aoqi@0 2689 n_blks = MIN2(n_blks, CMSOldPLABMax);
aoqi@0 2690 }
aoqi@0 2691 assert(n_blks > 0, "Error");
aoqi@0 2692 _cfls->par_get_chunk_of_blocks(word_sz, n_blks, fl);
aoqi@0 2693 // Update stats table entry for this block size
aoqi@0 2694 _num_blocks[word_sz] += fl->count();
aoqi@0 2695 }
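
// Illustrative sketch (not part of the original file): the CMSOldPLABResizeQuicker
// adjustment above, in plain arithmetic. The hypothetical parameters mirror the
// product flags used above (tolerance factor, number of refills, reactivity
// factor, PLAB maximum); all are assumed positive, as the surrounding asserts
// guarantee for the real values.
static size_t sketch_quicker_refill(size_t blocks_claimed_so_far, // _num_blocks[word_sz]
                                    size_t n_blks,                // current refill amount
                                    size_t tolerance_factor,      // CMSOldPLABToleranceFactor
                                    size_t num_refills,           // CMSOldPLABNumRefills
                                    size_t reactivity_factor,     // CMSOldPLABReactivityFactor
                                    size_t plab_max) {            // CMSOldPLABMax
  // "multiple" is how many times the tolerated demand we have already seen
  // during this scavenge.
  size_t multiple = blocks_claimed_so_far / (tolerance_factor * num_refills * n_blks);
  // Grow the refill proportionally to the overshoot, capped at the PLAB maximum.
  n_blks += reactivity_factor * multiple * n_blks;
  return n_blks < plab_max ? n_blks : plab_max;
}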
aoqi@0 2696
aoqi@0 2697 void CFLS_LAB::compute_desired_plab_size() {
aoqi@0 2698 for (size_t i = CompactibleFreeListSpace::IndexSetStart;
aoqi@0 2699 i < CompactibleFreeListSpace::IndexSetSize;
aoqi@0 2700 i += CompactibleFreeListSpace::IndexSetStride) {
aoqi@0 2701 assert((_global_num_workers[i] == 0) == (_global_num_blocks[i] == 0),
aoqi@0 2702 "Counter inconsistency");
aoqi@0 2703 if (_global_num_workers[i] > 0) {
aoqi@0 2704 // Need to smooth wrt historical average
aoqi@0 2705 if (ResizeOldPLAB) {
aoqi@0 2706 _blocks_to_claim[i].sample(
aoqi@0 2707 MAX2((size_t)CMSOldPLABMin,
aoqi@0 2708 MIN2((size_t)CMSOldPLABMax,
aoqi@0 2709 _global_num_blocks[i]/(_global_num_workers[i]*CMSOldPLABNumRefills))));
aoqi@0 2710 }
aoqi@0 2711 // Reset counters for next round
aoqi@0 2712 _global_num_workers[i] = 0;
aoqi@0 2713 _global_num_blocks[i] = 0;
aoqi@0 2714 if (PrintOldPLAB) {
aoqi@0 2715 gclog_or_tty->print_cr("[" SIZE_FORMAT "]: " SIZE_FORMAT, i, (size_t)_blocks_to_claim[i].average());
aoqi@0 2716 }
aoqi@0 2717 }
aoqi@0 2718 }
aoqi@0 2719 }
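
// Illustrative sketch (not part of the original file): the sample fed into
// _blocks_to_claim[i] above is the number of blocks consumed per worker per
// refill during the last scavenge, clamped to [CMSOldPLABMin, CMSOldPLABMax].
// The exponential smoothing itself is done by AdaptiveWeightedAverage::sample()
// and is not reproduced here. The enclosing code only reaches this point when
// the worker count is non-zero.
static size_t sketch_plab_sample(size_t global_num_blocks,   // blocks used, all workers
                                 size_t global_num_workers,  // workers that used this size
                                 size_t num_refills,         // CMSOldPLABNumRefills
                                 size_t plab_min,            // CMSOldPLABMin
                                 size_t plab_max) {          // CMSOldPLABMax
  size_t per_refill = global_num_blocks / (global_num_workers * num_refills);
  if (per_refill < plab_min) per_refill = plab_min;
  if (per_refill > plab_max) per_refill = plab_max;
  return per_refill;
}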
aoqi@0 2720
aoqi@0 2721 // If this is changed in the future to allow parallel
aoqi@0 2722 // access, one would need to take the FL locks and,
aoqi@0 2723 // depending on how it is used, stagger access from
aoqi@0 2724 // parallel threads to reduce contention.
aoqi@0 2725 void CFLS_LAB::retire(int tid) {
aoqi@0 2726 // We run this single threaded with the world stopped;
aoqi@0 2727 // so no need for locks and such.
aoqi@0 2728 NOT_PRODUCT(Thread* t = Thread::current();)
aoqi@0 2729 assert(Thread::current()->is_VM_thread(), "Error");
aoqi@0 2730 for (size_t i = CompactibleFreeListSpace::IndexSetStart;
aoqi@0 2731 i < CompactibleFreeListSpace::IndexSetSize;
aoqi@0 2732 i += CompactibleFreeListSpace::IndexSetStride) {
aoqi@0 2733 assert(_num_blocks[i] >= (size_t)_indexedFreeList[i].count(),
aoqi@0 2734 "Can't retire more than what we obtained");
aoqi@0 2735 if (_num_blocks[i] > 0) {
aoqi@0 2736 size_t num_retire = _indexedFreeList[i].count();
aoqi@0 2737 assert(_num_blocks[i] > num_retire, "Should have used at least one");
aoqi@0 2738 {
aoqi@0 2739 // MutexLockerEx x(_cfls->_indexedFreeListParLocks[i],
aoqi@0 2740 // Mutex::_no_safepoint_check_flag);
aoqi@0 2741
aoqi@0 2742 // Update globals stats for num_blocks used
aoqi@0 2743 _global_num_blocks[i] += (_num_blocks[i] - num_retire);
aoqi@0 2744 _global_num_workers[i]++;
aoqi@0 2745 assert(_global_num_workers[i] <= ParallelGCThreads, "Too big");
aoqi@0 2746 if (num_retire > 0) {
aoqi@0 2747 _cfls->_indexedFreeList[i].prepend(&_indexedFreeList[i]);
aoqi@0 2748 // Reset this list.
aoqi@0 2749 _indexedFreeList[i] = AdaptiveFreeList<FreeChunk>();
aoqi@0 2750 _indexedFreeList[i].set_size(i);
aoqi@0 2751 }
aoqi@0 2752 }
aoqi@0 2753 if (PrintOldPLAB) {
aoqi@0 2754 gclog_or_tty->print_cr("%d[" SIZE_FORMAT "]: " SIZE_FORMAT "/" SIZE_FORMAT "/" SIZE_FORMAT,
aoqi@0 2755 tid, i, num_retire, _num_blocks[i], (size_t)_blocks_to_claim[i].average());
aoqi@0 2756 }
aoqi@0 2757 // Reset stats for next round
aoqi@0 2758 _num_blocks[i] = 0;
aoqi@0 2759 }
aoqi@0 2760 }
aoqi@0 2761 }
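
// Illustrative sketch (not part of the original file): the accounting done in
// retire() for one size class. Of the blocks a worker obtained this scavenge,
// the ones still sitting on its local list are returned to the shared list,
// and only the difference -- blocks actually used -- feeds the global census
// that compute_desired_plab_size() consumes.
static size_t sketch_blocks_used(size_t blocks_obtained,    // _num_blocks[i]
                                 size_t blocks_left_over) { // _indexedFreeList[i].count()
  // blocks_obtained >= blocks_left_over is asserted in the real code.
  return blocks_obtained - blocks_left_over;
}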
aoqi@0 2762
aoqi@0 2763 void CompactibleFreeListSpace::par_get_chunk_of_blocks(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl) {
aoqi@0 2764 assert(fl->count() == 0, "Precondition.");
aoqi@0 2765 assert(word_sz < CompactibleFreeListSpace::IndexSetSize,
aoqi@0 2766 "Precondition");
aoqi@0 2767
aoqi@0 2768 // We'll try all multiples of word_sz in the indexed set, starting with
aoqi@0 2769 // word_sz itself and, if CMSSplitIndexedFreeListBlocks, try larger multiples,
aoqi@0 2770 // then try getting a big chunk and splitting it.
aoqi@0 2771 {
aoqi@0 2772 bool found;
aoqi@0 2773 int k;
aoqi@0 2774 size_t cur_sz;
aoqi@0 2775 for (k = 1, cur_sz = k * word_sz, found = false;
aoqi@0 2776 (cur_sz < CompactibleFreeListSpace::IndexSetSize) &&
aoqi@0 2777 (CMSSplitIndexedFreeListBlocks || k <= 1);
aoqi@0 2778 k++, cur_sz = k * word_sz) {
aoqi@0 2779 AdaptiveFreeList<FreeChunk> fl_for_cur_sz; // Empty.
aoqi@0 2780 fl_for_cur_sz.set_size(cur_sz);
aoqi@0 2781 {
aoqi@0 2782 MutexLockerEx x(_indexedFreeListParLocks[cur_sz],
aoqi@0 2783 Mutex::_no_safepoint_check_flag);
aoqi@0 2784 AdaptiveFreeList<FreeChunk>* gfl = &_indexedFreeList[cur_sz];
aoqi@0 2785 if (gfl->count() != 0) {
aoqi@0 2786 // nn is the number of chunks of size cur_sz that
aoqi@0 2787 // we'd need to split k-ways each, in order to create
aoqi@0 2788 // "n" chunks of size word_sz each.
aoqi@0 2789 const size_t nn = MAX2(n/k, (size_t)1);
aoqi@0 2790 gfl->getFirstNChunksFromList(nn, &fl_for_cur_sz);
aoqi@0 2791 found = true;
aoqi@0 2792 if (k > 1) {
aoqi@0 2793 // Update split death stats for the cur_sz-size blocks list:
aoqi@0 2794 // we increment the split death count by the number of blocks
aoqi@0 2795 // we just took from the cur_sz-size blocks list and which
aoqi@0 2796 // we will be splitting below.
aoqi@0 2797 ssize_t deaths = gfl->split_deaths() +
aoqi@0 2798 fl_for_cur_sz.count();
aoqi@0 2799 gfl->set_split_deaths(deaths);
aoqi@0 2800 }
aoqi@0 2801 }
aoqi@0 2802 }
aoqi@0 2803 // Now transfer fl_for_cur_sz to fl. Common case, we hope, is k = 1.
aoqi@0 2804 if (found) {
aoqi@0 2805 if (k == 1) {
aoqi@0 2806 fl->prepend(&fl_for_cur_sz);
aoqi@0 2807 } else {
aoqi@0 2808 // Divide each block on fl_for_cur_sz up k ways.
aoqi@0 2809 FreeChunk* fc;
aoqi@0 2810 while ((fc = fl_for_cur_sz.get_chunk_at_head()) != NULL) {
aoqi@0 2811 // Must do this in reverse order, so that anybody attempting to
aoqi@0 2812 // access the main chunk sees it as a single free block until we
aoqi@0 2813 // change it.
aoqi@0 2814 size_t fc_size = fc->size();
aoqi@0 2815 assert(fc->is_free(), "Error");
aoqi@0 2816 for (int i = k-1; i >= 0; i--) {
aoqi@0 2817 FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz);
aoqi@0 2818 assert((i != 0) ||
aoqi@0 2819 ((fc == ffc) && ffc->is_free() &&
aoqi@0 2820 (ffc->size() == k*word_sz) && (fc_size == word_sz)),
aoqi@0 2821 "Counting error");
aoqi@0 2822 ffc->set_size(word_sz);
aoqi@0 2823 ffc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
aoqi@0 2824 ffc->link_next(NULL);
aoqi@0 2825 // Above must occur before BOT is updated below.
aoqi@0 2826 OrderAccess::storestore();
aoqi@0 2827 // splitting from the right, fc_size == i * word_sz
aoqi@0 2828 _bt.mark_block((HeapWord*)ffc, word_sz, true /* reducing */);
aoqi@0 2829 fc_size -= word_sz;
aoqi@0 2830 assert(fc_size == i*word_sz, "Error");
aoqi@0 2831 _bt.verify_not_unallocated((HeapWord*)ffc, word_sz);
aoqi@0 2832 _bt.verify_single_block((HeapWord*)fc, fc_size);
aoqi@0 2833 _bt.verify_single_block((HeapWord*)ffc, word_sz);
aoqi@0 2834 // Push this on "fl".
aoqi@0 2835 fl->return_chunk_at_head(ffc);
aoqi@0 2836 }
aoqi@0 2837 // TRAP
aoqi@0 2838 assert(fl->tail()->next() == NULL, "List invariant.");
aoqi@0 2839 }
aoqi@0 2840 }
aoqi@0 2841 // Update birth stats for this block size.
aoqi@0 2842 size_t num = fl->count();
aoqi@0 2843 MutexLockerEx x(_indexedFreeListParLocks[word_sz],
aoqi@0 2844 Mutex::_no_safepoint_check_flag);
aoqi@0 2845 ssize_t births = _indexedFreeList[word_sz].split_births() + num;
aoqi@0 2846 _indexedFreeList[word_sz].set_split_births(births);
aoqi@0 2847 return;
aoqi@0 2848 }
aoqi@0 2849 }
aoqi@0 2850 }
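
// Illustrative sketch (not part of the original file): the search above walks
// multiples of word_sz (cur_sz = k * word_sz). If chunks of size cur_sz are
// available, roughly n/k of them (at least one) are taken, since splitting
// each of them k ways yields the n blocks of word_sz the caller asked for.
static size_t sketch_chunks_to_take(size_t n_blocks_wanted, size_t k) {
  size_t nn = n_blocks_wanted / k;   // each chunk splits into k blocks
  return nn > 0 ? nn : 1;            // MAX2(n/k, 1) in the code above
}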
aoqi@0 2851 // Otherwise, we'll split a block from the dictionary.
aoqi@0 2852 FreeChunk* fc = NULL;
aoqi@0 2853 FreeChunk* rem_fc = NULL;
aoqi@0 2854 size_t rem;
aoqi@0 2855 {
aoqi@0 2856 MutexLockerEx x(parDictionaryAllocLock(),
aoqi@0 2857 Mutex::_no_safepoint_check_flag);
aoqi@0 2858 while (n > 0) {
aoqi@0 2859 fc = dictionary()->get_chunk(MAX2(n * word_sz, _dictionary->min_size()),
aoqi@0 2860 FreeBlockDictionary<FreeChunk>::atLeast);
aoqi@0 2861 if (fc != NULL) {
aoqi@0 2862 _bt.allocated((HeapWord*)fc, fc->size(), true /* reducing */); // update _unallocated_blk
aoqi@0 2863 dictionary()->dict_census_update(fc->size(),
aoqi@0 2864 true /*split*/,
aoqi@0 2865 false /*birth*/);
aoqi@0 2866 break;
aoqi@0 2867 } else {
aoqi@0 2868 n--;
aoqi@0 2869 }
aoqi@0 2870 }
aoqi@0 2871 if (fc == NULL) return;
aoqi@0 2872 // Otherwise, split up that block.
aoqi@0 2873 assert((ssize_t)n >= 1, "Control point invariant");
aoqi@0 2874 assert(fc->is_free(), "Error: should be a free block");
aoqi@0 2875 _bt.verify_single_block((HeapWord*)fc, fc->size());
aoqi@0 2876 const size_t nn = fc->size() / word_sz;
aoqi@0 2877 n = MIN2(nn, n);
aoqi@0 2878 assert((ssize_t)n >= 1, "Control point invariant");
aoqi@0 2879 rem = fc->size() - n * word_sz;
aoqi@0 2880 // If there is a remainder, and it's too small, allocate one fewer.
aoqi@0 2881 if (rem > 0 && rem < MinChunkSize) {
aoqi@0 2882 n--; rem += word_sz;
aoqi@0 2883 }
aoqi@0 2884 // Note that at this point we may have n == 0.
aoqi@0 2885 assert((ssize_t)n >= 0, "Control point invariant");
aoqi@0 2886
aoqi@0 2887 // If n is 0, the chunk fc that was found is not large
aoqi@0 2888 // enough to leave a viable remainder. We are unable to
aoqi@0 2889 // allocate even one block. Return fc to the
aoqi@0 2890 // dictionary and return, leaving "fl" empty.
aoqi@0 2891 if (n == 0) {
aoqi@0 2892 returnChunkToDictionary(fc);
aoqi@0 2893 assert(fl->count() == 0, "We never allocated any blocks");
aoqi@0 2894 return;
aoqi@0 2895 }
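
// Illustrative sketch (not part of the original file): sizing the split of the
// dictionary chunk obtained above. The chunk yields n blocks of word_sz plus a
// remainder; a remainder smaller than MinChunkSize cannot stand on its own as
// a free chunk, so one fewer block is carved and the remainder absorbs it.
// min_chunk_size is a hypothetical stand-in for MinChunkSize; n may drop to 0
// here, which the code above handles by returning the chunk to the dictionary.
static void sketch_split_sizing(size_t chunk_size, size_t word_sz,
                                size_t n_wanted, size_t min_chunk_size,
                                size_t& n_out, size_t& rem_out) {
  size_t n = chunk_size / word_sz;           // blocks the chunk could yield
  if (n > n_wanted) n = n_wanted;            // never carve more than requested
  size_t rem = chunk_size - n * word_sz;     // what is left over
  if (rem > 0 && rem < min_chunk_size) {
    n--;                                     // give up one block ...
    rem += word_sz;                          // ... so the remainder stays viable
  }
  n_out = n;
  rem_out = rem;
}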
aoqi@0 2896
aoqi@0 2897 // First return the remainder, if any.
aoqi@0 2898 // Note that we hold the lock until we decide if we're going to give
aoqi@0 2899 // back the remainder to the dictionary, since a concurrent allocation
aoqi@0 2900 // may otherwise see the heap as empty. (We're willing to take that
aoqi@0 2901 // hit if the block is a small block.)
aoqi@0 2902 if (rem > 0) {
aoqi@0 2903 size_t prefix_size = n * word_sz;
aoqi@0 2904 rem_fc = (FreeChunk*)((HeapWord*)fc + prefix_size);
aoqi@0 2905 rem_fc->set_size(rem);
aoqi@0 2906 rem_fc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
aoqi@0 2907 rem_fc->link_next(NULL);
aoqi@0 2908 // Above must occur before BOT is updated below.
aoqi@0 2909 assert((ssize_t)n > 0 && prefix_size > 0 && rem_fc > fc, "Error");
aoqi@0 2910 OrderAccess::storestore();
aoqi@0 2911 _bt.split_block((HeapWord*)fc, fc->size(), prefix_size);
aoqi@0 2912 assert(fc->is_free(), "Error");
aoqi@0 2913 fc->set_size(prefix_size);
aoqi@0 2914 if (rem >= IndexSetSize) {
aoqi@0 2915 returnChunkToDictionary(rem_fc);
aoqi@0 2916 dictionary()->dict_census_update(rem, true /*split*/, true /*birth*/);
aoqi@0 2917 rem_fc = NULL;
aoqi@0 2918 }
aoqi@0 2919 // Otherwise, return it to the small list below.
aoqi@0 2920 }
aoqi@0 2921 }
aoqi@0 2922 if (rem_fc != NULL) {
aoqi@0 2923 MutexLockerEx x(_indexedFreeListParLocks[rem],
aoqi@0 2924 Mutex::_no_safepoint_check_flag);
aoqi@0 2925 _bt.verify_not_unallocated((HeapWord*)rem_fc, rem_fc->size());
aoqi@0 2926 _indexedFreeList[rem].return_chunk_at_head(rem_fc);
aoqi@0 2927 smallSplitBirth(rem);
aoqi@0 2928 }
aoqi@0 2929 assert((ssize_t)n > 0 && fc != NULL, "Consistency");
aoqi@0 2930 // Now do the splitting up.
aoqi@0 2931 // Must do this in reverse order, so that anybody attempting to
aoqi@0 2932 // access the main chunk sees it as a single free block until we
aoqi@0 2933 // change it.
aoqi@0 2934 size_t fc_size = n * word_sz;
aoqi@0 2935 // All but first chunk in this loop
aoqi@0 2936 for (ssize_t i = n-1; i > 0; i--) {
aoqi@0 2937 FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz);
aoqi@0 2938 ffc->set_size(word_sz);
aoqi@0 2939 ffc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
aoqi@0 2940 ffc->link_next(NULL);
aoqi@0 2941 // Above must occur before BOT is updated below.
aoqi@0 2942 OrderAccess::storestore();
aoqi@0 2943 // splitting from the right; after the decrement below, fc_size == i * word_sz
aoqi@0 2944 _bt.mark_block((HeapWord*)ffc, word_sz, true /* reducing */);
aoqi@0 2945 fc_size -= word_sz;
aoqi@0 2946 _bt.verify_not_unallocated((HeapWord*)ffc, ffc->size());
aoqi@0 2947 _bt.verify_single_block((HeapWord*)ffc, ffc->size());
aoqi@0 2948 _bt.verify_single_block((HeapWord*)fc, fc_size);
aoqi@0 2949 // Push this on "fl".
aoqi@0 2950 fl->return_chunk_at_head(ffc);
aoqi@0 2951 }
aoqi@0 2952 // First chunk
aoqi@0 2953 assert(fc->is_free() && fc->size() == n*word_sz, "Error: should still be a free block");
aoqi@0 2954 // The blocks above should show their new sizes before the first block below
aoqi@0 2955 fc->set_size(word_sz);
aoqi@0 2956 fc->link_prev(NULL); // idempotent wrt free-ness, see assert above
aoqi@0 2957 fc->link_next(NULL);
aoqi@0 2958 _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
aoqi@0 2959 _bt.verify_single_block((HeapWord*)fc, fc->size());
aoqi@0 2960 fl->return_chunk_at_head(fc);
aoqi@0 2961
aoqi@0 2962 assert((ssize_t)n > 0 && (ssize_t)n == fl->count(), "Incorrect number of blocks");
aoqi@0 2963 {
aoqi@0 2964 // Update the stats for this block size.
aoqi@0 2965 MutexLockerEx x(_indexedFreeListParLocks[word_sz],
aoqi@0 2966 Mutex::_no_safepoint_check_flag);
aoqi@0 2967 const ssize_t births = _indexedFreeList[word_sz].split_births() + n;
aoqi@0 2968 _indexedFreeList[word_sz].set_split_births(births);
aoqi@0 2969 // ssize_t new_surplus = _indexedFreeList[word_sz].surplus() + n;
aoqi@0 2970 // _indexedFreeList[word_sz].set_surplus(new_surplus);
aoqi@0 2971 }
aoqi@0 2972
aoqi@0 2973 // TRAP
aoqi@0 2974 assert(fl->tail()->next() == NULL, "List invariant.");
aoqi@0 2975 }
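
// Illustrative sketch (not part of the original file): both carving loops above
// work from the right (highest sub-chunk first), so a concurrent observer that
// looks at the head chunk keeps seeing one large free block until its size is
// shrunk last. The hypothetical helper only traces the loop order and the
// running-size invariant; the BOT updates and list pushes are omitted.
static void sketch_carve_from_right(size_t n, size_t word_sz) {
  // Precondition (asserted in the real code): n >= 1.
  size_t fc_size = n * word_sz;              // the head chunk still advertises this size
  for (size_t i = n - 1; i > 0; i--) {
    // The sub-chunk at word offset i * word_sz is published as its own free
    // block, then our running view of the prefix shrinks from the right:
    fc_size -= word_sz;
    // Invariant here: fc_size == i * word_sz, and the prefix is still one free block.
  }
  // Only after the loop is the head block itself resized down to word_sz.
  (void)fc_size;                             // silence set-but-unused warnings
}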
aoqi@0 2976
aoqi@0 2977 // Set up the space's par_seq_tasks structure for work claiming
aoqi@0 2978 // for parallel rescan. See CMSParRemarkTask where this is currently used.
aoqi@0 2979 // XXX Need to suitably abstract and generalize this and the next
aoqi@0 2980 // method into one.
aoqi@0 2981 void
aoqi@0 2982 CompactibleFreeListSpace::
aoqi@0 2983 initialize_sequential_subtasks_for_rescan(int n_threads) {
aoqi@0 2984 // The "size" of each task is fixed according to rescan_task_size.
aoqi@0 2985 assert(n_threads > 0, "Unexpected n_threads argument");
aoqi@0 2986 const size_t task_size = rescan_task_size();
aoqi@0 2987 size_t n_tasks = (used_region().word_size() + task_size - 1)/task_size;
aoqi@0 2988 assert((n_tasks == 0) == used_region().is_empty(), "n_tasks incorrect");
aoqi@0 2989 assert(n_tasks == 0 ||
aoqi@0 2990 ((used_region().start() + (n_tasks - 1)*task_size < used_region().end()) &&
aoqi@0 2991 (used_region().start() + n_tasks*task_size >= used_region().end())),
aoqi@0 2992 "n_tasks calculation incorrect");
aoqi@0 2993 SequentialSubTasksDone* pst = conc_par_seq_tasks();
aoqi@0 2994 assert(!pst->valid(), "Clobbering existing data?");
aoqi@0 2995 // Sets the condition for completion of the subtask (how many threads
aoqi@0 2996 // need to finish in order to be done).
aoqi@0 2997 pst->set_n_threads(n_threads);
aoqi@0 2998 pst->set_n_tasks((int)n_tasks);
aoqi@0 2999 }
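
// Illustrative sketch (not part of the original file): n_tasks above is a
// ceiling division of the used region size by the task size, so the tasks tile
// the region and only the last task may be shorter than task_size. This is
// exactly the invariant the assertions above check:
// (n_tasks - 1) * task_size < region_words <= n_tasks * task_size.
static size_t sketch_n_tasks(size_t region_words, size_t task_size_words) {
  return (region_words + task_size_words - 1) / task_size_words;  // ceiling division
}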
aoqi@0 3000
aoqi@0 3001 // Set up the space's par_seq_tasks structure for work claiming
aoqi@0 3002 // for parallel concurrent marking. See CMSConcMarkTask where this is currently used.
aoqi@0 3003 void
aoqi@0 3004 CompactibleFreeListSpace::
aoqi@0 3005 initialize_sequential_subtasks_for_marking(int n_threads,
aoqi@0 3006 HeapWord* low) {
aoqi@0 3007 // The "size" of each task is fixed according to rescan_task_size.
aoqi@0 3008 assert(n_threads > 0, "Unexpected n_threads argument");
aoqi@0 3009 const size_t task_size = marking_task_size();
aoqi@0 3010 assert(task_size > CardTableModRefBS::card_size_in_words &&
aoqi@0 3011 (task_size % CardTableModRefBS::card_size_in_words == 0),
aoqi@0 3012 "Otherwise arithmetic below would be incorrect");
aoqi@0 3013 MemRegion span = _gen->reserved();
aoqi@0 3014 if (low != NULL) {
aoqi@0 3015 if (span.contains(low)) {
aoqi@0 3016 // Align low down to a card boundary so that
aoqi@0 3017 // we can use block_offset_careful() on span boundaries.
aoqi@0 3018 HeapWord* aligned_low = (HeapWord*)align_size_down((uintptr_t)low,
aoqi@0 3019 CardTableModRefBS::card_size);
aoqi@0 3020 // Clip span prefix at aligned_low
aoqi@0 3021 span = span.intersection(MemRegion(aligned_low, span.end()));
aoqi@0 3022 } else if (low > span.end()) {
aoqi@0 3023 span = MemRegion(low, low); // Null region
aoqi@0 3024 } // else use entire span
aoqi@0 3025 }
aoqi@0 3026 assert(span.is_empty() ||
aoqi@0 3027 ((uintptr_t)span.start() % CardTableModRefBS::card_size == 0),
aoqi@0 3028 "span should start at a card boundary");
aoqi@0 3029 size_t n_tasks = (span.word_size() + task_size - 1)/task_size;
aoqi@0 3030 assert((n_tasks == 0) == span.is_empty(), "Inconsistency");
aoqi@0 3031 assert(n_tasks == 0 ||
aoqi@0 3032 ((span.start() + (n_tasks - 1)*task_size < span.end()) &&
aoqi@0 3033 (span.start() + n_tasks*task_size >= span.end())),
aoqi@0 3034 "n_tasks calculation incorrect");
aoqi@0 3035 SequentialSubTasksDone* pst = conc_par_seq_tasks();
aoqi@0 3036 assert(!pst->valid(), "Clobbering existing data?");
aoqi@0 3037 // Sets the condition for completion of the subtask (how many threads
aoqi@0 3038 // need to finish in order to be done).
aoqi@0 3039 pst->set_n_threads(n_threads);
aoqi@0 3040 pst->set_n_tasks((int)n_tasks);
aoqi@0 3041 }
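
// Illustrative sketch (not part of the original file): aligning "low" down to a
// card boundary before clipping the span, as done above, so that
// block_offset_careful() can be used at span boundaries. card_size is assumed
// to be a power of two (it is a card-table constant in the real code); the
// helper name is hypothetical.
static char* sketch_align_down_to_card(char* low, uintptr_t card_size) {
  uintptr_t p = (uintptr_t)low;
  return (char*)(p & ~(card_size - 1));   // clear the low-order bits
}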
