src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp

author:      brutisso
date:        Mon, 16 Apr 2012 08:57:18 +0200
changeset:   3711 (b632e80fc9dc)
parent:      3357 (441e946dc1af)
child:       3730 (9f059abe8cf2)
permissions: -rw-r--r--

4988100: oop_verify_old_oop appears to be dead
Summary: removed oop_verify_old_oop and allow_dirty. Also reviewed by: alexlamsl@gmail.com
Reviewed-by: jmasa, jwilhelm

duke@435 1 /*
brutisso@3711 2 * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
duke@435 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
duke@435 4 *
duke@435 5 * This code is free software; you can redistribute it and/or modify it
duke@435 6 * under the terms of the GNU General Public License version 2 only, as
duke@435 7 * published by the Free Software Foundation.
duke@435 8 *
duke@435 9 * This code is distributed in the hope that it will be useful, but WITHOUT
duke@435 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
duke@435 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
duke@435 12 * version 2 for more details (a copy is included in the LICENSE file that
duke@435 13 * accompanied this code).
duke@435 14 *
duke@435 15 * You should have received a copy of the GNU General Public License version
duke@435 16 * 2 along with this work; if not, write to the Free Software Foundation,
duke@435 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
duke@435 18 *
trims@1907 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
trims@1907 20 * or visit www.oracle.com if you need additional information or have any
trims@1907 21 * questions.
duke@435 22 *
duke@435 23 */
duke@435 24
stefank@2314 25 #include "precompiled.hpp"
stefank@2314 26 #include "gc_implementation/concurrentMarkSweep/cmsLockVerifier.hpp"
stefank@2314 27 #include "gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp"
stefank@2314 28 #include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.inline.hpp"
stefank@2314 29 #include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.hpp"
stefank@2314 30 #include "gc_implementation/shared/liveRange.hpp"
stefank@2314 31 #include "gc_implementation/shared/spaceDecorator.hpp"
stefank@2314 32 #include "gc_interface/collectedHeap.hpp"
stefank@2314 33 #include "memory/allocation.inline.hpp"
stefank@2314 34 #include "memory/blockOffsetTable.inline.hpp"
stefank@2314 35 #include "memory/resourceArea.hpp"
stefank@2314 36 #include "memory/universe.inline.hpp"
stefank@2314 37 #include "oops/oop.inline.hpp"
stefank@2314 38 #include "runtime/globals.hpp"
stefank@2314 39 #include "runtime/handles.inline.hpp"
stefank@2314 40 #include "runtime/init.hpp"
stefank@2314 41 #include "runtime/java.hpp"
stefank@2314 42 #include "runtime/vmThread.hpp"
stefank@2314 43 #include "utilities/copy.hpp"
duke@435 44
duke@435 45 /////////////////////////////////////////////////////////////////////////
duke@435 46 //// CompactibleFreeListSpace
duke@435 47 /////////////////////////////////////////////////////////////////////////
duke@435 48
duke@435 49 // highest ranked free list lock rank
duke@435 50 int CompactibleFreeListSpace::_lockRank = Mutex::leaf + 3;
duke@435 51
kvn@1926 52 // Defaults are 0 so things will break badly if incorrectly initialized.
ysr@3264 53 size_t CompactibleFreeListSpace::IndexSetStart = 0;
ysr@3264 54 size_t CompactibleFreeListSpace::IndexSetStride = 0;
kvn@1926 55
kvn@1926 56 size_t MinChunkSize = 0;
kvn@1926 57
kvn@1926 58 void CompactibleFreeListSpace::set_cms_values() {
kvn@1926 59 // Set CMS global values
kvn@1926 60 assert(MinChunkSize == 0, "already set");
kvn@1926 61 #define numQuanta(x,y) ((x+y-1)/y)
kvn@1926 62 MinChunkSize = numQuanta(sizeof(FreeChunk), MinObjAlignmentInBytes) * MinObjAlignment;
kvn@1926 63
kvn@1926 64 assert(IndexSetStart == 0 && IndexSetStride == 0, "already set");
ysr@3264 65 IndexSetStart = MinChunkSize;
kvn@1926 66 IndexSetStride = MinObjAlignment;
kvn@1926 67 }
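The numQuanta macro above rounds sizeof(FreeChunk) up to a whole number of MinObjAlignmentInBytes-sized quanta, and multiplying by MinObjAlignment converts that quantum count into heap words. A minimal standalone sketch of the same arithmetic, using illustrative values (a 24-byte header, 8-byte alignment, 8-byte words) that are assumptions for the example rather than values taken from this VM:

#include <cassert>
#include <cstddef>

// Round x up to a whole number of y-sized quanta (same idea as numQuanta above).
static size_t num_quanta(size_t x, size_t y) { return (x + y - 1) / y; }

int main() {
  const size_t header_bytes        = 24; // illustrative stand-in for sizeof(FreeChunk)
  const size_t min_align_bytes     = 8;  // illustrative MinObjAlignmentInBytes
  const size_t bytes_per_heap_word = 8;  // illustrative 64-bit HeapWord
  const size_t min_align_words     = min_align_bytes / bytes_per_heap_word;

  // ceil(24 / 8) = 3 quanta, each 1 word wide here, so the smallest chunk
  // that can still carry a free-chunk header is 3 words.
  const size_t min_chunk_words =
      num_quanta(header_bytes, min_align_bytes) * min_align_words;
  assert(min_chunk_words == 3);
  return 0;
}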
kvn@1926 68
duke@435 69 // Constructor
duke@435 70 CompactibleFreeListSpace::CompactibleFreeListSpace(BlockOffsetSharedArray* bs,
duke@435 71 MemRegion mr, bool use_adaptive_freelists,
duke@435 72 FreeBlockDictionary::DictionaryChoice dictionaryChoice) :
duke@435 73 _dictionaryChoice(dictionaryChoice),
duke@435 74 _adaptive_freelists(use_adaptive_freelists),
duke@435 75 _bt(bs, mr),
duke@435 76 // free list locks are in the range of values taken by _lockRank
duke@435 77 // This range currently is [_leaf+2, _leaf+3]
duke@435 78 // Note: this requires that CFLspace c'tors
duke@435 79 // are called serially in the order in which the locks
duke@435 80 // are acquired in the program text. This is true today.
duke@435 81 _freelistLock(_lockRank--, "CompactibleFreeListSpace._lock", true),
duke@435 82 _parDictionaryAllocLock(Mutex::leaf - 1, // == rank(ExpandHeap_lock) - 1
duke@435 83 "CompactibleFreeListSpace._dict_par_lock", true),
duke@435 84 _rescan_task_size(CardTableModRefBS::card_size_in_words * BitsPerWord *
duke@435 85 CMSRescanMultiple),
duke@435 86 _marking_task_size(CardTableModRefBS::card_size_in_words * BitsPerWord *
duke@435 87 CMSConcMarkMultiple),
duke@435 88 _collector(NULL)
duke@435 89 {
duke@435 90 _bt.set_space(this);
jmasa@698 91 initialize(mr, SpaceDecorator::Clear, SpaceDecorator::Mangle);
duke@435 92 // We have all of "mr", all of which we place in the dictionary
duke@435 93 // as one big chunk. We'll need to decide here which of several
duke@435 94 // possible alternative dictionary implementations to use. For
duke@435 95 // now the choice is easy, since we have only one working
duke@435 96 // implementation, namely, the simple binary tree (splaying
duke@435 97 // temporarily disabled).
duke@435 98 switch (dictionaryChoice) {
duke@435 99 case FreeBlockDictionary::dictionarySplayTree:
duke@435 100 case FreeBlockDictionary::dictionarySkipList:
duke@435 101 default:
duke@435 102 warning("dictionaryChoice: selected option not understood; using"
duke@435 103 " default BinaryTreeDictionary implementation instead.");
ysr@1580 104 case FreeBlockDictionary::dictionaryBinaryTree:
duke@435 105 _dictionary = new BinaryTreeDictionary(mr);
duke@435 106 break;
duke@435 107 }
duke@435 108 assert(_dictionary != NULL, "CMS dictionary initialization");
duke@435 109 // The indexed free lists are initially all empty and are lazily
duke@435 110 // filled in on demand. Initialize the array elements to NULL.
duke@435 111 initializeIndexedFreeListArray();
duke@435 112
duke@435 113 // Not using adaptive free lists assumes that allocation is first
duke@435 114 // from the linAB's. Also a cms perm gen which can be compacted
duke@435 115 // has to have the klass's klassKlass allocated at a lower
duke@435 116 // address in the heap than the klass so that the klassKlass is
duke@435 117 // moved to its new location before the klass is moved.
duke@435 118 // Set the _refillSize for the linear allocation blocks
duke@435 119 if (!use_adaptive_freelists) {
duke@435 120 FreeChunk* fc = _dictionary->getChunk(mr.word_size());
duke@435 121 // The small linAB initially has all the space and will allocate
duke@435 122 // a chunk of any size.
duke@435 123 HeapWord* addr = (HeapWord*) fc;
duke@435 124 _smallLinearAllocBlock.set(addr, fc->size() ,
duke@435 125 1024*SmallForLinearAlloc, fc->size());
duke@435 126 // Note that _unallocated_block is not updated here.
duke@435 127 // Allocations from the linear allocation block should
duke@435 128 // update it.
duke@435 129 } else {
duke@435 130 _smallLinearAllocBlock.set(0, 0, 1024*SmallForLinearAlloc,
duke@435 131 SmallForLinearAlloc);
duke@435 132 }
duke@435 133 // CMSIndexedFreeListReplenish should be at least 1
duke@435 134 CMSIndexedFreeListReplenish = MAX2((uintx)1, CMSIndexedFreeListReplenish);
duke@435 135 _promoInfo.setSpace(this);
duke@435 136 if (UseCMSBestFit) {
duke@435 137 _fitStrategy = FreeBlockBestFitFirst;
duke@435 138 } else {
duke@435 139 _fitStrategy = FreeBlockStrategyNone;
duke@435 140 }
ysr@3220 141 check_free_list_consistency();
duke@435 142
duke@435 143 // Initialize locks for parallel case.
jmasa@2188 144
jmasa@2188 145 if (CollectedHeap::use_parallel_gc_threads()) {
duke@435 146 for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
duke@435 147 _indexedFreeListParLocks[i] = new Mutex(Mutex::leaf - 1, // == ExpandHeap_lock - 1
duke@435 148 "a freelist par lock",
duke@435 149 true);
duke@435 150 if (_indexedFreeListParLocks[i] == NULL)
duke@435 151 vm_exit_during_initialization("Could not allocate a par lock");
duke@435 152 DEBUG_ONLY(
duke@435 153 _indexedFreeList[i].set_protecting_lock(_indexedFreeListParLocks[i]);
duke@435 154 )
duke@435 155 }
duke@435 156 _dictionary->set_par_lock(&_parDictionaryAllocLock);
duke@435 157 }
duke@435 158 }
duke@435 159
duke@435 160 // Like CompactibleSpace forward() but always calls cross_threshold() to
duke@435 161 // update the block offset table. Removed initialize_threshold call because
duke@435 162 // CFLS does not use a block offset array for contiguous spaces.
duke@435 163 HeapWord* CompactibleFreeListSpace::forward(oop q, size_t size,
duke@435 164 CompactPoint* cp, HeapWord* compact_top) {
duke@435 165 // q is alive
duke@435 166 // First check if we should switch compaction space
duke@435 167 assert(this == cp->space, "'this' should be current compaction space.");
duke@435 168 size_t compaction_max_size = pointer_delta(end(), compact_top);
duke@435 169 assert(adjustObjectSize(size) == cp->space->adjust_object_size_v(size),
duke@435 170 "virtual adjust_object_size_v() method is not correct");
duke@435 171 size_t adjusted_size = adjustObjectSize(size);
duke@435 172 assert(compaction_max_size >= MinChunkSize || compaction_max_size == 0,
duke@435 173 "no small fragments allowed");
duke@435 174 assert(minimum_free_block_size() == MinChunkSize,
duke@435 175 "for de-virtualized reference below");
duke@435 176 // Can't leave a nonzero size, residual fragment smaller than MinChunkSize
duke@435 177 if (adjusted_size + MinChunkSize > compaction_max_size &&
duke@435 178 adjusted_size != compaction_max_size) {
duke@435 179 do {
duke@435 180 // switch to next compaction space
duke@435 181 cp->space->set_compaction_top(compact_top);
duke@435 182 cp->space = cp->space->next_compaction_space();
duke@435 183 if (cp->space == NULL) {
duke@435 184 cp->gen = GenCollectedHeap::heap()->prev_gen(cp->gen);
duke@435 185 assert(cp->gen != NULL, "compaction must succeed");
duke@435 186 cp->space = cp->gen->first_compaction_space();
duke@435 187 assert(cp->space != NULL, "generation must have a first compaction space");
duke@435 188 }
duke@435 189 compact_top = cp->space->bottom();
duke@435 190 cp->space->set_compaction_top(compact_top);
duke@435 191 // The correct adjusted_size may not be the same as that for this method
duke@435 192 // (i.e., cp->space may no longer be "this"), so adjust the size again.
duke@435 193 // Use the virtual method, which was not used above in order to save
duke@435 194 // the virtual dispatch.
duke@435 195 adjusted_size = cp->space->adjust_object_size_v(size);
duke@435 196 compaction_max_size = pointer_delta(cp->space->end(), compact_top);
duke@435 197 assert(cp->space->minimum_free_block_size() == 0, "just checking");
duke@435 198 } while (adjusted_size > compaction_max_size);
duke@435 199 }
duke@435 200
duke@435 201 // store the forwarding pointer into the mark word
duke@435 202 if ((HeapWord*)q != compact_top) {
duke@435 203 q->forward_to(oop(compact_top));
duke@435 204 assert(q->is_gc_marked(), "encoding the pointer should preserve the mark");
duke@435 205 } else {
duke@435 206 // if the object isn't moving we can just set the mark to the default
duke@435 207 // mark and handle it specially later on.
duke@435 208 q->init_mark();
duke@435 209 assert(q->forwardee() == NULL, "should be forwarded to NULL");
duke@435 210 }
duke@435 211
coleenp@548 212 VALIDATE_MARK_SWEEP_ONLY(MarkSweep::register_live_oop(q, adjusted_size));
duke@435 213 compact_top += adjusted_size;
duke@435 214
duke@435 215 // we need to update the offset table so that the beginnings of objects can be
duke@435 216 // found during scavenge. Note that we are updating the offset table based on
duke@435 217 // where the object will be once the compaction phase finishes.
duke@435 218
duke@435 219 // Always call cross_threshold(). A contiguous space can only call it when
duke@435 220 // the compaction_top exceeds the current threshold but not for a
duke@435 221 // non-contiguous space.
duke@435 222 cp->threshold =
duke@435 223 cp->space->cross_threshold(compact_top - adjusted_size, compact_top);
duke@435 224 return compact_top;
duke@435 225 }
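The space-switching test above refuses to place an object when doing so would leave a residual fragment that is nonzero but smaller than MinChunkSize, since such a sliver could not be turned into a valid free chunk. A minimal standalone sketch of that fit test, with hypothetical word counts chosen only for illustration:

#include <cassert>
#include <cstddef>

// Returns true when an object of adjusted_size words can be placed in a
// space with room_left words without leaving a residual fragment that is
// nonzero but smaller than min_chunk words.
static bool fits_without_small_fragment(size_t adjusted_size,
                                        size_t room_left,
                                        size_t min_chunk) {
  return adjusted_size + min_chunk <= room_left || adjusted_size == room_left;
}

int main() {
  const size_t min_chunk = 3;                               // illustrative MinChunkSize
  assert( fits_without_small_fragment(10, 13, min_chunk));  // leaves 3 words >= min_chunk
  assert( fits_without_small_fragment(10, 10, min_chunk));  // exact fit, no residue
  assert(!fits_without_small_fragment(10, 12, min_chunk));  // would leave a 2-word sliver
  return 0;
}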
duke@435 226
duke@435 227 // A modified copy of OffsetTableContigSpace::cross_threshold() with _offsets -> _bt
duke@435 228 // and use of single_block instead of alloc_block. The name here is not really
duke@435 229 // appropriate - maybe a more general name could be invented for both the
duke@435 230 // contiguous and noncontiguous spaces.
duke@435 231
duke@435 232 HeapWord* CompactibleFreeListSpace::cross_threshold(HeapWord* start, HeapWord* the_end) {
duke@435 233 _bt.single_block(start, the_end);
duke@435 234 return end();
duke@435 235 }
duke@435 236
duke@435 237 // Initialize them to NULL.
duke@435 238 void CompactibleFreeListSpace::initializeIndexedFreeListArray() {
duke@435 239 for (size_t i = 0; i < IndexSetSize; i++) {
duke@435 240 // Note that on platforms where objects are double word aligned,
duke@435 241 // the odd array elements are not used. It is convenient, however,
duke@435 242 // to map directly from the object size to the array element.
duke@435 243 _indexedFreeList[i].reset(IndexSetSize);
duke@435 244 _indexedFreeList[i].set_size(i);
duke@435 245 assert(_indexedFreeList[i].count() == 0, "reset check failed");
duke@435 246 assert(_indexedFreeList[i].head() == NULL, "reset check failed");
duke@435 247 assert(_indexedFreeList[i].tail() == NULL, "reset check failed");
duke@435 248 assert(_indexedFreeList[i].hint() == IndexSetSize, "reset check failed");
duke@435 249 }
duke@435 250 }
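As the comment above notes, the array index is simply the chunk size in words, so lookups by exact size are direct and, with double-word alignment, only every IndexSetStride-th slot is ever populated. A minimal sketch of that size-indexed table idea, using hypothetical names and sizes that are not taken from the VM:

#include <cassert>
#include <cstddef>
#include <vector>

// Hypothetical miniature of a size-indexed free-list table: slot i counts
// free blocks of exactly i words; odd slots stay empty when the stride is 2.
struct TinySizeIndexedLists {
  static const size_t kTableSize = 257;  // illustrative, not the VM's IndexSetSize
  static const size_t kStride    = 2;    // assumes double-word object alignment
  std::vector<size_t> counts;
  TinySizeIndexedLists() : counts(kTableSize, 0) {}
  void add_block(size_t words)         { assert(words % kStride == 0); counts[words]++; }
  size_t blocks_of(size_t words) const { return counts[words]; }
};

int main() {
  TinySizeIndexedLists lists;
  lists.add_block(4);
  lists.add_block(4);
  lists.add_block(6);
  assert(lists.blocks_of(4) == 2);  // direct size-to-index lookup
  assert(lists.blocks_of(5) == 0);  // odd slot unused with a stride of 2
  return 0;
}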
duke@435 251
duke@435 252 void CompactibleFreeListSpace::resetIndexedFreeListArray() {
ysr@3264 253 for (size_t i = 1; i < IndexSetSize; i++) {
duke@435 254 assert(_indexedFreeList[i].size() == (size_t) i,
duke@435 255 "Indexed free list sizes are incorrect");
duke@435 256 _indexedFreeList[i].reset(IndexSetSize);
duke@435 257 assert(_indexedFreeList[i].count() == 0, "reset check failed");
duke@435 258 assert(_indexedFreeList[i].head() == NULL, "reset check failed");
duke@435 259 assert(_indexedFreeList[i].tail() == NULL, "reset check failed");
duke@435 260 assert(_indexedFreeList[i].hint() == IndexSetSize, "reset check failed");
duke@435 261 }
duke@435 262 }
duke@435 263
duke@435 264 void CompactibleFreeListSpace::reset(MemRegion mr) {
duke@435 265 resetIndexedFreeListArray();
duke@435 266 dictionary()->reset();
duke@435 267 if (BlockOffsetArrayUseUnallocatedBlock) {
duke@435 268 assert(end() == mr.end(), "We are compacting to the bottom of CMS gen");
duke@435 269 // Everything's allocated until proven otherwise.
duke@435 270 _bt.set_unallocated_block(end());
duke@435 271 }
duke@435 272 if (!mr.is_empty()) {
duke@435 273 assert(mr.word_size() >= MinChunkSize, "Chunk size is too small");
duke@435 274 _bt.single_block(mr.start(), mr.word_size());
duke@435 275 FreeChunk* fc = (FreeChunk*) mr.start();
duke@435 276 fc->setSize(mr.word_size());
duke@435 277 if (mr.word_size() >= IndexSetSize ) {
duke@435 278 returnChunkToDictionary(fc);
duke@435 279 } else {
duke@435 280 _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
duke@435 281 _indexedFreeList[mr.word_size()].returnChunkAtHead(fc);
duke@435 282 }
duke@435 283 }
duke@435 284 _promoInfo.reset();
duke@435 285 _smallLinearAllocBlock._ptr = NULL;
duke@435 286 _smallLinearAllocBlock._word_size = 0;
duke@435 287 }
duke@435 288
duke@435 289 void CompactibleFreeListSpace::reset_after_compaction() {
duke@435 290 // Reset the space to the new reality - one free chunk.
duke@435 291 MemRegion mr(compaction_top(), end());
duke@435 292 reset(mr);
duke@435 293 // Now refill the linear allocation block(s) if possible.
duke@435 294 if (_adaptive_freelists) {
duke@435 295 refillLinearAllocBlocksIfNeeded();
duke@435 296 } else {
duke@435 297 // Place as much of mr in the linAB as we can get,
duke@435 298 // provided it was big enough to go into the dictionary.
duke@435 299 FreeChunk* fc = dictionary()->findLargestDict();
duke@435 300 if (fc != NULL) {
duke@435 301 assert(fc->size() == mr.word_size(),
duke@435 302 "Why was the chunk broken up?");
duke@435 303 removeChunkFromDictionary(fc);
duke@435 304 HeapWord* addr = (HeapWord*) fc;
duke@435 305 _smallLinearAllocBlock.set(addr, fc->size() ,
duke@435 306 1024*SmallForLinearAlloc, fc->size());
duke@435 307 // Note that _unallocated_block is not updated here.
duke@435 308 }
duke@435 309 }
duke@435 310 }
duke@435 311
duke@435 312 // Walks the entire dictionary, returning a coterminal
duke@435 313 // chunk, if it exists. Use with caution since it involves
duke@435 314 // a potentially complete walk of a potentially large tree.
duke@435 315 FreeChunk* CompactibleFreeListSpace::find_chunk_at_end() {
duke@435 316
duke@435 317 assert_lock_strong(&_freelistLock);
duke@435 318
duke@435 319 return dictionary()->find_chunk_ends_at(end());
duke@435 320 }
duke@435 321
duke@435 322
duke@435 323 #ifndef PRODUCT
duke@435 324 void CompactibleFreeListSpace::initializeIndexedFreeListArrayReturnedBytes() {
duke@435 325 for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
duke@435 326 _indexedFreeList[i].allocation_stats()->set_returnedBytes(0);
duke@435 327 }
duke@435 328 }
duke@435 329
duke@435 330 size_t CompactibleFreeListSpace::sumIndexedFreeListArrayReturnedBytes() {
duke@435 331 size_t sum = 0;
duke@435 332 for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
duke@435 333 sum += _indexedFreeList[i].allocation_stats()->returnedBytes();
duke@435 334 }
duke@435 335 return sum;
duke@435 336 }
duke@435 337
duke@435 338 size_t CompactibleFreeListSpace::totalCountInIndexedFreeLists() const {
duke@435 339 size_t count = 0;
ysr@3264 340 for (size_t i = IndexSetStart; i < IndexSetSize; i++) {
duke@435 341 debug_only(
duke@435 342 ssize_t total_list_count = 0;
duke@435 343 for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
duke@435 344 fc = fc->next()) {
duke@435 345 total_list_count++;
duke@435 346 }
duke@435 347 assert(total_list_count == _indexedFreeList[i].count(),
duke@435 348 "Count in list is incorrect");
duke@435 349 )
duke@435 350 count += _indexedFreeList[i].count();
duke@435 351 }
duke@435 352 return count;
duke@435 353 }
duke@435 354
duke@435 355 size_t CompactibleFreeListSpace::totalCount() {
duke@435 356 size_t num = totalCountInIndexedFreeLists();
duke@435 357 num += dictionary()->totalCount();
duke@435 358 if (_smallLinearAllocBlock._word_size != 0) {
duke@435 359 num++;
duke@435 360 }
duke@435 361 return num;
duke@435 362 }
duke@435 363 #endif
duke@435 364
duke@435 365 bool CompactibleFreeListSpace::is_free_block(const HeapWord* p) const {
duke@435 366 FreeChunk* fc = (FreeChunk*) p;
duke@435 367 return fc->isFree();
duke@435 368 }
duke@435 369
duke@435 370 size_t CompactibleFreeListSpace::used() const {
duke@435 371 return capacity() - free();
duke@435 372 }
duke@435 373
duke@435 374 size_t CompactibleFreeListSpace::free() const {
duke@435 375 // "MT-safe, but not MT-precise"(TM), if you will: i.e.
duke@435 376 // if you do this while the structures are in flux you
duke@435 377 // may get an approximate answer only; for instance
duke@435 378 // because there is concurrent allocation either
duke@435 379 // directly by mutators or for promotion during a GC.
duke@435 380 // It's "MT-safe", however, in the sense that you are guaranteed
duke@435 381 // not to crash and burn, for instance, because of walking
duke@435 382 // pointers that could disappear as you were walking them.
duke@435 383 // The approximation is because the various components
duke@435 384 // that are read below are not read atomically (and
duke@435 385 // further the computation of totalSizeInIndexedFreeLists()
duke@435 386 // is itself a non-atomic computation). The normal use of
duke@435 387 // this is during a resize operation at the end of GC
duke@435 388 // and at that time you are guaranteed to get the
duke@435 389 // correct actual value. However, for instance, this is
duke@435 390 // also read completely asynchronously by the "perf-sampler"
duke@435 391 // that supports jvmstat, and you are apt to see the values
duke@435 392 // flicker in such cases.
duke@435 393 assert(_dictionary != NULL, "No _dictionary?");
duke@435 394 return (_dictionary->totalChunkSize(DEBUG_ONLY(freelistLock())) +
duke@435 395 totalSizeInIndexedFreeLists() +
duke@435 396 _smallLinearAllocBlock._word_size) * HeapWordSize;
duke@435 397 }
duke@435 398
duke@435 399 size_t CompactibleFreeListSpace::max_alloc_in_words() const {
duke@435 400 assert(_dictionary != NULL, "No _dictionary?");
duke@435 401 assert_locked();
duke@435 402 size_t res = _dictionary->maxChunkSize();
duke@435 403 res = MAX2(res, MIN2(_smallLinearAllocBlock._word_size,
duke@435 404 (size_t) SmallForLinearAlloc - 1));
duke@435 405 // XXX the following could potentially be pretty slow;
duke@435 406 // should one, pessimistically for the rare cases when res
duke@435 407 // calculated above is less than IndexSetSize,
duke@435 408 // just return res calculated above? My reasoning was that
duke@435 409 // those cases will be so rare that the extra time spent doesn't
duke@435 410 // really matter....
duke@435 411 // Note: do not change the loop test i >= res + IndexSetStride to i > res
duke@435 412 // below: i is unsigned and res may be zero, so the decrement could wrap around.
duke@435 413 for (size_t i = IndexSetSize - 1; i >= res + IndexSetStride;
duke@435 414 i -= IndexSetStride) {
duke@435 415 if (_indexedFreeList[i].head() != NULL) {
duke@435 416 assert(_indexedFreeList[i].count() != 0, "Inconsistent FreeList");
duke@435 417 return i;
duke@435 418 }
duke@435 419 }
duke@435 420 return res;
duke@435 421 }
duke@435 422
ysr@2071 423 void LinearAllocBlock::print_on(outputStream* st) const {
ysr@2071 424 st->print_cr(" LinearAllocBlock: ptr = " PTR_FORMAT ", word_size = " SIZE_FORMAT
ysr@2071 425 ", refillsize = " SIZE_FORMAT ", allocation_size_limit = " SIZE_FORMAT,
ysr@2071 426 _ptr, _word_size, _refillSize, _allocation_size_limit);
ysr@2071 427 }
ysr@2071 428
ysr@2071 429 void CompactibleFreeListSpace::print_on(outputStream* st) const {
ysr@2071 430 st->print_cr("COMPACTIBLE FREELIST SPACE");
ysr@2071 431 st->print_cr(" Space:");
ysr@2071 432 Space::print_on(st);
ysr@2071 433
ysr@2071 434 st->print_cr("promoInfo:");
ysr@2071 435 _promoInfo.print_on(st);
ysr@2071 436
ysr@2071 437 st->print_cr("_smallLinearAllocBlock");
ysr@2071 438 _smallLinearAllocBlock.print_on(st);
ysr@2071 439
ysr@2071 440 // dump_memory_block(_smallLinearAllocBlock->_ptr, 128);
ysr@2071 441
ysr@2071 442 st->print_cr(" _fitStrategy = %s, _adaptive_freelists = %s",
ysr@2071 443 _fitStrategy?"true":"false", _adaptive_freelists?"true":"false");
ysr@2071 444 }
ysr@2071 445
ysr@1580 446 void CompactibleFreeListSpace::print_indexed_free_lists(outputStream* st)
ysr@1580 447 const {
ysr@1580 448 reportIndexedFreeListStatistics();
ysr@1580 449 gclog_or_tty->print_cr("Layout of Indexed Freelists");
ysr@1580 450 gclog_or_tty->print_cr("---------------------------");
ysr@1580 451 FreeList::print_labels_on(st, "size");
ysr@1580 452 for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
ysr@1580 453 _indexedFreeList[i].print_on(gclog_or_tty);
ysr@1580 454 for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
ysr@1580 455 fc = fc->next()) {
ysr@1580 456 gclog_or_tty->print_cr("\t[" PTR_FORMAT "," PTR_FORMAT ") %s",
ysr@1580 457 fc, (HeapWord*)fc + i,
ysr@1580 458 fc->cantCoalesce() ? "\t CC" : "");
ysr@1580 459 }
ysr@1580 460 }
ysr@1580 461 }
ysr@1580 462
ysr@1580 463 void CompactibleFreeListSpace::print_promo_info_blocks(outputStream* st)
ysr@1580 464 const {
ysr@1580 465 _promoInfo.print_on(st);
ysr@1580 466 }
ysr@1580 467
ysr@1580 468 void CompactibleFreeListSpace::print_dictionary_free_lists(outputStream* st)
ysr@1580 469 const {
ysr@1580 470 _dictionary->reportStatistics();
ysr@1580 471 st->print_cr("Layout of Freelists in Tree");
ysr@1580 472 st->print_cr("---------------------------");
ysr@1580 473 _dictionary->print_free_lists(st);
ysr@1580 474 }
ysr@1580 475
ysr@1580 476 class BlkPrintingClosure: public BlkClosure {
ysr@1580 477 const CMSCollector* _collector;
ysr@1580 478 const CompactibleFreeListSpace* _sp;
ysr@1580 479 const CMSBitMap* _live_bit_map;
ysr@1580 480 const bool _post_remark;
ysr@1580 481 outputStream* _st;
ysr@1580 482 public:
ysr@1580 483 BlkPrintingClosure(const CMSCollector* collector,
ysr@1580 484 const CompactibleFreeListSpace* sp,
ysr@1580 485 const CMSBitMap* live_bit_map,
ysr@1580 486 outputStream* st):
ysr@1580 487 _collector(collector),
ysr@1580 488 _sp(sp),
ysr@1580 489 _live_bit_map(live_bit_map),
ysr@1580 490 _post_remark(collector->abstract_state() > CMSCollector::FinalMarking),
ysr@1580 491 _st(st) { }
ysr@1580 492 size_t do_blk(HeapWord* addr);
ysr@1580 493 };
ysr@1580 494
ysr@1580 495 size_t BlkPrintingClosure::do_blk(HeapWord* addr) {
ysr@1580 496 size_t sz = _sp->block_size_no_stall(addr, _collector);
ysr@1580 497 assert(sz != 0, "Should always be able to compute a size");
ysr@1580 498 if (_sp->block_is_obj(addr)) {
ysr@1580 499 const bool dead = _post_remark && !_live_bit_map->isMarked(addr);
ysr@1580 500 _st->print_cr(PTR_FORMAT ": %s object of size " SIZE_FORMAT "%s",
ysr@1580 501 addr,
ysr@1580 502 dead ? "dead" : "live",
ysr@1580 503 sz,
ysr@1580 504 (!dead && CMSPrintObjectsInDump) ? ":" : ".");
ysr@1580 505 if (CMSPrintObjectsInDump && !dead) {
ysr@1580 506 oop(addr)->print_on(_st);
ysr@1580 507 _st->print_cr("--------------------------------------");
ysr@1580 508 }
ysr@1580 509 } else { // free block
ysr@1580 510 _st->print_cr(PTR_FORMAT ": free block of size " SIZE_FORMAT "%s",
ysr@1580 511 addr, sz, CMSPrintChunksInDump ? ":" : ".");
ysr@1580 512 if (CMSPrintChunksInDump) {
ysr@1580 513 ((FreeChunk*)addr)->print_on(_st);
ysr@1580 514 _st->print_cr("--------------------------------------");
ysr@1580 515 }
ysr@1580 516 }
ysr@1580 517 return sz;
ysr@1580 518 }
ysr@1580 519
ysr@1580 520 void CompactibleFreeListSpace::dump_at_safepoint_with_locks(CMSCollector* c,
ysr@1580 521 outputStream* st) {
ysr@1580 522 st->print_cr("\n=========================");
ysr@1580 523 st->print_cr("Block layout in CMS Heap:");
ysr@1580 524 st->print_cr("=========================");
ysr@1580 525 BlkPrintingClosure bpcl(c, this, c->markBitMap(), st);
ysr@1580 526 blk_iterate(&bpcl);
ysr@1580 527
ysr@1580 528 st->print_cr("\n=======================================");
ysr@1580 529 st->print_cr("Order & Layout of Promotion Info Blocks");
ysr@1580 530 st->print_cr("=======================================");
ysr@1580 531 print_promo_info_blocks(st);
ysr@1580 532
ysr@1580 533 st->print_cr("\n===========================");
ysr@1580 534 st->print_cr("Order of Indexed Free Lists");
ysr@1580 535 st->print_cr("===========================");
ysr@1580 536 print_indexed_free_lists(st);
ysr@1580 537
ysr@1580 538 st->print_cr("\n=================================");
ysr@1580 539 st->print_cr("Order of Free Lists in Dictionary");
ysr@1580 540 st->print_cr("=================================");
ysr@1580 541 print_dictionary_free_lists(st);
ysr@1580 542 }
ysr@1580 543
ysr@1580 544
duke@435 545 void CompactibleFreeListSpace::reportFreeListStatistics() const {
duke@435 546 assert_lock_strong(&_freelistLock);
duke@435 547 assert(PrintFLSStatistics != 0, "Reporting error");
duke@435 548 _dictionary->reportStatistics();
duke@435 549 if (PrintFLSStatistics > 1) {
duke@435 550 reportIndexedFreeListStatistics();
duke@435 551 size_t totalSize = totalSizeInIndexedFreeLists() +
duke@435 552 _dictionary->totalChunkSize(DEBUG_ONLY(freelistLock()));
duke@435 553 gclog_or_tty->print(" free=" SIZE_FORMAT " frag=%1.4f\n", totalSize, flsFrag());
duke@435 554 }
duke@435 555 }
duke@435 556
duke@435 557 void CompactibleFreeListSpace::reportIndexedFreeListStatistics() const {
duke@435 558 assert_lock_strong(&_freelistLock);
duke@435 559 gclog_or_tty->print("Statistics for IndexedFreeLists:\n"
duke@435 560 "--------------------------------\n");
duke@435 561 size_t totalSize = totalSizeInIndexedFreeLists();
duke@435 562 size_t freeBlocks = numFreeBlocksInIndexedFreeLists();
duke@435 563 gclog_or_tty->print("Total Free Space: " SIZE_FORMAT "\n", totalSize);
duke@435 564 gclog_or_tty->print("Max Chunk Size: " SIZE_FORMAT "\n", maxChunkSizeInIndexedFreeLists());
duke@435 565 gclog_or_tty->print("Number of Blocks: " SIZE_FORMAT "\n", freeBlocks);
duke@435 566 if (freeBlocks != 0) {
duke@435 567 gclog_or_tty->print("Av. Block Size: " SIZE_FORMAT "\n", totalSize/freeBlocks);
duke@435 568 }
duke@435 569 }
duke@435 570
duke@435 571 size_t CompactibleFreeListSpace::numFreeBlocksInIndexedFreeLists() const {
duke@435 572 size_t res = 0;
duke@435 573 for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
duke@435 574 debug_only(
duke@435 575 ssize_t recount = 0;
duke@435 576 for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
duke@435 577 fc = fc->next()) {
duke@435 578 recount += 1;
duke@435 579 }
duke@435 580 assert(recount == _indexedFreeList[i].count(),
duke@435 581 "Incorrect count in list");
duke@435 582 )
duke@435 583 res += _indexedFreeList[i].count();
duke@435 584 }
duke@435 585 return res;
duke@435 586 }
duke@435 587
duke@435 588 size_t CompactibleFreeListSpace::maxChunkSizeInIndexedFreeLists() const {
duke@435 589 for (size_t i = IndexSetSize - 1; i != 0; i -= IndexSetStride) {
duke@435 590 if (_indexedFreeList[i].head() != NULL) {
duke@435 591 assert(_indexedFreeList[i].count() != 0, "Inconsistent FreeList");
duke@435 592 return (size_t)i;
duke@435 593 }
duke@435 594 }
duke@435 595 return 0;
duke@435 596 }
duke@435 597
duke@435 598 void CompactibleFreeListSpace::set_end(HeapWord* value) {
duke@435 599 HeapWord* prevEnd = end();
duke@435 600 assert(prevEnd != value, "unnecessary set_end call");
ysr@2071 601 assert(prevEnd == NULL || !BlockOffsetArrayUseUnallocatedBlock || value >= unallocated_block(),
ysr@2071 602 "New end is below unallocated block");
duke@435 603 _end = value;
duke@435 604 if (prevEnd != NULL) {
duke@435 605 // Resize the underlying block offset table.
duke@435 606 _bt.resize(pointer_delta(value, bottom()));
ysr@1580 607 if (value <= prevEnd) {
ysr@2071 608 assert(!BlockOffsetArrayUseUnallocatedBlock || value >= unallocated_block(),
ysr@2071 609 "New end is below unallocated block");
ysr@1580 610 } else {
ysr@1580 611 // Now, take this new chunk and add it to the free blocks.
ysr@1580 612 // Note that the BOT has not yet been updated for this block.
ysr@1580 613 size_t newFcSize = pointer_delta(value, prevEnd);
ysr@1580 614 // XXX This is REALLY UGLY and should be fixed up. XXX
ysr@1580 615 if (!_adaptive_freelists && _smallLinearAllocBlock._ptr == NULL) {
ysr@1580 616 // Mark the boundary of the new block in BOT
ysr@1580 617 _bt.mark_block(prevEnd, value);
ysr@1580 618 // put it all in the linAB
ysr@1580 619 if (ParallelGCThreads == 0) {
ysr@1580 620 _smallLinearAllocBlock._ptr = prevEnd;
ysr@1580 621 _smallLinearAllocBlock._word_size = newFcSize;
ysr@1580 622 repairLinearAllocBlock(&_smallLinearAllocBlock);
ysr@1580 623 } else { // ParallelGCThreads > 0
ysr@1580 624 MutexLockerEx x(parDictionaryAllocLock(),
ysr@1580 625 Mutex::_no_safepoint_check_flag);
ysr@1580 626 _smallLinearAllocBlock._ptr = prevEnd;
ysr@1580 627 _smallLinearAllocBlock._word_size = newFcSize;
ysr@1580 628 repairLinearAllocBlock(&_smallLinearAllocBlock);
ysr@1580 629 }
ysr@1580 630 // Births of chunks put into a LinAB are not recorded. Births
ysr@1580 631 // of chunks as they are allocated out of a LinAB are.
ysr@1580 632 } else {
ysr@1580 633 // Add the block to the free lists, if possible coalescing it
ysr@1580 634 // with the last free block, and update the BOT and census data.
ysr@1580 635 addChunkToFreeListsAtEndRecordingStats(prevEnd, newFcSize);
duke@435 636 }
duke@435 637 }
duke@435 638 }
duke@435 639 }
duke@435 640
duke@435 641 class FreeListSpace_DCTOC : public Filtering_DCTOC {
duke@435 642 CompactibleFreeListSpace* _cfls;
duke@435 643 CMSCollector* _collector;
duke@435 644 protected:
duke@435 645 // Override.
duke@435 646 #define walk_mem_region_with_cl_DECL(ClosureType) \
duke@435 647 virtual void walk_mem_region_with_cl(MemRegion mr, \
duke@435 648 HeapWord* bottom, HeapWord* top, \
duke@435 649 ClosureType* cl); \
duke@435 650 void walk_mem_region_with_cl_par(MemRegion mr, \
duke@435 651 HeapWord* bottom, HeapWord* top, \
duke@435 652 ClosureType* cl); \
duke@435 653 void walk_mem_region_with_cl_nopar(MemRegion mr, \
duke@435 654 HeapWord* bottom, HeapWord* top, \
duke@435 655 ClosureType* cl)
duke@435 656 walk_mem_region_with_cl_DECL(OopClosure);
duke@435 657 walk_mem_region_with_cl_DECL(FilteringClosure);
duke@435 658
duke@435 659 public:
duke@435 660 FreeListSpace_DCTOC(CompactibleFreeListSpace* sp,
duke@435 661 CMSCollector* collector,
duke@435 662 OopClosure* cl,
duke@435 663 CardTableModRefBS::PrecisionStyle precision,
duke@435 664 HeapWord* boundary) :
duke@435 665 Filtering_DCTOC(sp, cl, precision, boundary),
duke@435 666 _cfls(sp), _collector(collector) {}
duke@435 667 };
duke@435 668
duke@435 669 // We de-virtualize the block-related calls below, since we know that our
duke@435 670 // space is a CompactibleFreeListSpace.
jmasa@3294 671
duke@435 672 #define FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(ClosureType) \
duke@435 673 void FreeListSpace_DCTOC::walk_mem_region_with_cl(MemRegion mr, \
duke@435 674 HeapWord* bottom, \
duke@435 675 HeapWord* top, \
duke@435 676 ClosureType* cl) { \
jmasa@3294 677 bool is_par = SharedHeap::heap()->n_par_threads() > 0; \
jmasa@3294 678 if (is_par) { \
jmasa@3294 679 assert(SharedHeap::heap()->n_par_threads() == \
jmasa@3294 680 SharedHeap::heap()->workers()->active_workers(), "Mismatch"); \
duke@435 681 walk_mem_region_with_cl_par(mr, bottom, top, cl); \
duke@435 682 } else { \
duke@435 683 walk_mem_region_with_cl_nopar(mr, bottom, top, cl); \
duke@435 684 } \
duke@435 685 } \
duke@435 686 void FreeListSpace_DCTOC::walk_mem_region_with_cl_par(MemRegion mr, \
duke@435 687 HeapWord* bottom, \
duke@435 688 HeapWord* top, \
duke@435 689 ClosureType* cl) { \
duke@435 690 /* Skip parts that are before "mr", in case "block_start" sent us \
duke@435 691 back too far. */ \
duke@435 692 HeapWord* mr_start = mr.start(); \
duke@435 693 size_t bot_size = _cfls->CompactibleFreeListSpace::block_size(bottom); \
duke@435 694 HeapWord* next = bottom + bot_size; \
duke@435 695 while (next < mr_start) { \
duke@435 696 bottom = next; \
duke@435 697 bot_size = _cfls->CompactibleFreeListSpace::block_size(bottom); \
duke@435 698 next = bottom + bot_size; \
duke@435 699 } \
duke@435 700 \
duke@435 701 while (bottom < top) { \
duke@435 702 if (_cfls->CompactibleFreeListSpace::block_is_obj(bottom) && \
duke@435 703 !_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks( \
duke@435 704 oop(bottom)) && \
duke@435 705 !_collector->CMSCollector::is_dead_obj(oop(bottom))) { \
duke@435 706 size_t word_sz = oop(bottom)->oop_iterate(cl, mr); \
duke@435 707 bottom += _cfls->adjustObjectSize(word_sz); \
duke@435 708 } else { \
duke@435 709 bottom += _cfls->CompactibleFreeListSpace::block_size(bottom); \
duke@435 710 } \
duke@435 711 } \
duke@435 712 } \
duke@435 713 void FreeListSpace_DCTOC::walk_mem_region_with_cl_nopar(MemRegion mr, \
duke@435 714 HeapWord* bottom, \
duke@435 715 HeapWord* top, \
duke@435 716 ClosureType* cl) { \
duke@435 717 /* Skip parts that are before "mr", in case "block_start" sent us \
duke@435 718 back too far. */ \
duke@435 719 HeapWord* mr_start = mr.start(); \
duke@435 720 size_t bot_size = _cfls->CompactibleFreeListSpace::block_size_nopar(bottom); \
duke@435 721 HeapWord* next = bottom + bot_size; \
duke@435 722 while (next < mr_start) { \
duke@435 723 bottom = next; \
duke@435 724 bot_size = _cfls->CompactibleFreeListSpace::block_size_nopar(bottom); \
duke@435 725 next = bottom + bot_size; \
duke@435 726 } \
duke@435 727 \
duke@435 728 while (bottom < top) { \
duke@435 729 if (_cfls->CompactibleFreeListSpace::block_is_obj_nopar(bottom) && \
duke@435 730 !_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks( \
duke@435 731 oop(bottom)) && \
duke@435 732 !_collector->CMSCollector::is_dead_obj(oop(bottom))) { \
duke@435 733 size_t word_sz = oop(bottom)->oop_iterate(cl, mr); \
duke@435 734 bottom += _cfls->adjustObjectSize(word_sz); \
duke@435 735 } else { \
duke@435 736 bottom += _cfls->CompactibleFreeListSpace::block_size_nopar(bottom); \
duke@435 737 } \
duke@435 738 } \
duke@435 739 }
duke@435 740
duke@435 741 // (There are only two of these, rather than N, because the split is due
duke@435 742 // only to the introduction of the FilteringClosure, a local part of the
duke@435 743 // impl of this abstraction.)
duke@435 744 FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(OopClosure)
duke@435 745 FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(FilteringClosure)
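The macro above generates one set of walkers per closure type (OopClosure and FilteringClosure), and the explicitly qualified CompactibleFreeListSpace:: calls inside it avoid virtual dispatch. A minimal standalone sketch of the define-once, instantiate-per-type macro pattern, using hypothetical visitor types unrelated to HotSpot:

#include <cstdio>

struct CountingVisitor { int count = 0; void visit(int v) { count += v; } };
struct PrintingVisitor { void visit(int v) { std::printf("%d\n", v); } };

// Define the walker body once; instantiate it for each concrete visitor type,
// so the visit() calls are resolved at compile time (no virtual dispatch).
#define DEFINE_WALKER(VisitorType)                             \
  static void walk_with(VisitorType* v, const int* p, int n) { \
    for (int i = 0; i < n; i++) v->visit(p[i]);                \
  }

DEFINE_WALKER(CountingVisitor)
DEFINE_WALKER(PrintingVisitor)

int main() {
  const int data[] = {1, 2, 3};
  CountingVisitor c;
  walk_with(&c, data, 3);   // uses the CountingVisitor instantiation
  PrintingVisitor p;
  walk_with(&p, data, 3);   // uses the PrintingVisitor instantiation
  return c.count == 6 ? 0 : 1;
}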
duke@435 746
duke@435 747 DirtyCardToOopClosure*
duke@435 748 CompactibleFreeListSpace::new_dcto_cl(OopClosure* cl,
duke@435 749 CardTableModRefBS::PrecisionStyle precision,
duke@435 750 HeapWord* boundary) {
duke@435 751 return new FreeListSpace_DCTOC(this, _collector, cl, precision, boundary);
duke@435 752 }
duke@435 753
duke@435 754
duke@435 755 // Note on locking for the space iteration functions:
duke@435 756 // since the collector's iteration activities are concurrent with
duke@435 757 // allocation activities by mutators, absent a suitable mutual exclusion
duke@435 758 // mechanism the iterators may go awry. For instance, a block being iterated
duke@435 759 // may suddenly be allocated, or divided up with part of it allocated, and
duke@435 760 // so on.
duke@435 761
duke@435 762 // Apply the given closure to each block in the space.
duke@435 763 void CompactibleFreeListSpace::blk_iterate_careful(BlkClosureCareful* cl) {
duke@435 764 assert_lock_strong(freelistLock());
duke@435 765 HeapWord *cur, *limit;
duke@435 766 for (cur = bottom(), limit = end(); cur < limit;
duke@435 767 cur += cl->do_blk_careful(cur));
duke@435 768 }
duke@435 769
duke@435 770 // Apply the given closure to each block in the space.
duke@435 771 void CompactibleFreeListSpace::blk_iterate(BlkClosure* cl) {
duke@435 772 assert_lock_strong(freelistLock());
duke@435 773 HeapWord *cur, *limit;
duke@435 774 for (cur = bottom(), limit = end(); cur < limit;
duke@435 775 cur += cl->do_blk(cur));
duke@435 776 }
duke@435 777
duke@435 778 // Apply the given closure to each oop in the space.
duke@435 779 void CompactibleFreeListSpace::oop_iterate(OopClosure* cl) {
duke@435 780 assert_lock_strong(freelistLock());
duke@435 781 HeapWord *cur, *limit;
duke@435 782 size_t curSize;
duke@435 783 for (cur = bottom(), limit = end(); cur < limit;
duke@435 784 cur += curSize) {
duke@435 785 curSize = block_size(cur);
duke@435 786 if (block_is_obj(cur)) {
duke@435 787 oop(cur)->oop_iterate(cl);
duke@435 788 }
duke@435 789 }
duke@435 790 }
duke@435 791
duke@435 792 // Apply the given closure to each oop in the intersection of the space and the given memory region.
duke@435 793 void CompactibleFreeListSpace::oop_iterate(MemRegion mr, OopClosure* cl) {
duke@435 794 assert_lock_strong(freelistLock());
duke@435 795 if (is_empty()) {
duke@435 796 return;
duke@435 797 }
duke@435 798 MemRegion cur = MemRegion(bottom(), end());
duke@435 799 mr = mr.intersection(cur);
duke@435 800 if (mr.is_empty()) {
duke@435 801 return;
duke@435 802 }
duke@435 803 if (mr.equals(cur)) {
duke@435 804 oop_iterate(cl);
duke@435 805 return;
duke@435 806 }
duke@435 807 assert(mr.end() <= end(), "just took an intersection above");
duke@435 808 HeapWord* obj_addr = block_start(mr.start());
duke@435 809 HeapWord* t = mr.end();
duke@435 810
duke@435 811 SpaceMemRegionOopsIterClosure smr_blk(cl, mr);
duke@435 812 if (block_is_obj(obj_addr)) {
duke@435 813 // Handle first object specially.
duke@435 814 oop obj = oop(obj_addr);
duke@435 815 obj_addr += adjustObjectSize(obj->oop_iterate(&smr_blk));
duke@435 816 } else {
duke@435 817 FreeChunk* fc = (FreeChunk*)obj_addr;
duke@435 818 obj_addr += fc->size();
duke@435 819 }
duke@435 820 while (obj_addr < t) {
duke@435 821 HeapWord* obj = obj_addr;
duke@435 822 obj_addr += block_size(obj_addr);
duke@435 823 // If "obj_addr" is not greater than top, then the
duke@435 824 // entire object "obj" is within the region.
duke@435 825 if (obj_addr <= t) {
duke@435 826 if (block_is_obj(obj)) {
duke@435 827 oop(obj)->oop_iterate(cl);
duke@435 828 }
duke@435 829 } else {
duke@435 830 // "obj" extends beyond end of region
duke@435 831 if (block_is_obj(obj)) {
duke@435 832 oop(obj)->oop_iterate(&smr_blk);
duke@435 833 }
duke@435 834 break;
duke@435 835 }
duke@435 836 }
duke@435 837 }
duke@435 838
duke@435 839 // NOTE: In the following methods, in order to safely be able to
duke@435 840 // apply the closure to an object, we need to be sure that the
duke@435 841 // object has been initialized. We are guaranteed that an object
duke@435 842 // is initialized if we are holding the Heap_lock with the
duke@435 843 // world stopped.
duke@435 844 void CompactibleFreeListSpace::verify_objects_initialized() const {
duke@435 845 if (is_init_completed()) {
duke@435 846 assert_locked_or_safepoint(Heap_lock);
duke@435 847 if (Universe::is_fully_initialized()) {
duke@435 848 guarantee(SafepointSynchronize::is_at_safepoint(),
duke@435 849 "Required for objects to be initialized");
duke@435 850 }
duke@435 851 } // else make a concession at vm start-up
duke@435 852 }
duke@435 853
duke@435 854 // Apply the given closure to each object in the space
duke@435 855 void CompactibleFreeListSpace::object_iterate(ObjectClosure* blk) {
duke@435 856 assert_lock_strong(freelistLock());
duke@435 857 NOT_PRODUCT(verify_objects_initialized());
duke@435 858 HeapWord *cur, *limit;
duke@435 859 size_t curSize;
duke@435 860 for (cur = bottom(), limit = end(); cur < limit;
duke@435 861 cur += curSize) {
duke@435 862 curSize = block_size(cur);
duke@435 863 if (block_is_obj(cur)) {
duke@435 864 blk->do_object(oop(cur));
duke@435 865 }
duke@435 866 }
duke@435 867 }
duke@435 868
jmasa@952 869 // Apply the given closure to each live object in the space
jmasa@952 870 // The usage of CompactibleFreeListSpace
jmasa@952 871 // by the ConcurrentMarkSweepGeneration for concurrent GCs allows
jmasa@952 872 // objects in the space to hold references to objects that are no longer
jmasa@952 873 // valid. For example, an object may reference another object
jmasa@952 874 // that has already been swept up (collected). This method uses
jmasa@952 875 // obj_is_alive() to determine whether it is safe to apply the closure to
jmasa@952 876 // an object. See obj_is_alive() for details on how liveness of an
jmasa@952 877 // object is decided.
jmasa@952 878
jmasa@952 879 void CompactibleFreeListSpace::safe_object_iterate(ObjectClosure* blk) {
jmasa@952 880 assert_lock_strong(freelistLock());
jmasa@952 881 NOT_PRODUCT(verify_objects_initialized());
jmasa@952 882 HeapWord *cur, *limit;
jmasa@952 883 size_t curSize;
jmasa@952 884 for (cur = bottom(), limit = end(); cur < limit;
jmasa@952 885 cur += curSize) {
jmasa@952 886 curSize = block_size(cur);
jmasa@952 887 if (block_is_obj(cur) && obj_is_alive(cur)) {
jmasa@952 888 blk->do_object(oop(cur));
jmasa@952 889 }
jmasa@952 890 }
jmasa@952 891 }
jmasa@952 892
duke@435 893 void CompactibleFreeListSpace::object_iterate_mem(MemRegion mr,
duke@435 894 UpwardsObjectClosure* cl) {
ysr@1580 895 assert_locked(freelistLock());
duke@435 896 NOT_PRODUCT(verify_objects_initialized());
duke@435 897 Space::object_iterate_mem(mr, cl);
duke@435 898 }
duke@435 899
duke@435 900 // Callers of this iterator beware: The closure application should
duke@435 901 // be robust in the face of uninitialized objects and should (always)
duke@435 902 // return a correct size so that the next addr + size below gives us a
duke@435 903 // valid block boundary. [See for instance,
duke@435 904 // ScanMarkedObjectsAgainCarefullyClosure::do_object_careful()
duke@435 905 // in ConcurrentMarkSweepGeneration.cpp.]
duke@435 906 HeapWord*
duke@435 907 CompactibleFreeListSpace::object_iterate_careful(ObjectClosureCareful* cl) {
duke@435 908 assert_lock_strong(freelistLock());
duke@435 909 HeapWord *addr, *last;
duke@435 910 size_t size;
duke@435 911 for (addr = bottom(), last = end();
duke@435 912 addr < last; addr += size) {
duke@435 913 FreeChunk* fc = (FreeChunk*)addr;
duke@435 914 if (fc->isFree()) {
duke@435 915 // Since we hold the free list lock, which protects direct
duke@435 916 // allocation in this generation by mutators, a free object
duke@435 917 // will remain free throughout this iteration code.
duke@435 918 size = fc->size();
duke@435 919 } else {
duke@435 920 // Note that the object need not necessarily be initialized,
duke@435 921 // because (for instance) the free list lock does NOT protect
duke@435 922 // object initialization. The closure application below must
duke@435 923 // therefore be correct in the face of uninitialized objects.
duke@435 924 size = cl->do_object_careful(oop(addr));
duke@435 925 if (size == 0) {
duke@435 926 // An unparsable object found. Signal early termination.
duke@435 927 return addr;
duke@435 928 }
duke@435 929 }
duke@435 930 }
duke@435 931 return NULL;
duke@435 932 }
duke@435 933
duke@435 934 // Callers of this iterator beware: The closure application should
duke@435 935 // be robust in the face of uninitialized objects and should (always)
duke@435 936 // return a correct size so that the next addr + size below gives us a
duke@435 937 // valid block boundary. [See for instance,
duke@435 938 // ScanMarkedObjectsAgainCarefullyClosure::do_object_careful()
duke@435 939 // in ConcurrentMarkSweepGeneration.cpp.]
duke@435 940 HeapWord*
duke@435 941 CompactibleFreeListSpace::object_iterate_careful_m(MemRegion mr,
duke@435 942 ObjectClosureCareful* cl) {
duke@435 943 assert_lock_strong(freelistLock());
duke@435 944 // Can't use used_region() below because it may not necessarily
duke@435 945 // be the same as [bottom(),end()); although we could
duke@435 946 // use [used_region().start(),round_to(used_region().end(),CardSize)),
duke@435 947 // that appears too cumbersome, so we just do the simpler check
duke@435 948 // in the assertion below.
duke@435 949 assert(!mr.is_empty() && MemRegion(bottom(),end()).contains(mr),
duke@435 950 "mr should be non-empty and within used space");
duke@435 951 HeapWord *addr, *end;
duke@435 952 size_t size;
duke@435 953 for (addr = block_start_careful(mr.start()), end = mr.end();
duke@435 954 addr < end; addr += size) {
duke@435 955 FreeChunk* fc = (FreeChunk*)addr;
duke@435 956 if (fc->isFree()) {
duke@435 957 // Since we hold the free list lock, which protects direct
duke@435 958 // allocation in this generation by mutators, a free object
duke@435 959 // will remain free throughout this iteration code.
duke@435 960 size = fc->size();
duke@435 961 } else {
duke@435 962 // Note that the object need not necessarily be initialized,
duke@435 963 // because (for instance) the free list lock does NOT protect
duke@435 964 // object initialization. The closure application below must
duke@435 965 // therefore be correct in the face of uninitialized objects.
duke@435 966 size = cl->do_object_careful_m(oop(addr), mr);
duke@435 967 if (size == 0) {
duke@435 968 // An unparsable object found. Signal early termination.
duke@435 969 return addr;
duke@435 970 }
duke@435 971 }
duke@435 972 }
duke@435 973 return NULL;
duke@435 974 }
duke@435 975
duke@435 976
ysr@777 977 HeapWord* CompactibleFreeListSpace::block_start_const(const void* p) const {
duke@435 978 NOT_PRODUCT(verify_objects_initialized());
duke@435 979 return _bt.block_start(p);
duke@435 980 }
duke@435 981
duke@435 982 HeapWord* CompactibleFreeListSpace::block_start_careful(const void* p) const {
duke@435 983 return _bt.block_start_careful(p);
duke@435 984 }
duke@435 985
duke@435 986 size_t CompactibleFreeListSpace::block_size(const HeapWord* p) const {
duke@435 987 NOT_PRODUCT(verify_objects_initialized());
duke@435 988 // This must be volatile, or else there is a danger that the compiler
duke@435 989 // will compile the code below into a sometimes-infinite loop, by keeping
duke@435 990 // the value read the first time in a register.
duke@435 991 while (true) {
duke@435 992 // We must do this until we get a consistent view of the object.
coleenp@622 993 if (FreeChunk::indicatesFreeChunk(p)) {
coleenp@622 994 volatile FreeChunk* fc = (volatile FreeChunk*)p;
coleenp@622 995 size_t res = fc->size();
coleenp@622 996 // If the object is still a free chunk, return the size, else it
coleenp@622 997 // has been allocated so try again.
coleenp@622 998 if (FreeChunk::indicatesFreeChunk(p)) {
duke@435 999 assert(res != 0, "Block size should not be 0");
duke@435 1000 return res;
duke@435 1001 }
coleenp@622 1002 } else {
coleenp@622 1003 // must read from what 'p' points to in each loop.
coleenp@622 1004 klassOop k = ((volatile oopDesc*)p)->klass_or_null();
coleenp@622 1005 if (k != NULL) {
ysr@2071 1006 assert(k->is_oop(true /* ignore mark word */), "Should be klass oop");
coleenp@622 1007 oop o = (oop)p;
coleenp@622 1008 assert(o->is_parsable(), "Should be parsable");
coleenp@622 1009 assert(o->is_oop(true /* ignore mark word */), "Should be an oop.");
coleenp@622 1010 size_t res = o->size_given_klass(k->klass_part());
coleenp@622 1011 res = adjustObjectSize(res);
coleenp@622 1012 assert(res != 0, "Block size should not be 0");
coleenp@622 1013 return res;
coleenp@622 1014 }
duke@435 1015 }
duke@435 1016 }
duke@435 1017 }
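block_size() above copes with racing mutators by reading the size through a volatile view and then re-checking that the block still looks like a free chunk; if it does not, it falls back to reading the (now installed) klass. A minimal standalone sketch of that read-then-recheck idiom, using a hypothetical atomic block header rather than FreeChunk/klassOop, and making no claim about the VM's actual memory-ordering requirements:

#include <atomic>
#include <cstddef>

// Hypothetical block header: the free flag is cleared and a real object size
// is published by a racing allocator; both fields are accessed atomically.
struct BlockHeader {
  std::atomic<bool>   is_free;
  std::atomic<size_t> size_in_words;
};

// Returns a size that was valid at some point during the call: either the
// free-chunk size (confirmed by re-checking the flag after the read) or the
// size published once the allocator finished initializing the object.
static size_t block_size_sketch(const BlockHeader* h) {
  for (;;) {
    if (h->is_free.load(std::memory_order_acquire)) {
      size_t res = h->size_in_words.load(std::memory_order_acquire);
      // If the block is still free, the size we read belongs to the free
      // chunk; otherwise it may be stale, so go around and look again.
      if (h->is_free.load(std::memory_order_acquire)) {
        return res;
      }
    } else {
      size_t res = h->size_in_words.load(std::memory_order_acquire);
      if (res != 0) {  // allocator has finished publishing the object's size
        return res;
      }
    }
  }
}

int main() {
  BlockHeader h;
  h.is_free.store(true, std::memory_order_release);
  h.size_in_words.store(5, std::memory_order_release);
  return block_size_sketch(&h) == 5 ? 0 : 1;
}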
duke@435 1018
duke@435 1019 // A variant of the above that uses the Printezis bits for
duke@435 1020 // unparsable but allocated objects. This avoids any possible
duke@435 1021 // stalls waiting for mutators to initialize objects, and is
duke@435 1022 // thus potentially faster than the variant above. However,
duke@435 1023 // this variant may return a zero size for a block that is
duke@435 1024 // under mutation and for which a consistent size cannot be
duke@435 1025 // inferred without stalling; see CMSCollector::block_size_if_printezis_bits().
duke@435 1026 size_t CompactibleFreeListSpace::block_size_no_stall(HeapWord* p,
duke@435 1027 const CMSCollector* c)
duke@435 1028 const {
duke@435 1029 assert(MemRegion(bottom(), end()).contains(p), "p not in space");
duke@435 1030 // This must be volatile, or else there is a danger that the compiler
duke@435 1031 // will compile the code below into a sometimes-infinite loop, by keeping
duke@435 1032 // the value read the first time in a register.
duke@435 1033 DEBUG_ONLY(uint loops = 0;)
duke@435 1034 while (true) {
duke@435 1035 // We must do this until we get a consistent view of the object.
coleenp@622 1036 if (FreeChunk::indicatesFreeChunk(p)) {
coleenp@622 1037 volatile FreeChunk* fc = (volatile FreeChunk*)p;
coleenp@622 1038 size_t res = fc->size();
coleenp@622 1039 if (FreeChunk::indicatesFreeChunk(p)) {
duke@435 1040 assert(res != 0, "Block size should not be 0");
duke@435 1041 assert(loops == 0, "Should be 0");
duke@435 1042 return res;
duke@435 1043 }
duke@435 1044 } else {
coleenp@622 1045 // must read from what 'p' points to in each loop.
coleenp@622 1046 klassOop k = ((volatile oopDesc*)p)->klass_or_null();
ysr@2533 1047 // We trust the size of any object that has a non-NULL
ysr@2533 1048 // klass and (for those in the perm gen) is parsable
ysr@2533 1049 // -- irrespective of its conc_safe-ty.
ysr@2533 1050 if (k != NULL && ((oopDesc*)p)->is_parsable()) {
coleenp@622 1051 assert(k->is_oop(), "Should really be klass oop.");
coleenp@622 1052 oop o = (oop)p;
coleenp@622 1053 assert(o->is_oop(), "Should be an oop");
coleenp@622 1054 size_t res = o->size_given_klass(k->klass_part());
coleenp@622 1055 res = adjustObjectSize(res);
coleenp@622 1056 assert(res != 0, "Block size should not be 0");
coleenp@622 1057 return res;
coleenp@622 1058 } else {
ysr@2533 1059 // May return 0 if P-bits not present.
coleenp@622 1060 return c->block_size_if_printezis_bits(p);
coleenp@622 1061 }
duke@435 1062 }
duke@435 1063 assert(loops == 0, "Can loop at most once");
duke@435 1064 DEBUG_ONLY(loops++;)
duke@435 1065 }
duke@435 1066 }
duke@435 1067
duke@435 1068 size_t CompactibleFreeListSpace::block_size_nopar(const HeapWord* p) const {
duke@435 1069 NOT_PRODUCT(verify_objects_initialized());
duke@435 1070 assert(MemRegion(bottom(), end()).contains(p), "p not in space");
duke@435 1071 FreeChunk* fc = (FreeChunk*)p;
duke@435 1072 if (fc->isFree()) {
duke@435 1073 return fc->size();
duke@435 1074 } else {
duke@435 1075 // Ignore mark word because this may be a recently promoted
duke@435 1076 // object whose mark word is used to chain together grey
duke@435 1077 // objects (the last one would have a null value).
duke@435 1078 assert(oop(p)->is_oop(true), "Should be an oop");
duke@435 1079 return adjustObjectSize(oop(p)->size());
duke@435 1080 }
duke@435 1081 }
duke@435 1082
duke@435 1083 // This implementation assumes that the property of "being an object" is
duke@435 1084 // stable. But being a free chunk may not be (because of parallel
duke@435 1085 // promotion.)
duke@435 1086 bool CompactibleFreeListSpace::block_is_obj(const HeapWord* p) const {
duke@435 1087 FreeChunk* fc = (FreeChunk*)p;
duke@435 1088 assert(is_in_reserved(p), "Should be in space");
duke@435 1089 // When doing a mark-sweep-compact of the CMS generation, this
duke@435 1090 // assertion may fail because prepare_for_compaction() uses
duke@435 1091 // space that is garbage to maintain information on ranges of
duke@435 1092 // live objects so that these live ranges can be moved as a whole.
duke@435 1093 // Comment out this assertion until that problem can be solved
duke@435 1094 // (i.e., that the block start calculation may look at objects
duke@435 1095 // at addresses below "p" in finding the object that contains "p",
duke@435 1096 // and those objects (if garbage) may have been modified to hold
duke@435 1097 // live range information).
jmasa@2188 1098 // assert(CollectedHeap::use_parallel_gc_threads() || _bt.block_start(p) == p,
jmasa@2188 1099 // "Should be a block boundary");
coleenp@622 1100 if (FreeChunk::indicatesFreeChunk(p)) return false;
coleenp@622 1101 klassOop k = oop(p)->klass_or_null();
duke@435 1102 if (k != NULL) {
duke@435 1103 // Ignore mark word because it may have been used to
duke@435 1104 // chain together promoted objects (the last one
duke@435 1105 // would have a null value).
duke@435 1106 assert(oop(p)->is_oop(true), "Should be an oop");
duke@435 1107 return true;
duke@435 1108 } else {
duke@435 1109 return false; // Was not an object at the start of collection.
duke@435 1110 }
duke@435 1111 }
duke@435 1112
duke@435 1113 // Check if the object is alive. This fact is checked either by consulting
duke@435 1114 // the main marking bitmap in the sweeping phase or, if it's a permanent
duke@435 1115 // generation and we're not in the sweeping phase, by checking the
duke@435 1116 // perm_gen_verify_bit_map where we store the "deadness" information if
duke@435 1117 // we did not sweep the perm gen in the most recent previous GC cycle.
duke@435 1118 bool CompactibleFreeListSpace::obj_is_alive(const HeapWord* p) const {
ysr@2301 1119 assert(SafepointSynchronize::is_at_safepoint() || !is_init_completed(),
ysr@2301 1120 "Else races are possible");
ysr@2293 1121 assert(block_is_obj(p), "The address should point to an object");
duke@435 1122
duke@435 1123 // If we're sweeping, we use object liveness information from the main bit map
duke@435 1124 // for both perm gen and old gen.
duke@435 1125 // We don't need to lock the bitmap (live_map or dead_map below), because
duke@435 1126 // EITHER we are in the middle of the sweeping phase, and the
duke@435 1127 // main marking bit map (live_map below) is locked,
duke@435 1128 // OR we're in other phases and perm_gen_verify_bit_map (dead_map below)
duke@435 1129 // is stable, because it's mutated only in the sweeping phase.
ysr@2293 1130 // NOTE: This method is also used by jmap where, if class unloading is
ysr@2293 1131 // off, the results can return "false" for legitimate perm objects,
ysr@2293 1132 // when we are not in the midst of a sweeping phase, which can result
ysr@2293 1133 // in jmap not reporting certain perm gen objects. This will be moot
ysr@2293 1134 // if/when the perm gen goes away in the future.
duke@435 1135 if (_collector->abstract_state() == CMSCollector::Sweeping) {
duke@435 1136 CMSBitMap* live_map = _collector->markBitMap();
ysr@2293 1137 return live_map->par_isMarked((HeapWord*) p);
duke@435 1138 } else {
duke@435 1139 // If we're not currently sweeping and we haven't swept the perm gen in
duke@435 1140 // the previous concurrent cycle then we may have dead but unswept objects
duke@435 1141 // in the perm gen. In this case, we use the "deadness" information
duke@435 1142 // that we had saved in perm_gen_verify_bit_map at the last sweep.
duke@435 1143 if (!CMSClassUnloadingEnabled && _collector->_permGen->reserved().contains(p)) {
duke@435 1144 if (_collector->verifying()) {
duke@435 1145 CMSBitMap* dead_map = _collector->perm_gen_verify_bit_map();
duke@435 1146 // Object is marked in the dead_map bitmap at the previous sweep
duke@435 1147 // when we know that it's dead; if the bitmap is not allocated then
duke@435 1148 // the object is alive.
duke@435 1149 return (dead_map->sizeInBits() == 0) // bit_map has not been allocated; treat object as alive
duke@435 1150 || !dead_map->par_isMarked((HeapWord*) p);
duke@435 1151 } else {
duke@435 1152 return false; // We can't say for sure if it's live, so we say that it's dead.
duke@435 1153 }
duke@435 1154 }
duke@435 1155 }
duke@435 1156 return true;
duke@435 1157 }
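
// The decision above, restated as a minimal standalone sketch (not used by
// the collector). All parameters are hypothetical stand-ins for the state
// consulted by obj_is_alive(): the collector phase, the perm-gen membership
// test, the main mark bit, and the saved perm_gen_verify_bit_map bit.
//
//   bool obj_is_alive_sketch(bool sweeping, bool in_perm_gen,
//                            bool class_unloading, bool verifying,
//                            bool live_bit, bool have_dead_map, bool dead_bit) {
//     if (sweeping)                         return live_bit;
//     if (!class_unloading && in_perm_gen) {
//       if (!verifying)                     return false;  // can't say for sure; report dead
//       return !have_dead_map || !dead_bit; // no saved bitmap => treated as alive
//     }
//     return true;
//   }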
duke@435 1158
duke@435 1159 bool CompactibleFreeListSpace::block_is_obj_nopar(const HeapWord* p) const {
duke@435 1160 FreeChunk* fc = (FreeChunk*)p;
duke@435 1161 assert(is_in_reserved(p), "Should be in space");
duke@435 1162 assert(_bt.block_start(p) == p, "Should be a block boundary");
duke@435 1163 if (!fc->isFree()) {
duke@435 1164 // Ignore mark word because it may have been used to
duke@435 1165 // chain together promoted objects (the last one
duke@435 1166 // would have a null value).
duke@435 1167 assert(oop(p)->is_oop(true), "Should be an oop");
duke@435 1168 return true;
duke@435 1169 }
duke@435 1170 return false;
duke@435 1171 }
duke@435 1172
duke@435 1173 // "MT-safe but not guaranteed MT-precise" (TM); you may get an
duke@435 1174 // approximate answer if you don't hold the freelistLock when you call this.
duke@435 1175 size_t CompactibleFreeListSpace::totalSizeInIndexedFreeLists() const {
duke@435 1176 size_t size = 0;
duke@435 1177 for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
duke@435 1178 debug_only(
duke@435 1179 // We may be calling here without the lock in which case we
duke@435 1180 // won't do this modest sanity check.
duke@435 1181 if (freelistLock()->owned_by_self()) {
duke@435 1182 size_t total_list_size = 0;
duke@435 1183 for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
duke@435 1184 fc = fc->next()) {
duke@435 1185 total_list_size += i;
duke@435 1186 }
duke@435 1187 assert(total_list_size == i * _indexedFreeList[i].count(),
duke@435 1188 "Count in list is incorrect");
duke@435 1189 }
duke@435 1190 )
duke@435 1191 size += i * _indexedFreeList[i].count();
duke@435 1192 }
duke@435 1193 return size;
duke@435 1194 }
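
// A minimal, self-contained sketch (standard C++ only, not used by the
// collector) of the computation above: the indexed lists bucket chunks by
// exact word size, so total free space is the sum of size * count over the
// populated indices. The function and parameter names are hypothetical.
//
//   size_t total_indexed_free_words(const size_t* counts, size_t index_set_size,
//                                   size_t start, size_t stride) {
//     size_t total = 0;
//     for (size_t i = start; i < index_set_size; i += stride) {
//       total += i * counts[i];   // counts[i] chunks, each of i words
//     }
//     return total;
//   }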
duke@435 1195
duke@435 1196 HeapWord* CompactibleFreeListSpace::par_allocate(size_t size) {
duke@435 1197 MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
duke@435 1198 return allocate(size);
duke@435 1199 }
duke@435 1200
duke@435 1201 HeapWord*
duke@435 1202 CompactibleFreeListSpace::getChunkFromSmallLinearAllocBlockRemainder(size_t size) {
duke@435 1203 return getChunkFromLinearAllocBlockRemainder(&_smallLinearAllocBlock, size);
duke@435 1204 }
duke@435 1205
duke@435 1206 HeapWord* CompactibleFreeListSpace::allocate(size_t size) {
duke@435 1207 assert_lock_strong(freelistLock());
duke@435 1208 HeapWord* res = NULL;
duke@435 1209 assert(size == adjustObjectSize(size),
duke@435 1210 "use adjustObjectSize() before calling into allocate()");
duke@435 1211
duke@435 1212 if (_adaptive_freelists) {
duke@435 1213 res = allocate_adaptive_freelists(size);
duke@435 1214 } else { // non-adaptive free lists
duke@435 1215 res = allocate_non_adaptive_freelists(size);
duke@435 1216 }
duke@435 1217
duke@435 1218 if (res != NULL) {
duke@435 1219 // check that res does lie in this space!
duke@435 1220 assert(is_in_reserved(res), "Not in this space!");
duke@435 1221 assert(is_aligned((void*)res), "alignment check");
duke@435 1222
duke@435 1223 FreeChunk* fc = (FreeChunk*)res;
duke@435 1224 fc->markNotFree();
duke@435 1225 assert(!fc->isFree(), "shouldn't be marked free");
coleenp@622 1226 assert(oop(fc)->klass_or_null() == NULL, "should look uninitialized");
duke@435 1227 // Verify that the block offset table shows this to
duke@435 1228 // be a single block, but not one which is unallocated.
duke@435 1229 _bt.verify_single_block(res, size);
duke@435 1230 _bt.verify_not_unallocated(res, size);
duke@435 1231 // mangle a just allocated object with a distinct pattern.
duke@435 1232 debug_only(fc->mangleAllocated(size));
duke@435 1233 }
duke@435 1234
duke@435 1235 return res;
duke@435 1236 }
duke@435 1237
duke@435 1238 HeapWord* CompactibleFreeListSpace::allocate_non_adaptive_freelists(size_t size) {
duke@435 1239 HeapWord* res = NULL;
duke@435 1240 // try and use linear allocation for smaller blocks
duke@435 1241 if (size < _smallLinearAllocBlock._allocation_size_limit) {
duke@435 1242 // if successful, the following also adjusts block offset table
duke@435 1243 res = getChunkFromSmallLinearAllocBlock(size);
duke@435 1244 }
duke@435 1245 // Else triage to indexed lists for smaller sizes
duke@435 1246 if (res == NULL) {
duke@435 1247 if (size < SmallForDictionary) {
duke@435 1248 res = (HeapWord*) getChunkFromIndexedFreeList(size);
duke@435 1249 } else {
duke@435 1250 // else get it from the big dictionary; if even this doesn't
duke@435 1251 // work we are out of luck.
duke@435 1252 res = (HeapWord*)getChunkFromDictionaryExact(size);
duke@435 1253 }
duke@435 1254 }
duke@435 1255
duke@435 1256 return res;
duke@435 1257 }
duke@435 1258
duke@435 1259 HeapWord* CompactibleFreeListSpace::allocate_adaptive_freelists(size_t size) {
duke@435 1260 assert_lock_strong(freelistLock());
duke@435 1261 HeapWord* res = NULL;
duke@435 1262 assert(size == adjustObjectSize(size),
duke@435 1263 "use adjustObjectSize() before calling into allocate()");
duke@435 1264
duke@435 1265 // Strategy
duke@435 1266 // if small
duke@435 1267 // exact size from small object indexed list if small
duke@435 1268 // small or large linear allocation block (linAB) as appropriate
duke@435 1269 // take from lists of greater sized chunks
duke@435 1270 // else
duke@435 1271 // dictionary
duke@435 1272 // small or large linear allocation block if it has the space
duke@435 1273 // Try allocating exact size from indexTable first
duke@435 1274 if (size < IndexSetSize) {
duke@435 1275 res = (HeapWord*) getChunkFromIndexedFreeList(size);
duke@435 1276 if (res != NULL) {
duke@435 1277 assert(res != (HeapWord*)_indexedFreeList[size].head(),
duke@435 1278 "Not removed from free list");
duke@435 1279 // no block offset table adjustment is necessary on blocks in
duke@435 1280 // the indexed lists.
duke@435 1281
duke@435 1282 // Try allocating from the small LinAB
duke@435 1283 } else if (size < _smallLinearAllocBlock._allocation_size_limit &&
duke@435 1284 (res = getChunkFromSmallLinearAllocBlock(size)) != NULL) {
duke@435 1285 // if successful, the above also adjusts block offset table
duke@435 1286 // Note that this call will refill the LinAB to
duke@435 1287 // satisfy the request. This is different than
duke@435 1288 // evm.
duke@435 1289 // Don't record chunk off a LinAB? smallSplitBirth(size);
duke@435 1290 } else {
duke@435 1291 // Raid the exact free lists larger than size, even if they are not
duke@435 1292 // overpopulated.
duke@435 1293 res = (HeapWord*) getChunkFromGreater(size);
duke@435 1294 }
duke@435 1295 } else {
duke@435 1296 // Big objects get allocated directly from the dictionary.
duke@435 1297 res = (HeapWord*) getChunkFromDictionaryExact(size);
duke@435 1298 if (res == NULL) {
duke@435 1299 // Try hard not to fail since an allocation failure will likely
duke@435 1300 // trigger a synchronous GC. Try to get the space from the
duke@435 1301 // allocation blocks.
duke@435 1302 res = getChunkFromSmallLinearAllocBlockRemainder(size);
duke@435 1303 }
duke@435 1304 }
duke@435 1305
duke@435 1306 return res;
duke@435 1307 }
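
// Illustrative usage sketch of the allocation entry points above (not part
// of the collector; 'sp' and 'raw_word_size' are hypothetical). Callers
// round the request with adjustObjectSize(); par_allocate() takes the free
// list lock itself and then dispatches through the adaptive or
// non-adaptive path shown above.
//
//   HeapWord* allocate_example(CompactibleFreeListSpace* sp, size_t raw_word_size) {
//     size_t sz = sp->adjustObjectSize(raw_word_size);
//     return sp->par_allocate(sz);   // may return NULL if no space is found
//   }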
duke@435 1308
duke@435 1309 // A worst-case estimate of the space required (in HeapWords) to expand the heap
duke@435 1310 // when promoting obj.
duke@435 1311 size_t CompactibleFreeListSpace::expansionSpaceRequired(size_t obj_size) const {
duke@435 1312 // Depending on the object size, expansion may require refilling either a
duke@435 1313 // bigLAB or a smallLAB plus refilling a PromotionInfo object. MinChunkSize
duke@435 1314 // is added because the dictionary may over-allocate to avoid fragmentation.
duke@435 1315 size_t space = obj_size;
duke@435 1316 if (!_adaptive_freelists) {
duke@435 1317 space = MAX2(space, _smallLinearAllocBlock._refillSize);
duke@435 1318 }
duke@435 1319 space += _promoInfo.refillSize() + 2 * MinChunkSize;
duke@435 1320 return space;
duke@435 1321 }
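
// Worked example (hypothetical numbers): with adaptive free lists, promoting
// a 100-word object when _promoInfo.refillSize() is 256 words and
// MinChunkSize is 8 words gives a worst-case estimate of
// 100 + 256 + 2 * 8 = 372 words; with non-adaptive free lists the 100 is
// first raised to the small linAB's _refillSize if that is larger.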
duke@435 1322
duke@435 1323 FreeChunk* CompactibleFreeListSpace::getChunkFromGreater(size_t numWords) {
duke@435 1324 FreeChunk* ret;
duke@435 1325
duke@435 1326 assert(numWords >= MinChunkSize, "Size is less than minimum");
duke@435 1327 assert(linearAllocationWouldFail() || bestFitFirst(),
duke@435 1328 "Should not be here");
duke@435 1329
duke@435 1330 size_t i;
duke@435 1331 size_t currSize = numWords + MinChunkSize;
duke@435 1332 assert(currSize % MinObjAlignment == 0, "currSize should be aligned");
duke@435 1333 for (i = currSize; i < IndexSetSize; i += IndexSetStride) {
duke@435 1334 FreeList* fl = &_indexedFreeList[i];
duke@435 1335 if (fl->head()) {
duke@435 1336 ret = getFromListGreater(fl, numWords);
duke@435 1337 assert(ret == NULL || ret->isFree(), "Should be returning a free chunk");
duke@435 1338 return ret;
duke@435 1339 }
duke@435 1340 }
duke@435 1341
duke@435 1342 currSize = MAX2((size_t)SmallForDictionary,
duke@435 1343 (size_t)(numWords + MinChunkSize));
duke@435 1344
duke@435 1345 /* Try to get a chunk that satisfies request, while avoiding
duke@435 1346 fragmentation that can't be handled. */
duke@435 1347 {
duke@435 1348 ret = dictionary()->getChunk(currSize);
duke@435 1349 if (ret != NULL) {
duke@435 1350 assert(ret->size() - numWords >= MinChunkSize,
duke@435 1351 "Chunk is too small");
duke@435 1352 _bt.allocated((HeapWord*)ret, ret->size());
duke@435 1353 /* Carve returned chunk. */
duke@435 1354 (void) splitChunkAndReturnRemainder(ret, numWords);
duke@435 1355 /* Label this as no longer a free chunk. */
duke@435 1356 assert(ret->isFree(), "This chunk should be free");
duke@435 1357 ret->linkPrev(NULL);
duke@435 1358 }
duke@435 1359 assert(ret == NULL || ret->isFree(), "Should be returning a free chunk");
duke@435 1360 return ret;
duke@435 1361 }
duke@435 1362 ShouldNotReachHere();
duke@435 1363 }
duke@435 1364
ysr@3220 1365 bool CompactibleFreeListSpace::verifyChunkInIndexedFreeLists(FreeChunk* fc) const {
duke@435 1366 assert(fc->size() < IndexSetSize, "Size of chunk is too large");
duke@435 1367 return _indexedFreeList[fc->size()].verifyChunkInFreeLists(fc);
duke@435 1368 }
duke@435 1369
ysr@3220 1370 bool CompactibleFreeListSpace::verify_chunk_is_linear_alloc_block(FreeChunk* fc) const {
ysr@3220 1371 assert((_smallLinearAllocBlock._ptr != (HeapWord*)fc) ||
ysr@3220 1372 (_smallLinearAllocBlock._word_size == fc->size()),
ysr@3220 1373 "Linear allocation block shows incorrect size");
ysr@3220 1374 return ((_smallLinearAllocBlock._ptr == (HeapWord*)fc) &&
ysr@3220 1375 (_smallLinearAllocBlock._word_size == fc->size()));
ysr@3220 1376 }
ysr@3220 1377
ysr@3220 1378 // Check if the purported free chunk is present either as a linear
ysr@3220 1379 // allocation block, the size-indexed table of (smaller) free blocks,
ysr@3220 1380 // or the larger free blocks kept in the binary tree dictionary.
duke@435 1381 bool CompactibleFreeListSpace::verifyChunkInFreeLists(FreeChunk* fc) const {
ysr@3220 1382 if (verify_chunk_is_linear_alloc_block(fc)) {
ysr@3220 1383 return true;
ysr@3220 1384 } else if (fc->size() < IndexSetSize) {
ysr@3220 1385 return verifyChunkInIndexedFreeLists(fc);
ysr@3220 1386 } else {
duke@435 1387 return dictionary()->verifyChunkInFreeLists(fc);
duke@435 1388 }
duke@435 1389 }
duke@435 1390
duke@435 1391 #ifndef PRODUCT
duke@435 1392 void CompactibleFreeListSpace::assert_locked() const {
duke@435 1393 CMSLockVerifier::assert_locked(freelistLock(), parDictionaryAllocLock());
duke@435 1394 }
ysr@1580 1395
ysr@1580 1396 void CompactibleFreeListSpace::assert_locked(const Mutex* lock) const {
ysr@1580 1397 CMSLockVerifier::assert_locked(lock);
ysr@1580 1398 }
duke@435 1399 #endif
duke@435 1400
duke@435 1401 FreeChunk* CompactibleFreeListSpace::allocateScratch(size_t size) {
duke@435 1402 // In the parallel case, the main thread holds the free list lock
duke@435 1403 // on behalf of the parallel threads.
duke@435 1404 FreeChunk* fc;
duke@435 1405 {
duke@435 1406 // If GC is parallel, this might be called by several threads.
duke@435 1407 // This should be rare enough that the locking overhead won't affect
duke@435 1408 // the sequential code.
duke@435 1409 MutexLockerEx x(parDictionaryAllocLock(),
duke@435 1410 Mutex::_no_safepoint_check_flag);
duke@435 1411 fc = getChunkFromDictionary(size);
duke@435 1412 }
duke@435 1413 if (fc != NULL) {
duke@435 1414 fc->dontCoalesce();
duke@435 1415 assert(fc->isFree(), "Should be free, but not coalescable");
duke@435 1416 // Verify that the block offset table shows this to
duke@435 1417 // be a single block, but not one which is unallocated.
duke@435 1418 _bt.verify_single_block((HeapWord*)fc, fc->size());
duke@435 1419 _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
duke@435 1420 }
duke@435 1421 return fc;
duke@435 1422 }
duke@435 1423
coleenp@548 1424 oop CompactibleFreeListSpace::promote(oop obj, size_t obj_size) {
duke@435 1425 assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
duke@435 1426 assert_locked();
duke@435 1427
duke@435 1428 // if we are tracking promotions, then first ensure space for
duke@435 1429 // promotion (including spooling space for saving header if necessary).
duke@435 1430 // then allocate and copy, then track promoted info if needed.
duke@435 1431 // When tracking (see PromotionInfo::track()), the mark word may
duke@435 1432 // be displaced and in this case restoration of the mark word
duke@435 1433 // occurs in the (oop_since_save_marks_)iterate phase.
duke@435 1434 if (_promoInfo.tracking() && !_promoInfo.ensure_spooling_space()) {
duke@435 1435 return NULL;
duke@435 1436 }
duke@435 1437 // Call the allocate(size_t, bool) form directly to avoid the
duke@435 1438 // additional call through the allocate(size_t) form. Having
duke@435 1439 // the compiler inline the call is problematic because allocate(size_t)
duke@435 1440 // is a virtual method.
duke@435 1441 HeapWord* res = allocate(adjustObjectSize(obj_size));
duke@435 1442 if (res != NULL) {
duke@435 1443 Copy::aligned_disjoint_words((HeapWord*)obj, res, obj_size);
duke@435 1444 // if we should be tracking promotions, do so.
duke@435 1445 if (_promoInfo.tracking()) {
duke@435 1446 _promoInfo.track((PromotedObject*)res);
duke@435 1447 }
duke@435 1448 }
duke@435 1449 return oop(res);
duke@435 1450 }
duke@435 1451
duke@435 1452 HeapWord*
duke@435 1453 CompactibleFreeListSpace::getChunkFromSmallLinearAllocBlock(size_t size) {
duke@435 1454 assert_locked();
duke@435 1455 assert(size >= MinChunkSize, "minimum chunk size");
duke@435 1456 assert(size < _smallLinearAllocBlock._allocation_size_limit,
duke@435 1457 "maximum from smallLinearAllocBlock");
duke@435 1458 return getChunkFromLinearAllocBlock(&_smallLinearAllocBlock, size);
duke@435 1459 }
duke@435 1460
duke@435 1461 HeapWord*
duke@435 1462 CompactibleFreeListSpace::getChunkFromLinearAllocBlock(LinearAllocBlock *blk,
duke@435 1463 size_t size) {
duke@435 1464 assert_locked();
duke@435 1465 assert(size >= MinChunkSize, "too small");
duke@435 1466 HeapWord* res = NULL;
duke@435 1467 // Try to do linear allocation from blk, first making sure that the block is not empty.
duke@435 1468 if (blk->_word_size == 0) {
duke@435 1469 // We have probably been unable to fill this either in the prologue or
duke@435 1470 // when it was exhausted at the last linear allocation. Bail out until
duke@435 1471 // next time.
duke@435 1472 assert(blk->_ptr == NULL, "consistency check");
duke@435 1473 return NULL;
duke@435 1474 }
duke@435 1475 assert(blk->_word_size != 0 && blk->_ptr != NULL, "consistency check");
duke@435 1476 res = getChunkFromLinearAllocBlockRemainder(blk, size);
duke@435 1477 if (res != NULL) return res;
duke@435 1478
duke@435 1479 // about to exhaust this linear allocation block
duke@435 1480 if (blk->_word_size == size) { // exactly satisfied
duke@435 1481 res = blk->_ptr;
duke@435 1482 _bt.allocated(res, blk->_word_size);
duke@435 1483 } else if (size + MinChunkSize <= blk->_refillSize) {
ysr@1580 1484 size_t sz = blk->_word_size;
duke@435 1485 // Update _unallocated_block if the size is such that chunk would be
duke@435 1486 // returned to the indexed free list. All other chunks in the indexed
duke@435 1487 // free lists are allocated from the dictionary so that _unallocated_block
duke@435 1488 // has already been adjusted for them. Do it here so that the cost
duke@435 1489 // is incurred for all chunks added back to the indexed free lists.
ysr@1580 1490 if (sz < SmallForDictionary) {
ysr@1580 1491 _bt.allocated(blk->_ptr, sz);
duke@435 1492 }
duke@435 1493 // Return the chunk that isn't big enough, and then refill below.
ysr@1580 1494 addChunkToFreeLists(blk->_ptr, sz);
ysr@1580 1495 splitBirth(sz);
duke@435 1496 // Don't keep statistics on adding back chunk from a LinAB.
duke@435 1497 } else {
duke@435 1498 // A refilled block would not satisfy the request.
duke@435 1499 return NULL;
duke@435 1500 }
duke@435 1501
duke@435 1502 blk->_ptr = NULL; blk->_word_size = 0;
duke@435 1503 refillLinearAllocBlock(blk);
duke@435 1504 assert(blk->_ptr == NULL || blk->_word_size >= size + MinChunkSize,
duke@435 1505 "block was replenished");
duke@435 1506 if (res != NULL) {
duke@435 1507 splitBirth(size);
duke@435 1508 repairLinearAllocBlock(blk);
duke@435 1509 } else if (blk->_ptr != NULL) {
duke@435 1510 res = blk->_ptr;
duke@435 1511 size_t blk_size = blk->_word_size;
duke@435 1512 blk->_word_size -= size;
duke@435 1513 blk->_ptr += size;
duke@435 1514 splitBirth(size);
duke@435 1515 repairLinearAllocBlock(blk);
duke@435 1516 // Update BOT last so that other (parallel) GC threads see a consistent
duke@435 1517 // view of the BOT and free blocks.
duke@435 1518 // Above must occur before BOT is updated below.
ysr@2071 1519 OrderAccess::storestore();
duke@435 1520 _bt.split_block(res, blk_size, size); // adjust block offset table
duke@435 1521 }
duke@435 1522 return res;
duke@435 1523 }
duke@435 1524
duke@435 1525 HeapWord* CompactibleFreeListSpace::getChunkFromLinearAllocBlockRemainder(
duke@435 1526 LinearAllocBlock* blk,
duke@435 1527 size_t size) {
duke@435 1528 assert_locked();
duke@435 1529 assert(size >= MinChunkSize, "too small");
duke@435 1530
duke@435 1531 HeapWord* res = NULL;
duke@435 1532 // This is the common case. Keep it simple.
duke@435 1533 if (blk->_word_size >= size + MinChunkSize) {
duke@435 1534 assert(blk->_ptr != NULL, "consistency check");
duke@435 1535 res = blk->_ptr;
duke@435 1536 // Note that the BOT is up-to-date for the linAB before allocation. It
duke@435 1537 // indicates the start of the linAB. The split_block() updates the
duke@435 1538 // BOT for the linAB after the allocation (indicates the start of the
duke@435 1539 // next chunk to be allocated).
duke@435 1540 size_t blk_size = blk->_word_size;
duke@435 1541 blk->_word_size -= size;
duke@435 1542 blk->_ptr += size;
duke@435 1543 splitBirth(size);
duke@435 1544 repairLinearAllocBlock(blk);
duke@435 1545 // Update BOT last so that other (parallel) GC threads see a consistent
duke@435 1546 // view of the BOT and free blocks.
duke@435 1547 // Above must occur before BOT is updated below.
ysr@2071 1548 OrderAccess::storestore();
duke@435 1549 _bt.split_block(res, blk_size, size); // adjust block offset table
duke@435 1550 _bt.allocated(res, size);
duke@435 1551 }
duke@435 1552 return res;
duke@435 1553 }
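
// Worked example (hypothetical numbers) for the common case above: a linAB
// with _ptr == P and _word_size == 100, asked for size == 16 when
// MinChunkSize is 8. Since 100 >= 16 + 8, res == P, the block shrinks to
// { _ptr = P + 16, _word_size = 84 }, a split birth of 16 is recorded, and
// the BOT is told that the block starting at P was split at P + 16.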
duke@435 1554
duke@435 1555 FreeChunk*
duke@435 1556 CompactibleFreeListSpace::getChunkFromIndexedFreeList(size_t size) {
duke@435 1557 assert_locked();
duke@435 1558 assert(size < SmallForDictionary, "just checking");
duke@435 1559 FreeChunk* res;
duke@435 1560 res = _indexedFreeList[size].getChunkAtHead();
duke@435 1561 if (res == NULL) {
duke@435 1562 res = getChunkFromIndexedFreeListHelper(size);
duke@435 1563 }
duke@435 1564 _bt.verify_not_unallocated((HeapWord*) res, size);
ysr@1580 1565 assert(res == NULL || res->size() == size, "Incorrect block size");
duke@435 1566 return res;
duke@435 1567 }
duke@435 1568
duke@435 1569 FreeChunk*
ysr@1580 1570 CompactibleFreeListSpace::getChunkFromIndexedFreeListHelper(size_t size,
ysr@1580 1571 bool replenish) {
duke@435 1572 assert_locked();
duke@435 1573 FreeChunk* fc = NULL;
duke@435 1574 if (size < SmallForDictionary) {
duke@435 1575 assert(_indexedFreeList[size].head() == NULL ||
duke@435 1576 _indexedFreeList[size].surplus() <= 0,
duke@435 1577 "List for this size should be empty or under populated");
duke@435 1578 // Try best fit in exact lists before replenishing the list
duke@435 1579 if (!bestFitFirst() || (fc = bestFitSmall(size)) == NULL) {
duke@435 1580 // Replenish list.
duke@435 1581 //
duke@435 1582 // Things tried that failed.
duke@435 1583 // Tried allocating out of the two LinAB's first before
duke@435 1584 // replenishing lists.
duke@435 1585 // Tried small linAB of size 256 (size in indexed list)
duke@435 1586 // and replenishing indexed lists from the small linAB.
duke@435 1587 //
duke@435 1588 FreeChunk* newFc = NULL;
ysr@1580 1589 const size_t replenish_size = CMSIndexedFreeListReplenish * size;
duke@435 1590 if (replenish_size < SmallForDictionary) {
duke@435 1591 // Do not replenish from an underpopulated size.
duke@435 1592 if (_indexedFreeList[replenish_size].surplus() > 0 &&
duke@435 1593 _indexedFreeList[replenish_size].head() != NULL) {
ysr@1580 1594 newFc = _indexedFreeList[replenish_size].getChunkAtHead();
ysr@1580 1595 } else if (bestFitFirst()) {
duke@435 1596 newFc = bestFitSmall(replenish_size);
duke@435 1597 }
duke@435 1598 }
ysr@1580 1599 if (newFc == NULL && replenish_size > size) {
ysr@1580 1600 assert(CMSIndexedFreeListReplenish > 1, "ctl pt invariant");
ysr@1580 1601 newFc = getChunkFromIndexedFreeListHelper(replenish_size, false);
ysr@1580 1602 }
ysr@1580 1603 // Note: The stats update re split-death of block obtained above
ysr@1580 1604 // will be recorded below precisely when we know we are going to
ysr@1580 1605 // be actually splitting it into more than one piece below.
duke@435 1606 if (newFc != NULL) {
ysr@1580 1607 if (replenish || CMSReplenishIntermediate) {
ysr@1580 1608 // Replenish this list and return one block to caller.
ysr@1580 1609 size_t i;
ysr@1580 1610 FreeChunk *curFc, *nextFc;
ysr@1580 1611 size_t num_blk = newFc->size() / size;
ysr@1580 1612 assert(num_blk >= 1, "Smaller than requested?");
ysr@1580 1613 assert(newFc->size() % size == 0, "Should be integral multiple of request");
ysr@1580 1614 if (num_blk > 1) {
ysr@1580 1615 // we are sure we will be splitting the block just obtained
ysr@1580 1616 // into multiple pieces; record the split-death of the original
ysr@1580 1617 splitDeath(replenish_size);
ysr@1580 1618 }
ysr@1580 1619 // carve up and link blocks 0, ..., num_blk - 2
ysr@1580 1620 // The last chunk is not added to the lists but is returned as the
ysr@1580 1621 // free chunk.
ysr@1580 1622 for (curFc = newFc, nextFc = (FreeChunk*)((HeapWord*)curFc + size),
ysr@1580 1623 i = 0;
ysr@1580 1624 i < (num_blk - 1);
ysr@1580 1625 curFc = nextFc, nextFc = (FreeChunk*)((HeapWord*)nextFc + size),
ysr@1580 1626 i++) {
ysr@1580 1627 curFc->setSize(size);
ysr@1580 1628 // Don't record this as a return in order to try and
ysr@1580 1629 // determine the "returns" from a GC.
ysr@1580 1630 _bt.verify_not_unallocated((HeapWord*) fc, size);
ysr@1580 1631 _indexedFreeList[size].returnChunkAtTail(curFc, false);
ysr@1580 1632 _bt.mark_block((HeapWord*)curFc, size);
ysr@1580 1633 splitBirth(size);
ysr@1580 1634 // Don't record the initial population of the indexed list
ysr@1580 1635 // as a split birth.
ysr@1580 1636 }
ysr@1580 1637
ysr@1580 1638 // check that the arithmetic was OK above
ysr@1580 1639 assert((HeapWord*)nextFc == (HeapWord*)newFc + num_blk*size,
ysr@1580 1640 "inconsistency in carving newFc");
duke@435 1641 curFc->setSize(size);
duke@435 1642 _bt.mark_block((HeapWord*)curFc, size);
duke@435 1643 splitBirth(size);
ysr@1580 1644 fc = curFc;
ysr@1580 1645 } else {
ysr@1580 1646 // Return entire block to caller
ysr@1580 1647 fc = newFc;
duke@435 1648 }
duke@435 1649 }
duke@435 1650 }
duke@435 1651 } else {
duke@435 1652 // Get a free chunk from the free chunk dictionary to be returned to
duke@435 1653 // replenish the indexed free list.
duke@435 1654 fc = getChunkFromDictionaryExact(size);
duke@435 1655 }
ysr@1580 1656 // assert(fc == NULL || fc->isFree(), "Should be returning a free chunk");
duke@435 1657 return fc;
duke@435 1658 }
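
// Worked example (hypothetical numbers) of the replenish path above: a
// request for size == 8 words with CMSIndexedFreeListReplenish == 4 first
// tries replenish_size == 32. If a 32-word chunk is obtained and replenish
// is allowed, it is carved into four 8-word blocks: three are appended to
// _indexedFreeList[8] (each recorded as a split birth) and the fourth is
// returned to the caller.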
duke@435 1659
duke@435 1660 FreeChunk*
duke@435 1661 CompactibleFreeListSpace::getChunkFromDictionary(size_t size) {
duke@435 1662 assert_locked();
duke@435 1663 FreeChunk* fc = _dictionary->getChunk(size);
duke@435 1664 if (fc == NULL) {
duke@435 1665 return NULL;
duke@435 1666 }
duke@435 1667 _bt.allocated((HeapWord*)fc, fc->size());
duke@435 1668 if (fc->size() >= size + MinChunkSize) {
duke@435 1669 fc = splitChunkAndReturnRemainder(fc, size);
duke@435 1670 }
duke@435 1671 assert(fc->size() >= size, "chunk too small");
duke@435 1672 assert(fc->size() < size + MinChunkSize, "chunk too big");
duke@435 1673 _bt.verify_single_block((HeapWord*)fc, fc->size());
duke@435 1674 return fc;
duke@435 1675 }
duke@435 1676
duke@435 1677 FreeChunk*
duke@435 1678 CompactibleFreeListSpace::getChunkFromDictionaryExact(size_t size) {
duke@435 1679 assert_locked();
duke@435 1680 FreeChunk* fc = _dictionary->getChunk(size);
duke@435 1681 if (fc == NULL) {
duke@435 1682 return fc;
duke@435 1683 }
duke@435 1684 _bt.allocated((HeapWord*)fc, fc->size());
duke@435 1685 if (fc->size() == size) {
duke@435 1686 _bt.verify_single_block((HeapWord*)fc, size);
duke@435 1687 return fc;
duke@435 1688 }
duke@435 1689 assert(fc->size() > size, "getChunk() guarantee");
duke@435 1690 if (fc->size() < size + MinChunkSize) {
duke@435 1691 // Return the chunk to the dictionary and go get a bigger one.
duke@435 1692 returnChunkToDictionary(fc);
duke@435 1693 fc = _dictionary->getChunk(size + MinChunkSize);
duke@435 1694 if (fc == NULL) {
duke@435 1695 return NULL;
duke@435 1696 }
duke@435 1697 _bt.allocated((HeapWord*)fc, fc->size());
duke@435 1698 }
duke@435 1699 assert(fc->size() >= size + MinChunkSize, "tautology");
duke@435 1700 fc = splitChunkAndReturnRemainder(fc, size);
duke@435 1701 assert(fc->size() == size, "chunk is wrong size");
duke@435 1702 _bt.verify_single_block((HeapWord*)fc, size);
duke@435 1703 return fc;
duke@435 1704 }
duke@435 1705
duke@435 1706 void
duke@435 1707 CompactibleFreeListSpace::returnChunkToDictionary(FreeChunk* chunk) {
duke@435 1708 assert_locked();
duke@435 1709
duke@435 1710 size_t size = chunk->size();
duke@435 1711 _bt.verify_single_block((HeapWord*)chunk, size);
duke@435 1712 // adjust _unallocated_block downward, as necessary
duke@435 1713 _bt.freed((HeapWord*)chunk, size);
duke@435 1714 _dictionary->returnChunk(chunk);
ysr@1580 1715 #ifndef PRODUCT
ysr@1580 1716 if (CMSCollector::abstract_state() != CMSCollector::Sweeping) {
ysr@1580 1717 TreeChunk::as_TreeChunk(chunk)->list()->verify_stats();
ysr@1580 1718 }
ysr@1580 1719 #endif // PRODUCT
duke@435 1720 }
duke@435 1721
duke@435 1722 void
duke@435 1723 CompactibleFreeListSpace::returnChunkToFreeList(FreeChunk* fc) {
duke@435 1724 assert_locked();
duke@435 1725 size_t size = fc->size();
duke@435 1726 _bt.verify_single_block((HeapWord*) fc, size);
duke@435 1727 _bt.verify_not_unallocated((HeapWord*) fc, size);
duke@435 1728 if (_adaptive_freelists) {
duke@435 1729 _indexedFreeList[size].returnChunkAtTail(fc);
duke@435 1730 } else {
duke@435 1731 _indexedFreeList[size].returnChunkAtHead(fc);
duke@435 1732 }
ysr@1580 1733 #ifndef PRODUCT
ysr@1580 1734 if (CMSCollector::abstract_state() != CMSCollector::Sweeping) {
ysr@1580 1735 _indexedFreeList[size].verify_stats();
ysr@1580 1736 }
ysr@1580 1737 #endif // PRODUCT
duke@435 1738 }
duke@435 1739
duke@435 1740 // Add chunk to end of last block -- if it's the largest
duke@435 1741 // block -- and update BOT and census data. We would
duke@435 1742 // of course have preferred to coalesce it with the
duke@435 1743 // last block, but it's currently less expensive to find the
duke@435 1744 // largest block than it is to find the last.
duke@435 1745 void
duke@435 1746 CompactibleFreeListSpace::addChunkToFreeListsAtEndRecordingStats(
duke@435 1747 HeapWord* chunk, size_t size) {
duke@435 1748 // check that the chunk does lie in this space!
duke@435 1749 assert(chunk != NULL && is_in_reserved(chunk), "Not in this space!");
duke@435 1750 // One of the parallel gc task threads may be here
duke@435 1751 // whilst others are allocating.
duke@435 1752 Mutex* lock = NULL;
duke@435 1753 if (ParallelGCThreads != 0) {
duke@435 1754 lock = &_parDictionaryAllocLock;
duke@435 1755 }
duke@435 1756 FreeChunk* ec;
duke@435 1757 {
duke@435 1758 MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
duke@435 1759 ec = dictionary()->findLargestDict(); // get largest block
duke@435 1760 if (ec != NULL && ec->end() == chunk) {
duke@435 1761 // It's a coterminal block - we can coalesce.
duke@435 1762 size_t old_size = ec->size();
duke@435 1763 coalDeath(old_size);
duke@435 1764 removeChunkFromDictionary(ec);
duke@435 1765 size += old_size;
duke@435 1766 } else {
duke@435 1767 ec = (FreeChunk*)chunk;
duke@435 1768 }
duke@435 1769 }
duke@435 1770 ec->setSize(size);
duke@435 1771 debug_only(ec->mangleFreed(size));
duke@435 1772 if (size < SmallForDictionary) {
duke@435 1773 lock = _indexedFreeListParLocks[size];
duke@435 1774 }
duke@435 1775 MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
duke@435 1776 addChunkAndRepairOffsetTable((HeapWord*)ec, size, true);
duke@435 1777 // record the birth under the lock since the recording involves
duke@435 1778 // manipulation of the list on which the chunk lives and
duke@435 1779 // if the chunk is allocated and is the last on the list,
duke@435 1780 // the list can go away.
duke@435 1781 coalBirth(size);
duke@435 1782 }
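
// Worked example (hypothetical addresses and sizes): if the largest
// dictionary block is 1000 words starting at E and E + 1000 == chunk, the
// block is coterminal with chunk; it is removed from the dictionary (a
// coalesce death for 1000) and the merged block, starting at E and spanning
// the old 1000 words plus the new chunk, is added back with a coalesce
// birth for the combined size.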
duke@435 1783
duke@435 1784 void
duke@435 1785 CompactibleFreeListSpace::addChunkToFreeLists(HeapWord* chunk,
duke@435 1786 size_t size) {
duke@435 1787 // check that the chunk does lie in this space!
duke@435 1788 assert(chunk != NULL && is_in_reserved(chunk), "Not in this space!");
duke@435 1789 assert_locked();
duke@435 1790 _bt.verify_single_block(chunk, size);
duke@435 1791
duke@435 1792 FreeChunk* fc = (FreeChunk*) chunk;
duke@435 1793 fc->setSize(size);
duke@435 1794 debug_only(fc->mangleFreed(size));
duke@435 1795 if (size < SmallForDictionary) {
duke@435 1796 returnChunkToFreeList(fc);
duke@435 1797 } else {
duke@435 1798 returnChunkToDictionary(fc);
duke@435 1799 }
duke@435 1800 }
duke@435 1801
duke@435 1802 void
duke@435 1803 CompactibleFreeListSpace::addChunkAndRepairOffsetTable(HeapWord* chunk,
duke@435 1804 size_t size, bool coalesced) {
duke@435 1805 assert_locked();
duke@435 1806 assert(chunk != NULL, "null chunk");
duke@435 1807 if (coalesced) {
duke@435 1808 // repair BOT
duke@435 1809 _bt.single_block(chunk, size);
duke@435 1810 }
duke@435 1811 addChunkToFreeLists(chunk, size);
duke@435 1812 }
duke@435 1813
duke@435 1814 // We _must_ find the purported chunk on our free lists;
duke@435 1815 // we assert if we don't.
duke@435 1816 void
duke@435 1817 CompactibleFreeListSpace::removeFreeChunkFromFreeLists(FreeChunk* fc) {
duke@435 1818 size_t size = fc->size();
duke@435 1819 assert_locked();
duke@435 1820 debug_only(verifyFreeLists());
duke@435 1821 if (size < SmallForDictionary) {
duke@435 1822 removeChunkFromIndexedFreeList(fc);
duke@435 1823 } else {
duke@435 1824 removeChunkFromDictionary(fc);
duke@435 1825 }
duke@435 1826 _bt.verify_single_block((HeapWord*)fc, size);
duke@435 1827 debug_only(verifyFreeLists());
duke@435 1828 }
duke@435 1829
duke@435 1830 void
duke@435 1831 CompactibleFreeListSpace::removeChunkFromDictionary(FreeChunk* fc) {
duke@435 1832 size_t size = fc->size();
duke@435 1833 assert_locked();
duke@435 1834 assert(fc != NULL, "null chunk");
duke@435 1835 _bt.verify_single_block((HeapWord*)fc, size);
duke@435 1836 _dictionary->removeChunk(fc);
duke@435 1837 // adjust _unallocated_block upward, as necessary
duke@435 1838 _bt.allocated((HeapWord*)fc, size);
duke@435 1839 }
duke@435 1840
duke@435 1841 void
duke@435 1842 CompactibleFreeListSpace::removeChunkFromIndexedFreeList(FreeChunk* fc) {
duke@435 1843 assert_locked();
duke@435 1844 size_t size = fc->size();
duke@435 1845 _bt.verify_single_block((HeapWord*)fc, size);
duke@435 1846 NOT_PRODUCT(
duke@435 1847 if (FLSVerifyIndexTable) {
duke@435 1848 verifyIndexedFreeList(size);
duke@435 1849 }
duke@435 1850 )
duke@435 1851 _indexedFreeList[size].removeChunk(fc);
duke@435 1852 NOT_PRODUCT(
duke@435 1853 if (FLSVerifyIndexTable) {
duke@435 1854 verifyIndexedFreeList(size);
duke@435 1855 }
duke@435 1856 )
duke@435 1857 }
duke@435 1858
duke@435 1859 FreeChunk* CompactibleFreeListSpace::bestFitSmall(size_t numWords) {
duke@435 1860 /* A hint is the next larger size that has a surplus.
duke@435 1861 Start search at a size large enough to guarantee that
duke@435 1862 the excess is >= MIN_CHUNK. */
duke@435 1863 size_t start = align_object_size(numWords + MinChunkSize);
duke@435 1864 if (start < IndexSetSize) {
duke@435 1865 FreeList* it = _indexedFreeList;
duke@435 1866 size_t hint = _indexedFreeList[start].hint();
duke@435 1867 while (hint < IndexSetSize) {
duke@435 1868 assert(hint % MinObjAlignment == 0, "hint should be aligned");
duke@435 1869 FreeList *fl = &_indexedFreeList[hint];
duke@435 1870 if (fl->surplus() > 0 && fl->head() != NULL) {
duke@435 1871 // Found a list with surplus, reset original hint
duke@435 1872 // and split out a free chunk which is returned.
duke@435 1873 _indexedFreeList[start].set_hint(hint);
duke@435 1874 FreeChunk* res = getFromListGreater(fl, numWords);
duke@435 1875 assert(res == NULL || res->isFree(),
duke@435 1876 "Should be returning a free chunk");
duke@435 1877 return res;
duke@435 1878 }
duke@435 1879 hint = fl->hint(); /* keep looking */
duke@435 1880 }
duke@435 1881 /* None found. */
duke@435 1882 it[start].set_hint(IndexSetSize);
duke@435 1883 }
duke@435 1884 return NULL;
duke@435 1885 }
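
// Worked example (hypothetical state): numWords == 20 and MinChunkSize == 8
// give a start index of 28 (assuming one-word object alignment). If
// _indexedFreeList[28].hint() is 40 but list 40 has no surplus, while list
// 40's hint of 64 has both a surplus and a non-empty list, then list 28's
// hint is reset to 64 and a 20-word chunk is split off a 64-word block via
// getFromListGreater().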
duke@435 1886
duke@435 1887 /* Requires fl->size >= numWords + MinChunkSize */
duke@435 1888 FreeChunk* CompactibleFreeListSpace::getFromListGreater(FreeList* fl,
duke@435 1889 size_t numWords) {
duke@435 1890 FreeChunk *curr = fl->head();
duke@435 1891 size_t oldNumWords = curr->size();
duke@435 1892 assert(numWords >= MinChunkSize, "Word size is too small");
duke@435 1893 assert(curr != NULL, "List is empty");
duke@435 1894 assert(oldNumWords >= numWords + MinChunkSize,
duke@435 1895 "Size of chunks in the list is too small");
duke@435 1896
duke@435 1897 fl->removeChunk(curr);
duke@435 1898 // recorded indirectly by splitChunkAndReturnRemainder -
duke@435 1899 // smallSplit(oldNumWords, numWords);
duke@435 1900 FreeChunk* new_chunk = splitChunkAndReturnRemainder(curr, numWords);
duke@435 1901 // Does anything have to be done for the remainder in terms of
duke@435 1902 // fixing the card table?
duke@435 1903 assert(new_chunk == NULL || new_chunk->isFree(),
duke@435 1904 "Should be returning a free chunk");
duke@435 1905 return new_chunk;
duke@435 1906 }
duke@435 1907
duke@435 1908 FreeChunk*
duke@435 1909 CompactibleFreeListSpace::splitChunkAndReturnRemainder(FreeChunk* chunk,
duke@435 1910 size_t new_size) {
duke@435 1911 assert_locked();
duke@435 1912 size_t size = chunk->size();
duke@435 1913 assert(size > new_size, "Split from a smaller block?");
duke@435 1914 assert(is_aligned(chunk), "alignment problem");
duke@435 1915 assert(size == adjustObjectSize(size), "alignment problem");
duke@435 1916 size_t rem_size = size - new_size;
duke@435 1917 assert(rem_size == adjustObjectSize(rem_size), "alignment problem");
duke@435 1918 assert(rem_size >= MinChunkSize, "Free chunk smaller than minimum");
duke@435 1919 FreeChunk* ffc = (FreeChunk*)((HeapWord*)chunk + new_size);
duke@435 1920 assert(is_aligned(ffc), "alignment problem");
duke@435 1921 ffc->setSize(rem_size);
duke@435 1922 ffc->linkNext(NULL);
duke@435 1923 ffc->linkPrev(NULL); // Mark as a free block for other (parallel) GC threads.
duke@435 1924 // Above must occur before BOT is updated below.
duke@435 1925 // adjust block offset table
ysr@2071 1926 OrderAccess::storestore();
ysr@2071 1927 assert(chunk->isFree() && ffc->isFree(), "Error");
duke@435 1928 _bt.split_block((HeapWord*)chunk, chunk->size(), new_size);
duke@435 1929 if (rem_size < SmallForDictionary) {
duke@435 1930 bool is_par = (SharedHeap::heap()->n_par_threads() > 0);
duke@435 1931 if (is_par) _indexedFreeListParLocks[rem_size]->lock();
jmasa@3294 1932 assert(!is_par ||
jmasa@3294 1933 (SharedHeap::heap()->n_par_threads() ==
jmasa@3294 1934 SharedHeap::heap()->workers()->active_workers()), "Mismatch");
duke@435 1935 returnChunkToFreeList(ffc);
duke@435 1936 split(size, rem_size);
duke@435 1937 if (is_par) _indexedFreeListParLocks[rem_size]->unlock();
duke@435 1938 } else {
duke@435 1939 returnChunkToDictionary(ffc);
duke@435 1940 split(size, rem_size);
duke@435 1941 }
duke@435 1942 chunk->setSize(new_size);
duke@435 1943 return chunk;
duke@435 1944 }
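
// Worked example (hypothetical numbers): splitting a 100-word free chunk at
// new_size == 60 creates a 40-word remainder at chunk + 60, which is marked
// free, handed to the indexed lists (if 40 < SmallForDictionary) or to the
// dictionary otherwise, and counted via split(100, 40), which records a
// split death for 100 and split births for 40 and 60; the original chunk is
// resized to 60 words and returned to the caller.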
duke@435 1945
duke@435 1946 void
duke@435 1947 CompactibleFreeListSpace::sweep_completed() {
duke@435 1948 // Now that space is probably plentiful, refill linear
duke@435 1949 // allocation blocks as needed.
duke@435 1950 refillLinearAllocBlocksIfNeeded();
duke@435 1951 }
duke@435 1952
duke@435 1953 void
duke@435 1954 CompactibleFreeListSpace::gc_prologue() {
duke@435 1955 assert_locked();
duke@435 1956 if (PrintFLSStatistics != 0) {
duke@435 1957 gclog_or_tty->print("Before GC:\n");
duke@435 1958 reportFreeListStatistics();
duke@435 1959 }
duke@435 1960 refillLinearAllocBlocksIfNeeded();
duke@435 1961 }
duke@435 1962
duke@435 1963 void
duke@435 1964 CompactibleFreeListSpace::gc_epilogue() {
duke@435 1965 assert_locked();
duke@435 1966 if (PrintGCDetails && Verbose && !_adaptive_freelists) {
duke@435 1967 if (_smallLinearAllocBlock._word_size == 0)
duke@435 1968 warning("CompactibleFreeListSpace(epilogue):: Linear allocation failure");
duke@435 1969 }
duke@435 1970 assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");
duke@435 1971 _promoInfo.stopTrackingPromotions();
duke@435 1972 repairLinearAllocationBlocks();
duke@435 1973 // Print Space's stats
duke@435 1974 if (PrintFLSStatistics != 0) {
duke@435 1975 gclog_or_tty->print("After GC:\n");
duke@435 1976 reportFreeListStatistics();
duke@435 1977 }
duke@435 1978 }
duke@435 1979
duke@435 1980 // Iteration support, mostly delegated from a CMS generation
duke@435 1981
duke@435 1982 void CompactibleFreeListSpace::save_marks() {
ysr@2825 1983 assert(Thread::current()->is_VM_thread(),
ysr@2825 1984 "Global variable should only be set when single-threaded");
ysr@2825 1985 // Mark the "end" of the used space at the time of this call;
duke@435 1986 // note, however, that promoted objects from this point
duke@435 1987 // on are tracked in the _promoInfo below.
ysr@2071 1988 set_saved_mark_word(unallocated_block());
ysr@2825 1989 #ifdef ASSERT
ysr@2825 1990 // Check the sanity of save_marks() etc.
ysr@2825 1991 MemRegion ur = used_region();
ysr@2825 1992 MemRegion urasm = used_region_at_save_marks();
ysr@2825 1993 assert(ur.contains(urasm),
ysr@2825 1994 err_msg(" Error at save_marks(): [" PTR_FORMAT "," PTR_FORMAT ")"
ysr@2825 1995 " should contain [" PTR_FORMAT "," PTR_FORMAT ")",
ysr@2825 1996 ur.start(), ur.end(), urasm.start(), urasm.end()));
ysr@2825 1997 #endif
duke@435 1998 // inform allocator that promotions should be tracked.
duke@435 1999 assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");
duke@435 2000 _promoInfo.startTrackingPromotions();
duke@435 2001 }
duke@435 2002
duke@435 2003 bool CompactibleFreeListSpace::no_allocs_since_save_marks() {
duke@435 2004 assert(_promoInfo.tracking(), "No preceding save_marks?");
ysr@2132 2005 assert(SharedHeap::heap()->n_par_threads() == 0,
ysr@2132 2006 "Shouldn't be called if using parallel gc.");
duke@435 2007 return _promoInfo.noPromotions();
duke@435 2008 }
duke@435 2009
duke@435 2010 #define CFLS_OOP_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix) \
duke@435 2011 \
duke@435 2012 void CompactibleFreeListSpace:: \
duke@435 2013 oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk) { \
duke@435 2014 assert(SharedHeap::heap()->n_par_threads() == 0, \
duke@435 2015 "Shouldn't be called (yet) during parallel part of gc."); \
duke@435 2016 _promoInfo.promoted_oops_iterate##nv_suffix(blk); \
duke@435 2017 /* \
duke@435 2018 * This also restores any displaced headers and removes the elements from \
duke@435 2019 * the iteration set as they are processed, so that we have a clean slate \
duke@435 2020 * at the end of the iteration. Note, thus, that if new objects are \
duke@435 2021 * promoted as a result of the iteration they are iterated over as well. \
duke@435 2022 */ \
duke@435 2023 assert(_promoInfo.noPromotions(), "_promoInfo inconsistency"); \
duke@435 2024 }
duke@435 2025
duke@435 2026 ALL_SINCE_SAVE_MARKS_CLOSURES(CFLS_OOP_SINCE_SAVE_MARKS_DEFN)
duke@435 2027
duke@435 2028
duke@435 2029 void CompactibleFreeListSpace::object_iterate_since_last_GC(ObjectClosure* cl) {
duke@435 2030 // ugghh... how would one do this efficiently for a non-contiguous space?
duke@435 2031 guarantee(false, "NYI");
duke@435 2032 }
duke@435 2033
ysr@447 2034 bool CompactibleFreeListSpace::linearAllocationWouldFail() const {
duke@435 2035 return _smallLinearAllocBlock._word_size == 0;
duke@435 2036 }
duke@435 2037
duke@435 2038 void CompactibleFreeListSpace::repairLinearAllocationBlocks() {
duke@435 2039 // Fix up linear allocation blocks to look like free blocks
duke@435 2040 repairLinearAllocBlock(&_smallLinearAllocBlock);
duke@435 2041 }
duke@435 2042
duke@435 2043 void CompactibleFreeListSpace::repairLinearAllocBlock(LinearAllocBlock* blk) {
duke@435 2044 assert_locked();
duke@435 2045 if (blk->_ptr != NULL) {
duke@435 2046 assert(blk->_word_size != 0 && blk->_word_size >= MinChunkSize,
duke@435 2047 "Minimum block size requirement");
duke@435 2048 FreeChunk* fc = (FreeChunk*)(blk->_ptr);
duke@435 2049 fc->setSize(blk->_word_size);
duke@435 2050 fc->linkPrev(NULL); // mark as free
duke@435 2051 fc->dontCoalesce();
duke@435 2052 assert(fc->isFree(), "just marked it free");
duke@435 2053 assert(fc->cantCoalesce(), "just marked it uncoalescable");
duke@435 2054 }
duke@435 2055 }
duke@435 2056
duke@435 2057 void CompactibleFreeListSpace::refillLinearAllocBlocksIfNeeded() {
duke@435 2058 assert_locked();
duke@435 2059 if (_smallLinearAllocBlock._ptr == NULL) {
duke@435 2060 assert(_smallLinearAllocBlock._word_size == 0,
duke@435 2061 "Size of linAB should be zero if the ptr is NULL");
duke@435 2062 // Reset the linAB refill and allocation size limit.
duke@435 2063 _smallLinearAllocBlock.set(0, 0, 1024*SmallForLinearAlloc, SmallForLinearAlloc);
duke@435 2064 }
duke@435 2065 refillLinearAllocBlockIfNeeded(&_smallLinearAllocBlock);
duke@435 2066 }
duke@435 2067
duke@435 2068 void
duke@435 2069 CompactibleFreeListSpace::refillLinearAllocBlockIfNeeded(LinearAllocBlock* blk) {
duke@435 2070 assert_locked();
duke@435 2071 assert((blk->_ptr == NULL && blk->_word_size == 0) ||
duke@435 2072 (blk->_ptr != NULL && blk->_word_size >= MinChunkSize),
duke@435 2073 "blk invariant");
duke@435 2074 if (blk->_ptr == NULL) {
duke@435 2075 refillLinearAllocBlock(blk);
duke@435 2076 }
duke@435 2077 if (PrintMiscellaneous && Verbose) {
duke@435 2078 if (blk->_word_size == 0) {
duke@435 2079 warning("CompactibleFreeListSpace(prologue):: Linear allocation failure");
duke@435 2080 }
duke@435 2081 }
duke@435 2082 }
duke@435 2083
duke@435 2084 void
duke@435 2085 CompactibleFreeListSpace::refillLinearAllocBlock(LinearAllocBlock* blk) {
duke@435 2086 assert_locked();
duke@435 2087 assert(blk->_word_size == 0 && blk->_ptr == NULL,
duke@435 2088 "linear allocation block should be empty");
duke@435 2089 FreeChunk* fc;
duke@435 2090 if (blk->_refillSize < SmallForDictionary &&
duke@435 2091 (fc = getChunkFromIndexedFreeList(blk->_refillSize)) != NULL) {
duke@435 2092 // A linAB's strategy might be to use small sizes to reduce
duke@435 2093 // fragmentation but still get the benefits of allocation from a
duke@435 2094 // linAB.
duke@435 2095 } else {
duke@435 2096 fc = getChunkFromDictionary(blk->_refillSize);
duke@435 2097 }
duke@435 2098 if (fc != NULL) {
duke@435 2099 blk->_ptr = (HeapWord*)fc;
duke@435 2100 blk->_word_size = fc->size();
duke@435 2101 fc->dontCoalesce(); // to prevent sweeper from sweeping us up
duke@435 2102 }
duke@435 2103 }
duke@435 2104
ysr@447 2105 // Support for concurrent collection policy decisions.
ysr@447 2106 bool CompactibleFreeListSpace::should_concurrent_collect() const {
ysr@447 2107 // In the future we might want to add in fragmentation stats --
ysr@447 2108 // including erosion of the "mountain" into this decision as well.
ysr@447 2109 return !adaptive_freelists() && linearAllocationWouldFail();
ysr@447 2110 }
ysr@447 2111
duke@435 2112 // Support for compaction
duke@435 2113
duke@435 2114 void CompactibleFreeListSpace::prepare_for_compaction(CompactPoint* cp) {
duke@435 2115 SCAN_AND_FORWARD(cp,end,block_is_obj,block_size);
duke@435 2116 // prepare_for_compaction() uses the space between live objects
duke@435 2117 // so that a later phase can skip dead space quickly. As a result,
duke@435 2118 // verification of the free lists doesn't work afterwards.
duke@435 2119 }
duke@435 2120
duke@435 2121 #define obj_size(q) adjustObjectSize(oop(q)->size())
duke@435 2122 #define adjust_obj_size(s) adjustObjectSize(s)
duke@435 2123
duke@435 2124 void CompactibleFreeListSpace::adjust_pointers() {
duke@435 2125 // In other versions of adjust_pointers(), a bail out
duke@435 2126 // based on the amount of live data in the generation
duke@435 2127 // (i.e., if 0, bail out) may be used.
duke@435 2128 // Cannot test used() == 0 here because the free lists have already
duke@435 2129 // been mangled by the compaction.
duke@435 2130
duke@435 2131 SCAN_AND_ADJUST_POINTERS(adjust_obj_size);
duke@435 2132 // See note about verification in prepare_for_compaction().
duke@435 2133 }
duke@435 2134
duke@435 2135 void CompactibleFreeListSpace::compact() {
duke@435 2136 SCAN_AND_COMPACT(obj_size);
duke@435 2137 }
duke@435 2138
duke@435 2139 // fragmentation_metric = 1 - [sum of (fbs**2) / (sum of fbs)**2]
duke@435 2140 // where fbs is free block sizes
duke@435 2141 double CompactibleFreeListSpace::flsFrag() const {
duke@435 2142 size_t itabFree = totalSizeInIndexedFreeLists();
duke@435 2143 double frag = 0.0;
duke@435 2144 size_t i;
duke@435 2145
duke@435 2146 for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
duke@435 2147 double sz = i;
duke@435 2148 frag += _indexedFreeList[i].count() * (sz * sz);
duke@435 2149 }
duke@435 2150
duke@435 2151 double totFree = itabFree +
duke@435 2152 _dictionary->totalChunkSize(DEBUG_ONLY(freelistLock()));
duke@435 2153 if (totFree > 0) {
duke@435 2154 frag = ((frag + _dictionary->sum_of_squared_block_sizes()) /
duke@435 2155 (totFree * totFree));
duke@435 2156 frag = (double)1.0 - frag;
duke@435 2157 } else {
duke@435 2158 assert(frag == 0.0, "Follows from totFree == 0");
duke@435 2159 }
duke@435 2160 return frag;
duke@435 2161 }
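
// A minimal, self-contained sketch (standard C++ only, not used by the
// collector) of the same metric over an arbitrary collection of free block
// sizes: a result near 0.0 means the free space sits in one large block,
// while a result approaching 1.0 means it is scattered across many small
// blocks.
//
//   #include <vector>
//
//   double fls_frag_sketch(const std::vector<size_t>& sizes) {
//     double sum = 0.0, sum_sq = 0.0;
//     for (size_t i = 0; i < sizes.size(); i++) {
//       double s = (double)sizes[i];
//       sum    += s;
//       sum_sq += s * s;
//     }
//     return (sum > 0.0) ? (1.0 - sum_sq / (sum * sum)) : 0.0;
//   }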
duke@435 2162
duke@435 2163 void CompactibleFreeListSpace::beginSweepFLCensus(
duke@435 2164 float inter_sweep_current,
ysr@1580 2165 float inter_sweep_estimate,
ysr@1580 2166 float intra_sweep_estimate) {
duke@435 2167 assert_locked();
duke@435 2168 size_t i;
duke@435 2169 for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
duke@435 2170 FreeList* fl = &_indexedFreeList[i];
ysr@1580 2171 if (PrintFLSStatistics > 1) {
ysr@1580 2172 gclog_or_tty->print("size[%d] : ", i);
ysr@1580 2173 }
ysr@1580 2174 fl->compute_desired(inter_sweep_current, inter_sweep_estimate, intra_sweep_estimate);
ysr@1580 2175 fl->set_coalDesired((ssize_t)((double)fl->desired() * CMSSmallCoalSurplusPercent));
duke@435 2176 fl->set_beforeSweep(fl->count());
duke@435 2177 fl->set_bfrSurp(fl->surplus());
duke@435 2178 }
ysr@1580 2179 _dictionary->beginSweepDictCensus(CMSLargeCoalSurplusPercent,
duke@435 2180 inter_sweep_current,
ysr@1580 2181 inter_sweep_estimate,
ysr@1580 2182 intra_sweep_estimate);
duke@435 2183 }
duke@435 2184
duke@435 2185 void CompactibleFreeListSpace::setFLSurplus() {
duke@435 2186 assert_locked();
duke@435 2187 size_t i;
duke@435 2188 for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
duke@435 2189 FreeList *fl = &_indexedFreeList[i];
duke@435 2190 fl->set_surplus(fl->count() -
ysr@1580 2191 (ssize_t)((double)fl->desired() * CMSSmallSplitSurplusPercent));
duke@435 2192 }
duke@435 2193 }
duke@435 2194
duke@435 2195 void CompactibleFreeListSpace::setFLHints() {
duke@435 2196 assert_locked();
duke@435 2197 size_t i;
duke@435 2198 size_t h = IndexSetSize;
duke@435 2199 for (i = IndexSetSize - 1; i != 0; i -= IndexSetStride) {
duke@435 2200 FreeList *fl = &_indexedFreeList[i];
duke@435 2201 fl->set_hint(h);
duke@435 2202 if (fl->surplus() > 0) {
duke@435 2203 h = i;
duke@435 2204 }
duke@435 2205 }
duke@435 2206 }
duke@435 2207
duke@435 2208 void CompactibleFreeListSpace::clearFLCensus() {
duke@435 2209 assert_locked();
ysr@3264 2210 size_t i;
duke@435 2211 for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
duke@435 2212 FreeList *fl = &_indexedFreeList[i];
duke@435 2213 fl->set_prevSweep(fl->count());
duke@435 2214 fl->set_coalBirths(0);
duke@435 2215 fl->set_coalDeaths(0);
duke@435 2216 fl->set_splitBirths(0);
duke@435 2217 fl->set_splitDeaths(0);
duke@435 2218 }
duke@435 2219 }
duke@435 2220
ysr@447 2221 void CompactibleFreeListSpace::endSweepFLCensus(size_t sweep_count) {
ysr@1580 2222 if (PrintFLSStatistics > 0) {
ysr@1580 2223 HeapWord* largestAddr = (HeapWord*) dictionary()->findLargestDict();
ysr@1580 2224 gclog_or_tty->print_cr("CMS: Large block " PTR_FORMAT,
ysr@1580 2225 largestAddr);
ysr@1580 2226 }
duke@435 2227 setFLSurplus();
duke@435 2228 setFLHints();
duke@435 2229 if (PrintGC && PrintFLSCensus > 0) {
ysr@447 2230 printFLCensus(sweep_count);
duke@435 2231 }
duke@435 2232 clearFLCensus();
duke@435 2233 assert_locked();
ysr@1580 2234 _dictionary->endSweepDictCensus(CMSLargeSplitSurplusPercent);
duke@435 2235 }
duke@435 2236
duke@435 2237 bool CompactibleFreeListSpace::coalOverPopulated(size_t size) {
duke@435 2238 if (size < SmallForDictionary) {
duke@435 2239 FreeList *fl = &_indexedFreeList[size];
duke@435 2240 return (fl->coalDesired() < 0) ||
duke@435 2241 ((int)fl->count() > fl->coalDesired());
duke@435 2242 } else {
duke@435 2243 return dictionary()->coalDictOverPopulated(size);
duke@435 2244 }
duke@435 2245 }
duke@435 2246
duke@435 2247 void CompactibleFreeListSpace::smallCoalBirth(size_t size) {
duke@435 2248 assert(size < SmallForDictionary, "Size too large for indexed list");
duke@435 2249 FreeList *fl = &_indexedFreeList[size];
duke@435 2250 fl->increment_coalBirths();
duke@435 2251 fl->increment_surplus();
duke@435 2252 }
duke@435 2253
duke@435 2254 void CompactibleFreeListSpace::smallCoalDeath(size_t size) {
duke@435 2255 assert(size < SmallForDictionary, "Size too large for indexed list");
duke@435 2256 FreeList *fl = &_indexedFreeList[size];
duke@435 2257 fl->increment_coalDeaths();
duke@435 2258 fl->decrement_surplus();
duke@435 2259 }
duke@435 2260
duke@435 2261 void CompactibleFreeListSpace::coalBirth(size_t size) {
duke@435 2262 if (size < SmallForDictionary) {
duke@435 2263 smallCoalBirth(size);
duke@435 2264 } else {
duke@435 2265 dictionary()->dictCensusUpdate(size,
duke@435 2266 false /* split */,
duke@435 2267 true /* birth */);
duke@435 2268 }
duke@435 2269 }
duke@435 2270
duke@435 2271 void CompactibleFreeListSpace::coalDeath(size_t size) {
duke@435 2272 if (size < SmallForDictionary) {
duke@435 2273 smallCoalDeath(size);
duke@435 2274 } else {
duke@435 2275 dictionary()->dictCensusUpdate(size,
duke@435 2276 false /* split */,
duke@435 2277 false /* birth */);
duke@435 2278 }
duke@435 2279 }
duke@435 2280
duke@435 2281 void CompactibleFreeListSpace::smallSplitBirth(size_t size) {
duke@435 2282 assert(size < SmallForDictionary, "Size too large for indexed list");
duke@435 2283 FreeList *fl = &_indexedFreeList[size];
duke@435 2284 fl->increment_splitBirths();
duke@435 2285 fl->increment_surplus();
duke@435 2286 }
duke@435 2287
duke@435 2288 void CompactibleFreeListSpace::smallSplitDeath(size_t size) {
duke@435 2289 assert(size < SmallForDictionary, "Size too large for indexed list");
duke@435 2290 FreeList *fl = &_indexedFreeList[size];
duke@435 2291 fl->increment_splitDeaths();
duke@435 2292 fl->decrement_surplus();
duke@435 2293 }
duke@435 2294
duke@435 2295 void CompactibleFreeListSpace::splitBirth(size_t size) {
duke@435 2296 if (size < SmallForDictionary) {
duke@435 2297 smallSplitBirth(size);
duke@435 2298 } else {
duke@435 2299 dictionary()->dictCensusUpdate(size,
duke@435 2300 true /* split */,
duke@435 2301 true /* birth */);
duke@435 2302 }
duke@435 2303 }
duke@435 2304
duke@435 2305 void CompactibleFreeListSpace::splitDeath(size_t size) {
duke@435 2306 if (size < SmallForDictionary) {
duke@435 2307 smallSplitDeath(size);
duke@435 2308 } else {
duke@435 2309 dictionary()->dictCensusUpdate(size,
duke@435 2310 true /* split */,
duke@435 2311 false /* birth */);
duke@435 2312 }
duke@435 2313 }
duke@435 2314
duke@435 2315 void CompactibleFreeListSpace::split(size_t from, size_t to1) {
duke@435 2316 size_t to2 = from - to1;
duke@435 2317 splitDeath(from);
duke@435 2318 splitBirth(to1);
duke@435 2319 splitBirth(to2);
duke@435 2320 }
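
// Worked example (hypothetical numbers): split(64, 24) records a split death
// for the original 64-word chunk and split births for the resulting 24-word
// and 40-word pieces.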
duke@435 2321
duke@435 2322 void CompactibleFreeListSpace::print() const {
ysr@2294 2323 print_on(tty);
duke@435 2324 }
duke@435 2325
duke@435 2326 void CompactibleFreeListSpace::prepare_for_verify() {
duke@435 2327 assert_locked();
duke@435 2328 repairLinearAllocationBlocks();
duke@435 2329 // Verify that the SpoolBlocks look like free blocks of
duke@435 2330 // appropriate sizes... To be done ...
duke@435 2331 }
duke@435 2332
duke@435 2333 class VerifyAllBlksClosure: public BlkClosure {
coleenp@548 2334 private:
duke@435 2335 const CompactibleFreeListSpace* _sp;
duke@435 2336 const MemRegion _span;
ysr@2071 2337 HeapWord* _last_addr;
ysr@2071 2338 size_t _last_size;
ysr@2071 2339 bool _last_was_obj;
ysr@2071 2340 bool _last_was_live;
duke@435 2341
duke@435 2342 public:
duke@435 2343 VerifyAllBlksClosure(const CompactibleFreeListSpace* sp,
ysr@2071 2344 MemRegion span) : _sp(sp), _span(span),
ysr@2071 2345 _last_addr(NULL), _last_size(0),
ysr@2071 2346 _last_was_obj(false), _last_was_live(false) { }
duke@435 2347
coleenp@548 2348 virtual size_t do_blk(HeapWord* addr) {
duke@435 2349 size_t res;
ysr@2071 2350 bool was_obj = false;
ysr@2071 2351 bool was_live = false;
duke@435 2352 if (_sp->block_is_obj(addr)) {
ysr@2071 2353 was_obj = true;
duke@435 2354 oop p = oop(addr);
duke@435 2355 guarantee(p->is_oop(), "Should be an oop");
duke@435 2356 res = _sp->adjustObjectSize(p->size());
duke@435 2357 if (_sp->obj_is_alive(addr)) {
ysr@2071 2358 was_live = true;
duke@435 2359 p->verify();
duke@435 2360 }
duke@435 2361 } else {
duke@435 2362 FreeChunk* fc = (FreeChunk*)addr;
duke@435 2363 res = fc->size();
duke@435 2364 if (FLSVerifyLists && !fc->cantCoalesce()) {
duke@435 2365 guarantee(_sp->verifyChunkInFreeLists(fc),
duke@435 2366 "Chunk should be on a free list");
duke@435 2367 }
duke@435 2368 }
ysr@2071 2369 if (res == 0) {
ysr@2071 2370 gclog_or_tty->print_cr("Livelock: no rank reduction!");
ysr@2071 2371 gclog_or_tty->print_cr(
ysr@2071 2372 " Current: addr = " PTR_FORMAT ", size = " SIZE_FORMAT ", obj = %s, live = %s \n"
ysr@2071 2373 " Previous: addr = " PTR_FORMAT ", size = " SIZE_FORMAT ", obj = %s, live = %s \n",
ysr@2071 2374 addr, res, was_obj ?"true":"false", was_live ?"true":"false",
ysr@2071 2375 _last_addr, _last_size, _last_was_obj?"true":"false", _last_was_live?"true":"false");
ysr@2071 2376 _sp->print_on(gclog_or_tty);
ysr@2071 2377 guarantee(false, "Seppuku!");
ysr@2071 2378 }
ysr@2071 2379 _last_addr = addr;
ysr@2071 2380 _last_size = res;
ysr@2071 2381 _last_was_obj = was_obj;
ysr@2071 2382 _last_was_live = was_live;
duke@435 2383 return res;
duke@435 2384 }
duke@435 2385 };
duke@435 2386
duke@435 2387 class VerifyAllOopsClosure: public OopClosure {
coleenp@548 2388 private:
duke@435 2389 const CMSCollector* _collector;
duke@435 2390 const CompactibleFreeListSpace* _sp;
duke@435 2391 const MemRegion _span;
duke@435 2392 const bool _past_remark;
duke@435 2393 const CMSBitMap* _bit_map;
duke@435 2394
coleenp@548 2395 protected:
coleenp@548 2396 void do_oop(void* p, oop obj) {
coleenp@548 2397 if (_span.contains(obj)) { // the interior oop points into CMS heap
coleenp@548 2398 if (!_span.contains(p)) { // reference from outside CMS heap
coleenp@548 2399 // Should be a valid object; the first disjunct below allows
coleenp@548 2400 // us to sidestep an assertion in block_is_obj() that insists
coleenp@548 2401 // that p be in _sp. Note that several generations (and spaces)
coleenp@548 2402 // are spanned by _span (CMS heap) above.
coleenp@548 2403 guarantee(!_sp->is_in_reserved(obj) ||
coleenp@548 2404 _sp->block_is_obj((HeapWord*)obj),
coleenp@548 2405 "Should be an object");
coleenp@548 2406 guarantee(obj->is_oop(), "Should be an oop");
coleenp@548 2407 obj->verify();
coleenp@548 2408 if (_past_remark) {
coleenp@548 2409 // Remark has been completed, the object should be marked
coleenp@548 2410 _bit_map->isMarked((HeapWord*)obj);
coleenp@548 2411 }
coleenp@548 2412 } else { // reference within CMS heap
coleenp@548 2413 if (_past_remark) {
coleenp@548 2414 // Remark has been completed -- so the referent should have
coleenp@548 2415 // been marked, if referring object is.
coleenp@548 2416 if (_bit_map->isMarked(_collector->block_start(p))) {
coleenp@548 2417 guarantee(_bit_map->isMarked((HeapWord*)obj), "Marking error?");
coleenp@548 2418 }
coleenp@548 2419 }
coleenp@548 2420 }
coleenp@548 2421 } else if (_sp->is_in_reserved(p)) {
coleenp@548 2422 // the reference is from FLS, and points out of FLS
coleenp@548 2423 guarantee(obj->is_oop(), "Should be an oop");
coleenp@548 2424 obj->verify();
coleenp@548 2425 }
coleenp@548 2426 }
coleenp@548 2427
coleenp@548 2428 template <class T> void do_oop_work(T* p) {
coleenp@548 2429 T heap_oop = oopDesc::load_heap_oop(p);
coleenp@548 2430 if (!oopDesc::is_null(heap_oop)) {
coleenp@548 2431 oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
coleenp@548 2432 do_oop(p, obj);
coleenp@548 2433 }
coleenp@548 2434 }
coleenp@548 2435
duke@435 2436 public:
duke@435 2437 VerifyAllOopsClosure(const CMSCollector* collector,
duke@435 2438 const CompactibleFreeListSpace* sp, MemRegion span,
duke@435 2439 bool past_remark, CMSBitMap* bit_map) :
duke@435 2440 OopClosure(), _collector(collector), _sp(sp), _span(span),
duke@435 2441 _past_remark(past_remark), _bit_map(bit_map) { }
duke@435 2442
coleenp@548 2443 virtual void do_oop(oop* p) { VerifyAllOopsClosure::do_oop_work(p); }
coleenp@548 2444 virtual void do_oop(narrowOop* p) { VerifyAllOopsClosure::do_oop_work(p); }
duke@435 2445 };
duke@435 2446
brutisso@3711 2447 void CompactibleFreeListSpace::verify() const {
duke@435 2448 assert_lock_strong(&_freelistLock);
duke@435 2449 verify_objects_initialized();
duke@435 2450 MemRegion span = _collector->_span;
duke@435 2451 bool past_remark = (_collector->abstract_state() ==
duke@435 2452 CMSCollector::Sweeping);
duke@435 2453
duke@435 2454 ResourceMark rm;
duke@435 2455 HandleMark hm;
duke@435 2456
duke@435 2457 // Check integrity of CFL data structures
duke@435 2458 _promoInfo.verify();
duke@435 2459 _dictionary->verify();
duke@435 2460 if (FLSVerifyIndexTable) {
duke@435 2461 verifyIndexedFreeLists();
duke@435 2462 }
duke@435 2463 // Check integrity of all objects and free blocks in space
duke@435 2464 {
duke@435 2465 VerifyAllBlksClosure cl(this, span);
duke@435 2466 ((CompactibleFreeListSpace*)this)->blk_iterate(&cl); // cast off const
duke@435 2467 }
duke@435 2468 // Check that all references in the heap to FLS
duke@435 2469 // are to valid objects in FLS or that references in
duke@435 2470 // FLS are to valid objects elsewhere in the heap
duke@435 2471 if (FLSVerifyAllHeapReferences)
duke@435 2472 {
duke@435 2473 VerifyAllOopsClosure cl(_collector, this, span, past_remark,
duke@435 2474 _collector->markBitMap());
duke@435 2475 CollectedHeap* ch = Universe::heap();
duke@435 2476 ch->oop_iterate(&cl); // all oops in generations
duke@435 2477 ch->permanent_oop_iterate(&cl); // all oops in perm gen
duke@435 2478 }
duke@435 2479
duke@435 2480 if (VerifyObjectStartArray) {
duke@435 2481 // Verify the block offset table
duke@435 2482 _bt.verify();
duke@435 2483 }
duke@435 2484 }
duke@435 2485
duke@435 2486 #ifndef PRODUCT
duke@435 2487 void CompactibleFreeListSpace::verifyFreeLists() const {
duke@435 2488 if (FLSVerifyLists) {
duke@435 2489 _dictionary->verify();
duke@435 2490 verifyIndexedFreeLists();
duke@435 2491 } else {
duke@435 2492 if (FLSVerifyDictionary) {
duke@435 2493 _dictionary->verify();
duke@435 2494 }
duke@435 2495 if (FLSVerifyIndexTable) {
duke@435 2496 verifyIndexedFreeLists();
duke@435 2497 }
duke@435 2498 }
duke@435 2499 }
duke@435 2500 #endif
duke@435 2501
duke@435 2502 void CompactibleFreeListSpace::verifyIndexedFreeLists() const {
duke@435 2503 size_t i = 0;
ysr@3264 2504 for (; i < IndexSetStart; i++) {
duke@435 2505 guarantee(_indexedFreeList[i].head() == NULL, "should be NULL");
duke@435 2506 }
duke@435 2507 for (; i < IndexSetSize; i++) {
duke@435 2508 verifyIndexedFreeList(i);
duke@435 2509 }
duke@435 2510 }
duke@435 2511
duke@435 2512 void CompactibleFreeListSpace::verifyIndexedFreeList(size_t size) const {
ysr@1580 2513 FreeChunk* fc = _indexedFreeList[size].head();
ysr@1580 2514 FreeChunk* tail = _indexedFreeList[size].tail();
ysr@1580 2515 size_t num = _indexedFreeList[size].count();
ysr@1580 2516 size_t n = 0;
ysr@3264 2517 guarantee(((size >= IndexSetStart) && (size % IndexSetStride == 0)) || fc == NULL,
ysr@3220 2518 "Slot should have been empty");
ysr@1580 2519 for (; fc != NULL; fc = fc->next(), n++) {
duke@435 2520 guarantee(fc->size() == size, "Size inconsistency");
duke@435 2521 guarantee(fc->isFree(), "!free?");
duke@435 2522 guarantee(fc->next() == NULL || fc->next()->prev() == fc, "Broken list");
ysr@1580 2523 guarantee((fc->next() == NULL) == (fc == tail), "Incorrect tail");
duke@435 2524 }
ysr@1580 2525 guarantee(n == num, "Incorrect count");
duke@435 2526 }
duke@435 2527
duke@435 2528 #ifndef PRODUCT
ysr@3220 2529 void CompactibleFreeListSpace::check_free_list_consistency() const {
duke@435 2530 assert(_dictionary->minSize() <= IndexSetSize,
duke@435 2531 "Some sizes can't be allocated without recourse to"
duke@435 2532 " linear allocation buffers");
duke@435 2533 assert(MIN_TREE_CHUNK_SIZE*HeapWordSize == sizeof(TreeChunk),
duke@435 2534 "else MIN_TREE_CHUNK_SIZE is wrong");
ysr@3220 2535 assert((IndexSetStride == 2 && IndexSetStart == 4) || // 32-bit
ysr@3220 2536 (IndexSetStride == 1 && IndexSetStart == 3), "just checking"); // 64-bit
ysr@3264 2537 assert((IndexSetStride != 2) || (IndexSetStart % 2 == 0),
duke@435 2538 "Some for-loops may be incorrectly initialized");
duke@435 2539 assert((IndexSetStride != 2) || (IndexSetSize % 2 == 1),
duke@435 2540 "For-loops that iterate over IndexSet with stride 2 may be wrong");
duke@435 2541 }
duke@435 2542 #endif
duke@435 2543
ysr@447 2544 void CompactibleFreeListSpace::printFLCensus(size_t sweep_count) const {
duke@435 2545 assert_lock_strong(&_freelistLock);
ysr@447 2546 FreeList total;
ysr@447 2547 gclog_or_tty->print("end sweep# " SIZE_FORMAT "\n", sweep_count);
ysr@447 2548 FreeList::print_labels_on(gclog_or_tty, "size");
duke@435 2549 size_t totalFree = 0;
duke@435 2550 for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
duke@435 2551 const FreeList *fl = &_indexedFreeList[i];
ysr@447 2552 totalFree += fl->count() * fl->size();
ysr@447 2553 if (i % (40*IndexSetStride) == 0) {
ysr@447 2554 FreeList::print_labels_on(gclog_or_tty, "size");
ysr@447 2555 }
ysr@447 2556 fl->print_on(gclog_or_tty);
ysr@447 2557 total.set_bfrSurp( total.bfrSurp() + fl->bfrSurp() );
ysr@447 2558 total.set_surplus( total.surplus() + fl->surplus() );
ysr@447 2559 total.set_desired( total.desired() + fl->desired() );
ysr@447 2560 total.set_prevSweep( total.prevSweep() + fl->prevSweep() );
ysr@447 2561 total.set_beforeSweep(total.beforeSweep() + fl->beforeSweep());
ysr@447 2562 total.set_count( total.count() + fl->count() );
ysr@447 2563 total.set_coalBirths( total.coalBirths() + fl->coalBirths() );
ysr@447 2564 total.set_coalDeaths( total.coalDeaths() + fl->coalDeaths() );
ysr@447 2565 total.set_splitBirths(total.splitBirths() + fl->splitBirths());
ysr@447 2566 total.set_splitDeaths(total.splitDeaths() + fl->splitDeaths());
duke@435 2567 }
ysr@447 2568 total.print_on(gclog_or_tty, "TOTAL");
ysr@447 2569 gclog_or_tty->print_cr("Total free in indexed lists "
ysr@447 2570 SIZE_FORMAT " words", totalFree);
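// "growth" below is the net number of blocks gained through splitting and
// coalescing (births minus deaths), relative to the block count at the
// previous sweep; "deficit" is the fraction of the desired count still unmet.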
duke@435 2571 gclog_or_tty->print("growth: %8.5f deficit: %8.5f\n",
ysr@447 2572 (double)(total.splitBirths()+total.coalBirths()-total.splitDeaths()-total.coalDeaths())/
ysr@447 2573 (total.prevSweep() != 0 ? (double)total.prevSweep() : 1.0),
ysr@447 2574 (double)(total.desired() - total.count())/(total.desired() != 0 ? (double)total.desired() : 1.0));
duke@435 2575 _dictionary->printDictCensus();
duke@435 2576 }
duke@435 2577
ysr@1580 2578 ///////////////////////////////////////////////////////////////////////////
ysr@1580 2579 // CFLS_LAB
ysr@1580 2580 ///////////////////////////////////////////////////////////////////////////
ysr@1580 2581
ysr@1580 2582 #define VECTOR_257(x) \
ysr@1580 2583 /* 1 2 3 4 5 6 7 8 9 1x 11 12 13 14 15 16 17 18 19 2x 21 22 23 24 25 26 27 28 29 3x 31 32 */ \
ysr@1580 2584 { x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, \
ysr@1580 2585 x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, \
ysr@1580 2586 x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, \
ysr@1580 2587 x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, \
ysr@1580 2588 x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, \
ysr@1580 2589 x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, \
ysr@1580 2590 x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, \
ysr@1580 2591 x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, \
ysr@1580 2592 x }
ysr@1580 2593
ysr@1580 2594 // Initialize with default setting of CMSParPromoteBlocksToClaim, _not_
ysr@1580 2595 // OldPLABSize, whose static default is different; if overridden at the
ysr@1580 2596 // command-line, this will get reinitialized via a call to
ysr@1580 2597 // modify_initialization() below.
ysr@1580 2598 AdaptiveWeightedAverage CFLS_LAB::_blocks_to_claim[] =
ysr@1580 2599 VECTOR_257(AdaptiveWeightedAverage(OldPLABWeight, (float)CMSParPromoteBlocksToClaim));
ysr@1580 2600 size_t CFLS_LAB::_global_num_blocks[] = VECTOR_257(0);
jmasa@3357 2601 uint CFLS_LAB::_global_num_workers[] = VECTOR_257(0);
duke@435 2602
duke@435 2603 CFLS_LAB::CFLS_LAB(CompactibleFreeListSpace* cfls) :
duke@435 2604 _cfls(cfls)
duke@435 2605 {
ysr@1580 2606 assert(CompactibleFreeListSpace::IndexSetSize == 257, "Modify VECTOR_257() macro above");
duke@435 2607 for (size_t i = CompactibleFreeListSpace::IndexSetStart;
duke@435 2608 i < CompactibleFreeListSpace::IndexSetSize;
duke@435 2609 i += CompactibleFreeListSpace::IndexSetStride) {
duke@435 2610 _indexedFreeList[i].set_size(i);
ysr@1580 2611 _num_blocks[i] = 0;
ysr@1580 2612 }
ysr@1580 2613 }
ysr@1580 2614
ysr@1580 2615 static bool _CFLS_LAB_modified = false;
ysr@1580 2616
ysr@1580 2617 void CFLS_LAB::modify_initialization(size_t n, unsigned wt) {
ysr@1580 2618 assert(!_CFLS_LAB_modified, "Call only once");
ysr@1580 2619 _CFLS_LAB_modified = true;
ysr@1580 2620 for (size_t i = CompactibleFreeListSpace::IndexSetStart;
ysr@1580 2621 i < CompactibleFreeListSpace::IndexSetSize;
ysr@1580 2622 i += CompactibleFreeListSpace::IndexSetStride) {
ysr@1580 2623 _blocks_to_claim[i].modify(n, wt, true /* force */);
duke@435 2624 }
duke@435 2625 }
duke@435 2626
duke@435 2627 HeapWord* CFLS_LAB::alloc(size_t word_sz) {
duke@435 2628 FreeChunk* res;
ysr@2132 2629 assert(word_sz == _cfls->adjustObjectSize(word_sz), "Error");
duke@435 2630 if (word_sz >= CompactibleFreeListSpace::IndexSetSize) {
duke@435 2631 // This locking manages sync with other large object allocations.
duke@435 2632 MutexLockerEx x(_cfls->parDictionaryAllocLock(),
duke@435 2633 Mutex::_no_safepoint_check_flag);
duke@435 2634 res = _cfls->getChunkFromDictionaryExact(word_sz);
duke@435 2635 if (res == NULL) return NULL;
duke@435 2636 } else {
duke@435 2637 FreeList* fl = &_indexedFreeList[word_sz];
duke@435 2638 if (fl->count() == 0) {
duke@435 2639 // Attempt to refill this local free list.
ysr@1580 2640 get_from_global_pool(word_sz, fl);
duke@435 2641 // If it didn't work, give up.
duke@435 2642 if (fl->count() == 0) return NULL;
duke@435 2643 }
duke@435 2644 res = fl->getChunkAtHead();
duke@435 2645 assert(res != NULL, "Why was count non-zero?");
duke@435 2646 }
duke@435 2647 res->markNotFree();
duke@435 2648 assert(!res->isFree(), "shouldn't be marked free");
coleenp@622 2649 assert(oop(res)->klass_or_null() == NULL, "should look uninitialized");
duke@435 2650 // mangle a just allocated object with a distinct pattern.
duke@435 2651 debug_only(res->mangleAllocated(word_sz));
duke@435 2652 return (HeapWord*)res;
duke@435 2653 }
duke@435 2654
ysr@1580 2655 // Get a chunk of blocks of the right size and update related
ysr@1580 2656 // book-keeping stats
ysr@1580 2657 void CFLS_LAB::get_from_global_pool(size_t word_sz, FreeList* fl) {
ysr@1580 2658 // Get the #blocks we want to claim
ysr@1580 2659 size_t n_blks = (size_t)_blocks_to_claim[word_sz].average();
ysr@1580 2660 assert(n_blks > 0, "Error");
ysr@1580 2661 assert(ResizePLAB || n_blks == OldPLABSize, "Error");
ysr@1580 2662 // In some cases, when the application has a phase change,
ysr@1580 2663 // there may be a sudden and sharp shift in the object survival
ysr@1580 2664 // profile, and updating the counts at the end of a scavenge
ysr@1580 2665 // may not be quick enough, giving rise to large scavenge pauses
ysr@1580 2666 // during these phase changes. It is beneficial to detect such
ysr@1580 2667 // changes on-the-fly during a scavenge and avoid such a phase-change
ysr@1580 2668 // pothole. The following code is a heuristic attempt to do that.
ysr@1580 2669 // It is protected by a product flag until we have gained
ysr@1580 2670 // enough experience with this heuristic and fine-tuned its behaviour.
ysr@1580 2671 // WARNING: This might increase fragmentation if we overreact to
ysr@1580 2672 // small spikes, so some kind of historical smoothing based on
ysr@1580 2673 // previous experience with the greater reactivity might be useful.
ysr@1580 2674 // Lacking sufficient experience, CMSOldPLABResizeQuicker is disabled by
ysr@1580 2675 // default.
ysr@1580 2676 if (ResizeOldPLAB && CMSOldPLABResizeQuicker) {
ysr@1580 2677 size_t multiple = _num_blocks[word_sz]/(CMSOldPLABToleranceFactor*CMSOldPLABNumRefills*n_blks);
ysr@1580 2678 n_blks += CMSOldPLABReactivityFactor*multiple*n_blks;
ysr@1580 2679 n_blks = MIN2(n_blks, CMSOldPLABMax);
ysr@1580 2680 }
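// For example (hypothetical numbers): if this thread has already obtained
// _num_blocks[word_sz] = 4000 blocks of this size during the current
// scavenge, n_blks = 100, CMSOldPLABToleranceFactor = 4 and
// CMSOldPLABNumRefills = 5, then multiple = 4000/(4*5*100) = 2 and, with
// CMSOldPLABReactivityFactor = 2, the claim grows by 2*2*100 = 400 blocks
// before being capped at CMSOldPLABMax.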
ysr@1580 2681 assert(n_blks > 0, "Error");
ysr@1580 2682 _cfls->par_get_chunk_of_blocks(word_sz, n_blks, fl);
ysr@1580 2683 // Update stats table entry for this block size
ysr@1580 2684 _num_blocks[word_sz] += fl->count();
ysr@1580 2685 }
ysr@1580 2686
ysr@1580 2687 void CFLS_LAB::compute_desired_plab_size() {
ysr@1580 2688 for (size_t i = CompactibleFreeListSpace::IndexSetStart;
duke@435 2689 i < CompactibleFreeListSpace::IndexSetSize;
duke@435 2690 i += CompactibleFreeListSpace::IndexSetStride) {
ysr@1580 2691 assert((_global_num_workers[i] == 0) == (_global_num_blocks[i] == 0),
ysr@1580 2692 "Counter inconsistency");
ysr@1580 2693 if (_global_num_workers[i] > 0) {
ysr@1580 2694 // Need to smooth wrt historical average
ysr@1580 2695 if (ResizeOldPLAB) {
ysr@1580 2696 _blocks_to_claim[i].sample(
ysr@1580 2697 MAX2((size_t)CMSOldPLABMin,
ysr@1580 2698 MIN2((size_t)CMSOldPLABMax,
ysr@1580 2699 _global_num_blocks[i]/(_global_num_workers[i]*CMSOldPLABNumRefills))));
ysr@1580 2700 }
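// For example (hypothetical numbers): if 8 workers together obtained 2000
// blocks of this size last scavenge and CMSOldPLABNumRefills = 5, the
// sampled per-refill target is 2000/(8*5) = 50 blocks, clamped to
// [CMSOldPLABMin, CMSOldPLABMax] before updating the adaptive average.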
ysr@1580 2701 // Reset counters for next round
ysr@1580 2702 _global_num_workers[i] = 0;
ysr@1580 2703 _global_num_blocks[i] = 0;
ysr@1580 2704 if (PrintOldPLAB) {
ysr@1580 2705 gclog_or_tty->print_cr("[%d]: %d", i, (size_t)_blocks_to_claim[i].average());
ysr@1580 2706 }
duke@435 2707 }
duke@435 2708 }
duke@435 2709 }
duke@435 2710
ysr@3220 2711 // If this is changed in the future to allow parallel
ysr@3220 2712 // access, one would need to take the FL locks and,
ysr@3220 2713 // depending on how it is used, stagger access from
ysr@3220 2714 // parallel threads to reduce contention.
ysr@1580 2715 void CFLS_LAB::retire(int tid) {
ysr@1580 2716 // We run this single threaded with the world stopped;
ysr@1580 2717 // so no need for locks and such.
ysr@1580 2718 NOT_PRODUCT(Thread* t = Thread::current();)
ysr@1580 2719 assert(Thread::current()->is_VM_thread(), "Error");
ysr@1580 2720 for (size_t i = CompactibleFreeListSpace::IndexSetStart;
ysr@1580 2721 i < CompactibleFreeListSpace::IndexSetSize;
ysr@1580 2722 i += CompactibleFreeListSpace::IndexSetStride) {
ysr@1580 2723 assert(_num_blocks[i] >= (size_t)_indexedFreeList[i].count(),
ysr@1580 2724 "Can't retire more than what we obtained");
ysr@1580 2725 if (_num_blocks[i] > 0) {
ysr@1580 2726 size_t num_retire = _indexedFreeList[i].count();
ysr@1580 2727 assert(_num_blocks[i] > num_retire, "Should have used at least one");
ysr@1580 2728 {
ysr@3220 2729 // MutexLockerEx x(_cfls->_indexedFreeListParLocks[i],
ysr@3220 2730 // Mutex::_no_safepoint_check_flag);
ysr@3220 2731
ysr@1580 2732 // Update globals stats for num_blocks used
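// i.e. the number of blocks this worker actually consumed: everything it
// obtained minus what it is returning to the shared lists now.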
ysr@1580 2733 _global_num_blocks[i] += (_num_blocks[i] - num_retire);
ysr@1580 2734 _global_num_workers[i]++;
jmasa@3357 2735 assert(_global_num_workers[i] <= ParallelGCThreads, "Too big");
ysr@1580 2736 if (num_retire > 0) {
ysr@1580 2737 _cfls->_indexedFreeList[i].prepend(&_indexedFreeList[i]);
ysr@1580 2738 // Reset this list.
ysr@1580 2739 _indexedFreeList[i] = FreeList();
ysr@1580 2740 _indexedFreeList[i].set_size(i);
ysr@1580 2741 }
ysr@1580 2742 }
ysr@1580 2743 if (PrintOldPLAB) {
ysr@1580 2744 gclog_or_tty->print_cr("%d[%d]: %d/%d/%d",
ysr@1580 2745 tid, i, num_retire, _num_blocks[i], (size_t)_blocks_to_claim[i].average());
ysr@1580 2746 }
ysr@1580 2747 // Reset stats for next round
ysr@1580 2748 _num_blocks[i] = 0;
ysr@1580 2749 }
ysr@1580 2750 }
ysr@1580 2751 }
ysr@1580 2752
ysr@1580 2753 void CompactibleFreeListSpace::par_get_chunk_of_blocks(size_t word_sz, size_t n, FreeList* fl) {
duke@435 2754 assert(fl->count() == 0, "Precondition.");
duke@435 2755 assert(word_sz < CompactibleFreeListSpace::IndexSetSize,
duke@435 2756 "Precondition");
duke@435 2757
ysr@1580 2758 // We'll try all multiples of word_sz in the indexed set, starting with
ysr@1580 2759 // word_sz itself and, if CMSSplitIndexedFreeListBlocks, try larger multiples,
ysr@1580 2760 // then try getting a big chunk and splitting it.
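// For example (hypothetical numbers): for a request of n = 50 blocks of
// word_sz = 8, we first look at the size-8 free list, then (when
// CMSSplitIndexedFreeListBlocks is set) the size-16 list splitting each
// chunk 2 ways, the size-24 list splitting 3 ways, and so on while
// k * word_sz stays below IndexSetSize; if all of that fails, we fall
// through and carve the blocks out of a single dictionary chunk below.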
ysr@1580 2761 {
ysr@1580 2762 bool found;
ysr@1580 2763 int k;
ysr@1580 2764 size_t cur_sz;
ysr@1580 2765 for (k = 1, cur_sz = k * word_sz, found = false;
ysr@1580 2766 (cur_sz < CompactibleFreeListSpace::IndexSetSize) &&
ysr@1580 2767 (CMSSplitIndexedFreeListBlocks || k <= 1);
ysr@1580 2768 k++, cur_sz = k * word_sz) {
ysr@1580 2769 FreeList fl_for_cur_sz; // Empty.
ysr@1580 2770 fl_for_cur_sz.set_size(cur_sz);
ysr@1580 2771 {
ysr@1580 2772 MutexLockerEx x(_indexedFreeListParLocks[cur_sz],
ysr@1580 2773 Mutex::_no_safepoint_check_flag);
ysr@2071 2774 FreeList* gfl = &_indexedFreeList[cur_sz];
ysr@1580 2775 if (gfl->count() != 0) {
ysr@1580 2776 // nn is the number of chunks of size cur_sz that
ysr@1580 2777 // we'd need to split k-ways each, in order to create
ysr@1580 2778 // "n" chunks of size word_sz each.
ysr@1580 2779 const size_t nn = MAX2(n/k, (size_t)1);
ysr@1580 2780 gfl->getFirstNChunksFromList(nn, &fl_for_cur_sz);
ysr@1580 2781 found = true;
ysr@1580 2782 if (k > 1) {
ysr@1580 2783 // Update split death stats for the cur_sz-size blocks list:
ysr@1580 2784 // we increment the split death count by the number of blocks
ysr@1580 2785 // we just took from the cur_sz-size blocks list and which
ysr@1580 2786 // we will be splitting below.
ysr@2071 2787 ssize_t deaths = gfl->splitDeaths() +
ysr@1580 2788 fl_for_cur_sz.count();
ysr@2071 2789 gfl->set_splitDeaths(deaths);
ysr@1580 2790 }
ysr@1580 2791 }
ysr@1580 2792 }
ysr@1580 2793 // Now transfer fl_for_cur_sz to fl. Common case, we hope, is k = 1.
ysr@1580 2794 if (found) {
ysr@1580 2795 if (k == 1) {
ysr@1580 2796 fl->prepend(&fl_for_cur_sz);
ysr@1580 2797 } else {
ysr@1580 2798 // Divide each block on fl_for_cur_sz up k ways.
ysr@1580 2799 FreeChunk* fc;
ysr@1580 2800 while ((fc = fl_for_cur_sz.getChunkAtHead()) != NULL) {
ysr@1580 2801 // Must do this in reverse order, so that anybody attempting to
ysr@1580 2802 // access the main chunk sees it as a single free block until we
ysr@1580 2803 // change it.
ysr@1580 2804 size_t fc_size = fc->size();
ysr@2071 2805 assert(fc->isFree(), "Error");
ysr@1580 2806 for (int i = k-1; i >= 0; i--) {
ysr@1580 2807 FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz);
ysr@2071 2808 assert((i != 0) ||
ysr@2071 2809 ((fc == ffc) && ffc->isFree() &&
ysr@2071 2810 (ffc->size() == k*word_sz) && (fc_size == word_sz)),
ysr@2071 2811 "Counting error");
ysr@1580 2812 ffc->setSize(word_sz);
ysr@2071 2813 ffc->linkPrev(NULL); // Mark as a free block for other (parallel) GC threads.
ysr@1580 2814 ffc->linkNext(NULL);
ysr@1580 2815 // Above must occur before BOT is updated below.
ysr@2071 2816 OrderAccess::storestore();
ysr@2071 2817 // splitting from the right, fc_size == i * word_sz
ysr@2071 2818 _bt.mark_block((HeapWord*)ffc, word_sz, true /* reducing */);
ysr@1580 2819 fc_size -= word_sz;
ysr@2071 2820 assert(fc_size == i*word_sz, "Error");
ysr@2071 2821 _bt.verify_not_unallocated((HeapWord*)ffc, word_sz);
ysr@1580 2822 _bt.verify_single_block((HeapWord*)fc, fc_size);
ysr@2071 2823 _bt.verify_single_block((HeapWord*)ffc, word_sz);
ysr@1580 2824 // Push this on "fl".
ysr@1580 2825 fl->returnChunkAtHead(ffc);
ysr@1580 2826 }
ysr@1580 2827 // TRAP
ysr@1580 2828 assert(fl->tail()->next() == NULL, "List invariant.");
ysr@1580 2829 }
ysr@1580 2830 }
ysr@1580 2831 // Update birth stats for this block size.
ysr@1580 2832 size_t num = fl->count();
ysr@1580 2833 MutexLockerEx x(_indexedFreeListParLocks[word_sz],
ysr@1580 2834 Mutex::_no_safepoint_check_flag);
ysr@1580 2835 ssize_t births = _indexedFreeList[word_sz].splitBirths() + num;
ysr@1580 2836 _indexedFreeList[word_sz].set_splitBirths(births);
ysr@1580 2837 return;
duke@435 2838 }
duke@435 2839 }
duke@435 2840 }
duke@435 2841 // Otherwise, we'll split a block from the dictionary.
duke@435 2842 FreeChunk* fc = NULL;
duke@435 2843 FreeChunk* rem_fc = NULL;
duke@435 2844 size_t rem;
duke@435 2845 {
duke@435 2846 MutexLockerEx x(parDictionaryAllocLock(),
duke@435 2847 Mutex::_no_safepoint_check_flag);
duke@435 2848 while (n > 0) {
duke@435 2849 fc = dictionary()->getChunk(MAX2(n * word_sz,
duke@435 2850 _dictionary->minSize()),
duke@435 2851 FreeBlockDictionary::atLeast);
duke@435 2852 if (fc != NULL) {
ysr@2071 2853 _bt.allocated((HeapWord*)fc, fc->size(), true /* reducing */); // update _unallocated_blk
duke@435 2854 dictionary()->dictCensusUpdate(fc->size(),
duke@435 2855 true /*split*/,
duke@435 2856 false /*birth*/);
duke@435 2857 break;
duke@435 2858 } else {
duke@435 2859 n--;
duke@435 2860 }
duke@435 2861 }
duke@435 2862 if (fc == NULL) return;
ysr@2071 2863 // Otherwise, split up that block.
ysr@1580 2864 assert((ssize_t)n >= 1, "Control point invariant");
ysr@2071 2865 assert(fc->isFree(), "Error: should be a free block");
ysr@2071 2866 _bt.verify_single_block((HeapWord*)fc, fc->size());
ysr@1580 2867 const size_t nn = fc->size() / word_sz;
duke@435 2868 n = MIN2(nn, n);
ysr@1580 2869 assert((ssize_t)n >= 1, "Control point invariant");
duke@435 2870 rem = fc->size() - n * word_sz;
duke@435 2871 // If there is a remainder, and it's too small, allocate one fewer.
duke@435 2872 if (rem > 0 && rem < MinChunkSize) {
duke@435 2873 n--; rem += word_sz;
duke@435 2874 }
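// For example (hypothetical numbers): if fc is 1000 words, word_sz = 24 and
// n = 40, then rem = 1000 - 40*24 = 40 words; were MinChunkSize larger than
// 40, we would take one block fewer (n = 39) and leave a 64-word remainder
// instead.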
jmasa@1583 2875 // Note that at this point we may have n == 0.
jmasa@1583 2876 assert((ssize_t)n >= 0, "Control point invariant");
jmasa@1583 2877
jmasa@1583 2878 // If n is 0, the chunk fc that was found is not large
jmasa@1583 2879 // enough to leave a viable remainder. We are unable to
jmasa@1583 2880 // allocate even one block. Return fc to the
jmasa@1583 2881 // dictionary and return, leaving "fl" empty.
jmasa@1583 2882 if (n == 0) {
jmasa@1583 2883 returnChunkToDictionary(fc);
ysr@2071 2884 assert(fl->count() == 0, "We never allocated any blocks");
jmasa@1583 2885 return;
jmasa@1583 2886 }
jmasa@1583 2887
duke@435 2888 // First return the remainder, if any.
duke@435 2889 // Note that we hold the lock until we decide if we're going to give
ysr@1580 2890 // back the remainder to the dictionary, since a concurrent allocation
duke@435 2891 // may otherwise see the heap as empty. (We're willing to take that
duke@435 2892 // hit if the block is a small block.)
duke@435 2893 if (rem > 0) {
duke@435 2894 size_t prefix_size = n * word_sz;
duke@435 2895 rem_fc = (FreeChunk*)((HeapWord*)fc + prefix_size);
duke@435 2896 rem_fc->setSize(rem);
ysr@2071 2897 rem_fc->linkPrev(NULL); // Mark as a free block for other (parallel) GC threads.
duke@435 2898 rem_fc->linkNext(NULL);
duke@435 2899 // Above must occur before BOT is updated below.
ysr@1580 2900 assert((ssize_t)n > 0 && prefix_size > 0 && rem_fc > fc, "Error");
ysr@2071 2901 OrderAccess::storestore();
duke@435 2902 _bt.split_block((HeapWord*)fc, fc->size(), prefix_size);
ysr@2071 2903 assert(fc->isFree(), "Error");
ysr@2071 2904 fc->setSize(prefix_size);
duke@435 2905 if (rem >= IndexSetSize) {
duke@435 2906 returnChunkToDictionary(rem_fc);
ysr@1580 2907 dictionary()->dictCensusUpdate(rem, true /*split*/, true /*birth*/);
duke@435 2908 rem_fc = NULL;
duke@435 2909 }
duke@435 2910 // Otherwise, return it to the small list below.
duke@435 2911 }
duke@435 2912 }
duke@435 2913 if (rem_fc != NULL) {
duke@435 2914 MutexLockerEx x(_indexedFreeListParLocks[rem],
duke@435 2915 Mutex::_no_safepoint_check_flag);
duke@435 2916 _bt.verify_not_unallocated((HeapWord*)rem_fc, rem_fc->size());
duke@435 2917 _indexedFreeList[rem].returnChunkAtHead(rem_fc);
duke@435 2918 smallSplitBirth(rem);
duke@435 2919 }
ysr@1580 2920 assert((ssize_t)n > 0 && fc != NULL, "Consistency");
duke@435 2921 // Now do the splitting up.
duke@435 2922 // Must do this in reverse order, so that anybody attempting to
duke@435 2923 // access the main chunk sees it as a single free block until we
duke@435 2924 // change it.
duke@435 2925 size_t fc_size = n * word_sz;
duke@435 2926 // All but first chunk in this loop
duke@435 2927 for (ssize_t i = n-1; i > 0; i--) {
duke@435 2928 FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz);
duke@435 2929 ffc->setSize(word_sz);
ysr@2071 2930 ffc->linkPrev(NULL); // Mark as a free block for other (parallel) GC threads.
duke@435 2931 ffc->linkNext(NULL);
duke@435 2932 // Above must occur before BOT is updated below.
ysr@2071 2933 OrderAccess::storestore();
duke@435 2934 // splitting from the right, fc_size == i * word_sz
ysr@2071 2935 _bt.mark_block((HeapWord*)ffc, word_sz, true /* reducing */);
duke@435 2936 fc_size -= word_sz;
duke@435 2937 _bt.verify_not_unallocated((HeapWord*)ffc, ffc->size());
duke@435 2938 _bt.verify_single_block((HeapWord*)ffc, ffc->size());
duke@435 2939 _bt.verify_single_block((HeapWord*)fc, fc_size);
duke@435 2940 // Push this on "fl".
duke@435 2941 fl->returnChunkAtHead(ffc);
duke@435 2942 }
duke@435 2943 // First chunk
ysr@2071 2944 assert(fc->isFree() && fc->size() == n*word_sz, "Error: should still be a free block");
ysr@2071 2945 // The blocks above should show their new sizes before the first block below
duke@435 2946 fc->setSize(word_sz);
ysr@2071 2947 fc->linkPrev(NULL); // idempotent wrt free-ness, see assert above
duke@435 2948 fc->linkNext(NULL);
duke@435 2949 _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
duke@435 2950 _bt.verify_single_block((HeapWord*)fc, fc->size());
duke@435 2951 fl->returnChunkAtHead(fc);
duke@435 2952
ysr@1580 2953 assert((ssize_t)n > 0 && (ssize_t)n == fl->count(), "Incorrect number of blocks");
duke@435 2954 {
ysr@1580 2955 // Update the stats for this block size.
duke@435 2956 MutexLockerEx x(_indexedFreeListParLocks[word_sz],
duke@435 2957 Mutex::_no_safepoint_check_flag);
ysr@1580 2958 const ssize_t births = _indexedFreeList[word_sz].splitBirths() + n;
ysr@1580 2959 _indexedFreeList[word_sz].set_splitBirths(births);
ysr@1580 2960 // ssize_t new_surplus = _indexedFreeList[word_sz].surplus() + n;
ysr@1580 2961 // _indexedFreeList[word_sz].set_surplus(new_surplus);
duke@435 2962 }
duke@435 2963
duke@435 2964 // TRAP
duke@435 2965 assert(fl->tail()->next() == NULL, "List invariant.");
duke@435 2966 }
duke@435 2967
duke@435 2968 // Set up the space's par_seq_tasks structure for work claiming
duke@435 2969 // for parallel rescan. See CMSParRemarkTask where this is currently used.
duke@435 2970 // XXX Need to suitably abstract and generalize this and the next
duke@435 2971 // method into one.
duke@435 2972 void
duke@435 2973 CompactibleFreeListSpace::
duke@435 2974 initialize_sequential_subtasks_for_rescan(int n_threads) {
duke@435 2975 // The "size" of each task is fixed according to rescan_task_size.
duke@435 2976 assert(n_threads > 0, "Unexpected n_threads argument");
duke@435 2977 const size_t task_size = rescan_task_size();
duke@435 2978 size_t n_tasks = (used_region().word_size() + task_size - 1)/task_size;
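// Ceiling division: for example, a used region of 10M words with a 4M-word
// task_size yields n_tasks = 3, with the last task covering only the
// trailing 2M words.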
ysr@775 2979 assert((n_tasks == 0) == used_region().is_empty(), "n_tasks incorrect");
ysr@775 2980 assert(n_tasks == 0 ||
ysr@775 2981 ((used_region().start() + (n_tasks - 1)*task_size < used_region().end()) &&
ysr@775 2982 (used_region().start() + n_tasks*task_size >= used_region().end())),
ysr@775 2983 "n_tasks calculation incorrect");
duke@435 2984 SequentialSubTasksDone* pst = conc_par_seq_tasks();
duke@435 2985 assert(!pst->valid(), "Clobbering existing data?");
jmasa@2188 2986 // Sets the condition for completion of the subtask (how many threads
jmasa@2188 2987 // need to finish in order to be done).
jmasa@2188 2988 pst->set_n_threads(n_threads);
duke@435 2989 pst->set_n_tasks((int)n_tasks);
duke@435 2990 }
duke@435 2991
duke@435 2992 // Set up the space's par_seq_tasks structure for work claiming
duke@435 2993 // for parallel concurrent marking. See CMSConcMarkTask where this is currently used.
duke@435 2994 void
duke@435 2995 CompactibleFreeListSpace::
duke@435 2996 initialize_sequential_subtasks_for_marking(int n_threads,
duke@435 2997 HeapWord* low) {
duke@435 2998 // The "size" of each task is fixed according to rescan_task_size.
duke@435 2999 assert(n_threads > 0, "Unexpected n_threads argument");
duke@435 3000 const size_t task_size = marking_task_size();
duke@435 3001 assert(task_size > CardTableModRefBS::card_size_in_words &&
duke@435 3002 (task_size % CardTableModRefBS::card_size_in_words == 0),
duke@435 3003 "Otherwise arithmetic below would be incorrect");
duke@435 3004 MemRegion span = _gen->reserved();
duke@435 3005 if (low != NULL) {
duke@435 3006 if (span.contains(low)) {
duke@435 3007 // Align low down to a card boundary so that
duke@435 3008 // we can use block_offset_careful() on span boundaries.
duke@435 3009 HeapWord* aligned_low = (HeapWord*)align_size_down((uintptr_t)low,
duke@435 3010 CardTableModRefBS::card_size);
duke@435 3011 // Clip span prefix at aligned_low
duke@435 3012 span = span.intersection(MemRegion(aligned_low, span.end()));
duke@435 3013 } else if (low > span.end()) {
duke@435 3014 span = MemRegion(low, low); // Null region
duke@435 3015 } // else use entire span
duke@435 3016 }
duke@435 3017 assert(span.is_empty() ||
duke@435 3018 ((uintptr_t)span.start() % CardTableModRefBS::card_size == 0),
duke@435 3019 "span should start at a card boundary");
duke@435 3020 size_t n_tasks = (span.word_size() + task_size - 1)/task_size;
duke@435 3021 assert((n_tasks == 0) == span.is_empty(), "Inconsistency");
duke@435 3022 assert(n_tasks == 0 ||
duke@435 3023 ((span.start() + (n_tasks - 1)*task_size < span.end()) &&
duke@435 3024 (span.start() + n_tasks*task_size >= span.end())),
ysr@775 3025 "n_tasks calculation incorrect");
duke@435 3026 SequentialSubTasksDone* pst = conc_par_seq_tasks();
duke@435 3027 assert(!pst->valid(), "Clobbering existing data?");
jmasa@2188 3028 // Sets the condition for completion of the subtask (how many threads
jmasa@2188 3029 // need to finish in order to be done).
jmasa@2188 3030 pst->set_n_threads(n_threads);
duke@435 3031 pst->set_n_tasks((int)n_tasks);
duke@435 3032 }
