Wed, 22 May 2013 08:04:58 +0200
8014971: Minor code cleanup of the freelist management
Reviewed-by: jwilhelm, jmasa, tschatzl
/*
 * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc_implementation/concurrentMarkSweep/cmsLockVerifier.hpp"
#include "gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp"
#include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.inline.hpp"
#include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.hpp"
#include "gc_implementation/shared/liveRange.hpp"
#include "gc_implementation/shared/spaceDecorator.hpp"
#include "gc_interface/collectedHeap.inline.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/blockOffsetTable.inline.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/globals.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/init.hpp"
#include "runtime/java.hpp"
#include "runtime/vmThread.hpp"
#include "utilities/copy.hpp"

/////////////////////////////////////////////////////////////////////////
//// CompactibleFreeListSpace
/////////////////////////////////////////////////////////////////////////

// highest ranked free list lock rank
int CompactibleFreeListSpace::_lockRank = Mutex::leaf + 3;

// Defaults are 0 so things will break badly if incorrectly initialized.
size_t CompactibleFreeListSpace::IndexSetStart  = 0;
size_t CompactibleFreeListSpace::IndexSetStride = 0;

size_t MinChunkSize = 0;

void CompactibleFreeListSpace::set_cms_values() {
  // Set CMS global values
  assert(MinChunkSize == 0, "already set");

  // MinChunkSize should be a multiple of MinObjAlignment and be large enough
  // for chunks to contain a FreeChunk.
  size_t min_chunk_size_in_bytes = align_size_up(sizeof(FreeChunk), MinObjAlignmentInBytes);
  MinChunkSize = min_chunk_size_in_bytes / BytesPerWord;

  assert(IndexSetStart == 0 && IndexSetStride == 0, "already set");
  IndexSetStart  = MinChunkSize;
  IndexSetStride = MinObjAlignment;
}
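
// Editor's illustration of the arithmetic above (all concrete numbers are
// assumptions for illustration, not asserted properties of any particular
// build): on a hypothetical 32-bit layout with 4-byte words, 8-byte
// MinObjAlignmentInBytes and sizeof(FreeChunk) == 12,
//
//   min_chunk_size_in_bytes = align_size_up(12, 8) = 16
//   MinChunkSize            = 16 / 4               = 4 (words)
//   IndexSetStart           = 4
//   IndexSetStride          = MinObjAlignment      = 2
//
// so only every other indexed free list (sizes 4, 6, 8, ...) would be used,
// which is what the "odd array elements are not used" comment later in this
// file refers to.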

// Constructor
CompactibleFreeListSpace::CompactibleFreeListSpace(BlockOffsetSharedArray* bs,
  MemRegion mr, bool use_adaptive_freelists,
  FreeBlockDictionary<FreeChunk>::DictionaryChoice dictionaryChoice) :
  _dictionaryChoice(dictionaryChoice),
  _adaptive_freelists(use_adaptive_freelists),
  _bt(bs, mr),
  // free list locks are in the range of values taken by _lockRank
  // This range currently is [_leaf+2, _leaf+3]
  // Note: this requires that CFLspace c'tors
  // are called serially in the order in which the locks are
  // acquired in the program text. This is true today.
  _freelistLock(_lockRank--, "CompactibleFreeListSpace._lock", true),
  _parDictionaryAllocLock(Mutex::leaf - 1,  // == rank(ExpandHeap_lock) - 1
                          "CompactibleFreeListSpace._dict_par_lock", true),
  _rescan_task_size(CardTableModRefBS::card_size_in_words * BitsPerWord *
                    CMSRescanMultiple),
  _marking_task_size(CardTableModRefBS::card_size_in_words * BitsPerWord *
                     CMSConcMarkMultiple),
  _collector(NULL)
{
  assert(sizeof(FreeChunk) / BytesPerWord <= MinChunkSize,
         "FreeChunk is larger than expected");
  _bt.set_space(this);
  initialize(mr, SpaceDecorator::Clear, SpaceDecorator::Mangle);
  // We have all of "mr", all of which we place in the dictionary
  // as one big chunk. We'll need to decide here which of several
  // possible alternative dictionary implementations to use. For
  // now the choice is easy, since we have only one working
  // implementation, namely, the simple binary tree (splaying
  // temporarily disabled).
  switch (dictionaryChoice) {
    case FreeBlockDictionary<FreeChunk>::dictionaryBinaryTree:
      _dictionary = new AFLBinaryTreeDictionary(mr);
      break;
    case FreeBlockDictionary<FreeChunk>::dictionarySplayTree:
    case FreeBlockDictionary<FreeChunk>::dictionarySkipList:
    default:
      warning("dictionaryChoice: selected option not understood; using"
              " default BinaryTreeDictionary implementation instead.");
  }
  assert(_dictionary != NULL, "CMS dictionary initialization");
  // The indexed free lists are initially all empty and are lazily
  // filled in on demand. Initialize the array elements to NULL.
  initializeIndexedFreeListArray();

  // Not using adaptive free lists assumes that allocation is first
  // from the linAB's.  Also a cms perm gen which can be compacted
  // has to have the klass's klassKlass allocated at a lower
  // address in the heap than the klass so that the klassKlass is
  // moved to its new location before the klass is moved.
  // Set the _refillSize for the linear allocation blocks
  if (!use_adaptive_freelists) {
    FreeChunk* fc = _dictionary->get_chunk(mr.word_size(),
                                           FreeBlockDictionary<FreeChunk>::atLeast);
    // The small linAB initially has all the space and will allocate
    // a chunk of any size.
    HeapWord* addr = (HeapWord*) fc;
    _smallLinearAllocBlock.set(addr, fc->size(),
      1024*SmallForLinearAlloc, fc->size());
    // Note that _unallocated_block is not updated here.
    // Allocations from the linear allocation block should
    // update it.
  } else {
    _smallLinearAllocBlock.set(0, 0, 1024*SmallForLinearAlloc,
                               SmallForLinearAlloc);
  }
  // CMSIndexedFreeListReplenish should be at least 1
  CMSIndexedFreeListReplenish = MAX2((uintx)1, CMSIndexedFreeListReplenish);
  _promoInfo.setSpace(this);
  if (UseCMSBestFit) {
    _fitStrategy = FreeBlockBestFitFirst;
  } else {
    _fitStrategy = FreeBlockStrategyNone;
  }
  check_free_list_consistency();

  // Initialize locks for parallel case.

  if (CollectedHeap::use_parallel_gc_threads()) {
    for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
      _indexedFreeListParLocks[i] = new Mutex(Mutex::leaf - 1, // == ExpandHeap_lock - 1
                                              "a freelist par lock", true);
      DEBUG_ONLY(
        _indexedFreeList[i].set_protecting_lock(_indexedFreeListParLocks[i]);
      )
    }
    _dictionary->set_par_lock(&_parDictionaryAllocLock);
  }
}
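
// Editor's sketch of the _lockRank arithmetic in the initializer list above
// (an illustration, not new behavior): _lockRank starts at Mutex::leaf + 3
// and is post-decremented once per constructor, so with two spaces
// constructed in order,
//
//   1st CompactibleFreeListSpace: _freelistLock rank == leaf + 3
//   2nd CompactibleFreeListSpace: _freelistLock rank == leaf + 2
//
// which is the [_leaf+2, _leaf+3] range the comment refers to, and why the
// c'tors must run serially in lock-acquisition order for rank checking.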

// Like CompactibleSpace forward() but always calls cross_threshold() to
// update the block offset table.  Removed initialize_threshold call because
// CFLS does not use a block offset array for contiguous spaces.
HeapWord* CompactibleFreeListSpace::forward(oop q, size_t size,
                                            CompactPoint* cp, HeapWord* compact_top) {
  // q is alive
  // First check if we should switch compaction space
  assert(this == cp->space, "'this' should be current compaction space.");
  size_t compaction_max_size = pointer_delta(end(), compact_top);
  assert(adjustObjectSize(size) == cp->space->adjust_object_size_v(size),
         "virtual adjustObjectSize_v() method is not correct");
  size_t adjusted_size = adjustObjectSize(size);
  assert(compaction_max_size >= MinChunkSize || compaction_max_size == 0,
         "no small fragments allowed");
  assert(minimum_free_block_size() == MinChunkSize,
         "for de-virtualized reference below");
  // Can't leave a nonzero-size residual fragment smaller than MinChunkSize
  if (adjusted_size + MinChunkSize > compaction_max_size &&
      adjusted_size != compaction_max_size) {
    do {
      // switch to next compaction space
      cp->space->set_compaction_top(compact_top);
      cp->space = cp->space->next_compaction_space();
      if (cp->space == NULL) {
        cp->gen = GenCollectedHeap::heap()->prev_gen(cp->gen);
        assert(cp->gen != NULL, "compaction must succeed");
        cp->space = cp->gen->first_compaction_space();
        assert(cp->space != NULL, "generation must have a first compaction space");
      }
      compact_top = cp->space->bottom();
      cp->space->set_compaction_top(compact_top);
      // The correct adjusted_size may not be the same as that for this method
      // (i.e., cp->space may no longer be "this", so adjust the size again).
      // Here we must use the virtual method, which was avoided above to save
      // the virtual dispatch.
      adjusted_size = cp->space->adjust_object_size_v(size);
      compaction_max_size = pointer_delta(cp->space->end(), compact_top);
      assert(cp->space->minimum_free_block_size() == 0, "just checking");
    } while (adjusted_size > compaction_max_size);
  }

  // store the forwarding pointer into the mark word
  if ((HeapWord*)q != compact_top) {
    q->forward_to(oop(compact_top));
    assert(q->is_gc_marked(), "encoding the pointer should preserve the mark");
  } else {
    // if the object isn't moving we can just set the mark to the default
    // mark and handle it specially later on.
    q->init_mark();
    assert(q->forwardee() == NULL, "should be forwarded to NULL");
  }

  compact_top += adjusted_size;

  // We need to update the offset table so that the beginnings of objects can be
  // found during scavenge.  Note that we are updating the offset table based on
  // where the object will be once the compaction phase finishes.

  // Always call cross_threshold().  A contiguous space can only call it when
  // the compaction_top exceeds the current threshold, but not for a
  // non-contiguous space.
  cp->threshold =
    cp->space->cross_threshold(compact_top - adjusted_size, compact_top);
  return compact_top;
}
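
// Editor's worked example of the space-switch test above (the numbers are
// assumed for illustration only): with MinChunkSize == 2 and
// compaction_max_size == 10 words,
//
//   adjusted_size == 10 fits exactly (second clause fails), no switch;
//   adjusted_size ==  8 leaves a 2-word residual == MinChunkSize, no switch;
//   adjusted_size ==  9 would leave a 1-word fragment too small to hold a
//                       FreeChunk, so we must switch compaction spaces.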

// A modified copy of OffsetTableContigSpace::cross_threshold() with _offsets -> _bt
// and use of single_block instead of alloc_block.  The name here is not really
// appropriate - maybe a more general name could be invented for both the
// contiguous and noncontiguous spaces.

HeapWord* CompactibleFreeListSpace::cross_threshold(HeapWord* start, HeapWord* the_end) {
  _bt.single_block(start, the_end);
  return end();
}

// Initialize them to NULL.
void CompactibleFreeListSpace::initializeIndexedFreeListArray() {
  for (size_t i = 0; i < IndexSetSize; i++) {
    // Note that on platforms where objects are double word aligned,
    // the odd array elements are not used.  It is convenient, however,
    // to map directly from the object size to the array element.
    _indexedFreeList[i].reset(IndexSetSize);
    _indexedFreeList[i].set_size(i);
    assert(_indexedFreeList[i].count() == 0, "reset check failed");
    assert(_indexedFreeList[i].head() == NULL, "reset check failed");
    assert(_indexedFreeList[i].tail() == NULL, "reset check failed");
    assert(_indexedFreeList[i].hint() == IndexSetSize, "reset check failed");
  }
}

void CompactibleFreeListSpace::resetIndexedFreeListArray() {
  for (size_t i = 1; i < IndexSetSize; i++) {
    assert(_indexedFreeList[i].size() == (size_t) i,
           "Indexed free list sizes are incorrect");
    _indexedFreeList[i].reset(IndexSetSize);
    assert(_indexedFreeList[i].count() == 0, "reset check failed");
    assert(_indexedFreeList[i].head() == NULL, "reset check failed");
    assert(_indexedFreeList[i].tail() == NULL, "reset check failed");
    assert(_indexedFreeList[i].hint() == IndexSetSize, "reset check failed");
  }
}

void CompactibleFreeListSpace::reset(MemRegion mr) {
  resetIndexedFreeListArray();
  dictionary()->reset();
  if (BlockOffsetArrayUseUnallocatedBlock) {
    assert(end() == mr.end(), "We are compacting to the bottom of CMS gen");
    // Everything's allocated until proven otherwise.
    _bt.set_unallocated_block(end());
  }
  if (!mr.is_empty()) {
    assert(mr.word_size() >= MinChunkSize, "Chunk size is too small");
    _bt.single_block(mr.start(), mr.word_size());
    FreeChunk* fc = (FreeChunk*) mr.start();
    fc->set_size(mr.word_size());
    if (mr.word_size() >= IndexSetSize) {
      returnChunkToDictionary(fc);
    } else {
      _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
      _indexedFreeList[mr.word_size()].return_chunk_at_head(fc);
    }
    coalBirth(mr.word_size());
  }
  _promoInfo.reset();
  _smallLinearAllocBlock._ptr = NULL;
  _smallLinearAllocBlock._word_size = 0;
}

void CompactibleFreeListSpace::reset_after_compaction() {
  // Reset the space to the new reality - one free chunk.
  MemRegion mr(compaction_top(), end());
  reset(mr);
  // Now refill the linear allocation block(s) if possible.
  if (_adaptive_freelists) {
    refillLinearAllocBlocksIfNeeded();
  } else {
    // Place as much of mr in the linAB as we can get,
    // provided it was big enough to go into the dictionary.
    FreeChunk* fc = dictionary()->find_largest_dict();
    if (fc != NULL) {
      assert(fc->size() == mr.word_size(),
             "Why was the chunk broken up?");
      removeChunkFromDictionary(fc);
      HeapWord* addr = (HeapWord*) fc;
      _smallLinearAllocBlock.set(addr, fc->size(),
        1024*SmallForLinearAlloc, fc->size());
      // Note that _unallocated_block is not updated here.
    }
  }
}

// Walks the entire dictionary, returning a coterminal
// chunk, if it exists.  Use with caution since it involves
// a potentially complete walk of a potentially large tree.
FreeChunk* CompactibleFreeListSpace::find_chunk_at_end() {

  assert_lock_strong(&_freelistLock);

  return dictionary()->find_chunk_ends_at(end());
}

#ifndef PRODUCT
void CompactibleFreeListSpace::initializeIndexedFreeListArrayReturnedBytes() {
  for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
    _indexedFreeList[i].allocation_stats()->set_returned_bytes(0);
  }
}

size_t CompactibleFreeListSpace::sumIndexedFreeListArrayReturnedBytes() {
  size_t sum = 0;
  for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
    sum += _indexedFreeList[i].allocation_stats()->returned_bytes();
  }
  return sum;
}

size_t CompactibleFreeListSpace::totalCountInIndexedFreeLists() const {
  size_t count = 0;
  for (size_t i = IndexSetStart; i < IndexSetSize; i++) {
    debug_only(
      ssize_t total_list_count = 0;
      for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
           fc = fc->next()) {
        total_list_count++;
      }
      assert(total_list_count == _indexedFreeList[i].count(),
             "Count in list is incorrect");
    )
    count += _indexedFreeList[i].count();
  }
  return count;
}

size_t CompactibleFreeListSpace::totalCount() {
  size_t num = totalCountInIndexedFreeLists();
  num += dictionary()->total_count();
  if (_smallLinearAllocBlock._word_size != 0) {
    num++;
  }
  return num;
}
#endif

bool CompactibleFreeListSpace::is_free_block(const HeapWord* p) const {
  FreeChunk* fc = (FreeChunk*) p;
  return fc->is_free();
}

size_t CompactibleFreeListSpace::used() const {
  return capacity() - free();
}

size_t CompactibleFreeListSpace::free() const {
  // "MT-safe, but not MT-precise"(TM), if you will: i.e.
  // if you do this while the structures are in flux you
  // may get an approximate answer only; for instance
  // because there is concurrent allocation either
  // directly by mutators or for promotion during a GC.
  // It's "MT-safe", however, in the sense that you are guaranteed
  // not to crash and burn, for instance, because of walking
  // pointers that could disappear as you were walking them.
  // The approximation is because the various components
  // that are read below are not read atomically (and
  // further the computation of totalSizeInIndexedFreeLists()
  // is itself a non-atomic computation).  The normal use of
  // this is during a resize operation at the end of GC
  // and at that time you are guaranteed to get the
  // correct actual value.  However, for instance, this is
  // also read completely asynchronously by the "perf-sampler"
  // that supports jvmstat, and you are apt to see the values
  // flicker in such cases.
  assert(_dictionary != NULL, "No _dictionary?");
  return (_dictionary->total_chunk_size(DEBUG_ONLY(freelistLock())) +
          totalSizeInIndexedFreeLists() +
          _smallLinearAllocBlock._word_size) * HeapWordSize;
}

size_t CompactibleFreeListSpace::max_alloc_in_words() const {
  assert(_dictionary != NULL, "No _dictionary?");
  assert_locked();
  size_t res = _dictionary->max_chunk_size();
  res = MAX2(res, MIN2(_smallLinearAllocBlock._word_size,
                       (size_t) SmallForLinearAlloc - 1));
  // XXX the following could potentially be pretty slow;
  // should one, pessimally for the rare cases when res
  // calculated above is less than IndexSetSize,
  // just return res calculated above?  My reasoning was that
  // those cases will be so rare that the extra time spent doesn't
  // really matter....
  // Note: do not change the loop test i >= res + IndexSetStride
  // to i > res below, because i is unsigned and res may be zero.
  for (size_t i = IndexSetSize - 1; i >= res + IndexSetStride;
       i -= IndexSetStride) {
    if (_indexedFreeList[i].head() != NULL) {
      assert(_indexedFreeList[i].count() != 0, "Inconsistent FreeList");
      return i;
    }
  }
  return res;
}

void LinearAllocBlock::print_on(outputStream* st) const {
  st->print_cr(" LinearAllocBlock: ptr = " PTR_FORMAT ", word_size = " SIZE_FORMAT
               ", refillsize = " SIZE_FORMAT ", allocation_size_limit = " SIZE_FORMAT,
               _ptr, _word_size, _refillSize, _allocation_size_limit);
}

void CompactibleFreeListSpace::print_on(outputStream* st) const {
  st->print_cr("COMPACTIBLE FREELIST SPACE");
  st->print_cr(" Space:");
  Space::print_on(st);

  st->print_cr("promoInfo:");
  _promoInfo.print_on(st);

  st->print_cr("_smallLinearAllocBlock");
  _smallLinearAllocBlock.print_on(st);

  // dump_memory_block(_smallLinearAllocBlock->_ptr, 128);

  st->print_cr(" _fitStrategy = %s, _adaptive_freelists = %s",
               _fitStrategy ? "true" : "false", _adaptive_freelists ? "true" : "false");
}

void CompactibleFreeListSpace::print_indexed_free_lists(outputStream* st)
const {
  reportIndexedFreeListStatistics();
  gclog_or_tty->print_cr("Layout of Indexed Freelists");
  gclog_or_tty->print_cr("---------------------------");
  AdaptiveFreeList<FreeChunk>::print_labels_on(st, "size");
  for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
    _indexedFreeList[i].print_on(gclog_or_tty);
    for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
         fc = fc->next()) {
      gclog_or_tty->print_cr("\t[" PTR_FORMAT "," PTR_FORMAT ") %s",
                             fc, (HeapWord*)fc + i,
                             fc->cantCoalesce() ? "\t CC" : "");
    }
  }
}

void CompactibleFreeListSpace::print_promo_info_blocks(outputStream* st)
const {
  _promoInfo.print_on(st);
}

void CompactibleFreeListSpace::print_dictionary_free_lists(outputStream* st)
const {
  _dictionary->report_statistics();
  st->print_cr("Layout of Freelists in Tree");
  st->print_cr("---------------------------");
  _dictionary->print_free_lists(st);
}

class BlkPrintingClosure: public BlkClosure {
  const CMSCollector*             _collector;
  const CompactibleFreeListSpace* _sp;
  const CMSBitMap*                _live_bit_map;
  const bool                      _post_remark;
  outputStream*                   _st;
public:
  BlkPrintingClosure(const CMSCollector* collector,
                     const CompactibleFreeListSpace* sp,
                     const CMSBitMap* live_bit_map,
                     outputStream* st):
    _collector(collector),
    _sp(sp),
    _live_bit_map(live_bit_map),
    _post_remark(collector->abstract_state() > CMSCollector::FinalMarking),
    _st(st) { }
  size_t do_blk(HeapWord* addr);
};

size_t BlkPrintingClosure::do_blk(HeapWord* addr) {
  size_t sz = _sp->block_size_no_stall(addr, _collector);
  assert(sz != 0, "Should always be able to compute a size");
  if (_sp->block_is_obj(addr)) {
    const bool dead = _post_remark && !_live_bit_map->isMarked(addr);
    _st->print_cr(PTR_FORMAT ": %s object of size " SIZE_FORMAT "%s",
                  addr,
                  dead ? "dead" : "live",
                  sz,
                  (!dead && CMSPrintObjectsInDump) ? ":" : ".");
    if (CMSPrintObjectsInDump && !dead) {
      oop(addr)->print_on(_st);
      _st->print_cr("--------------------------------------");
    }
  } else { // free block
    _st->print_cr(PTR_FORMAT ": free block of size " SIZE_FORMAT "%s",
                  addr, sz, CMSPrintChunksInDump ? ":" : ".");
    if (CMSPrintChunksInDump) {
      ((FreeChunk*)addr)->print_on(_st);
      _st->print_cr("--------------------------------------");
    }
  }
  return sz;
}

void CompactibleFreeListSpace::dump_at_safepoint_with_locks(CMSCollector* c,
  outputStream* st) {
  st->print_cr("\n=========================");
  st->print_cr("Block layout in CMS Heap:");
  st->print_cr("=========================");
  BlkPrintingClosure bpcl(c, this, c->markBitMap(), st);
  blk_iterate(&bpcl);

  st->print_cr("\n=======================================");
  st->print_cr("Order & Layout of Promotion Info Blocks");
  st->print_cr("=======================================");
  print_promo_info_blocks(st);

  st->print_cr("\n===========================");
  st->print_cr("Order of Indexed Free Lists");
  st->print_cr("===========================");
  print_indexed_free_lists(st);

  st->print_cr("\n=================================");
  st->print_cr("Order of Free Lists in Dictionary");
  st->print_cr("=================================");
  print_dictionary_free_lists(st);
}

void CompactibleFreeListSpace::reportFreeListStatistics() const {
  assert_lock_strong(&_freelistLock);
  assert(PrintFLSStatistics != 0, "Reporting error");
  _dictionary->report_statistics();
  if (PrintFLSStatistics > 1) {
    reportIndexedFreeListStatistics();
    size_t total_size = totalSizeInIndexedFreeLists() +
                        _dictionary->total_chunk_size(DEBUG_ONLY(freelistLock()));
    gclog_or_tty->print(" free=" SIZE_FORMAT " frag=%1.4f\n", total_size, flsFrag());
  }
}

void CompactibleFreeListSpace::reportIndexedFreeListStatistics() const {
  assert_lock_strong(&_freelistLock);
  gclog_or_tty->print("Statistics for IndexedFreeLists:\n"
                      "--------------------------------\n");
  size_t total_size = totalSizeInIndexedFreeLists();
  size_t free_blocks = numFreeBlocksInIndexedFreeLists();
  gclog_or_tty->print("Total Free Space: " SIZE_FORMAT "\n", total_size);
  gclog_or_tty->print("Max Chunk Size: " SIZE_FORMAT "\n", maxChunkSizeInIndexedFreeLists());
  gclog_or_tty->print("Number of Blocks: " SIZE_FORMAT "\n", free_blocks);
  if (free_blocks != 0) {
    gclog_or_tty->print("Av. Block Size: " SIZE_FORMAT "\n", total_size/free_blocks);
  }
}

size_t CompactibleFreeListSpace::numFreeBlocksInIndexedFreeLists() const {
  size_t res = 0;
  for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
    debug_only(
      ssize_t recount = 0;
      for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
           fc = fc->next()) {
        recount += 1;
      }
      assert(recount == _indexedFreeList[i].count(),
             "Incorrect count in list");
    )
    res += _indexedFreeList[i].count();
  }
  return res;
}

size_t CompactibleFreeListSpace::maxChunkSizeInIndexedFreeLists() const {
  for (size_t i = IndexSetSize - 1; i != 0; i -= IndexSetStride) {
    if (_indexedFreeList[i].head() != NULL) {
      assert(_indexedFreeList[i].count() != 0, "Inconsistent FreeList");
      return (size_t)i;
    }
  }
  return 0;
}

void CompactibleFreeListSpace::set_end(HeapWord* value) {
  HeapWord* prevEnd = end();
  assert(prevEnd != value, "unnecessary set_end call");
  assert(prevEnd == NULL || !BlockOffsetArrayUseUnallocatedBlock || value >= unallocated_block(),
         "New end is below unallocated block");
  _end = value;
  if (prevEnd != NULL) {
    // Resize the underlying block offset table.
    _bt.resize(pointer_delta(value, bottom()));
    if (value <= prevEnd) {
      assert(!BlockOffsetArrayUseUnallocatedBlock || value >= unallocated_block(),
             "New end is below unallocated block");
    } else {
      // Now, take this new chunk and add it to the free blocks.
      // Note that the BOT has not yet been updated for this block.
      size_t newFcSize = pointer_delta(value, prevEnd);
      // XXX This is REALLY UGLY and should be fixed up. XXX
      if (!_adaptive_freelists && _smallLinearAllocBlock._ptr == NULL) {
        // Mark the boundary of the new block in BOT
        _bt.mark_block(prevEnd, value);
        // put it all in the linAB
        if (ParallelGCThreads == 0) {
          _smallLinearAllocBlock._ptr = prevEnd;
          _smallLinearAllocBlock._word_size = newFcSize;
          repairLinearAllocBlock(&_smallLinearAllocBlock);
        } else { // ParallelGCThreads > 0
          MutexLockerEx x(parDictionaryAllocLock(),
                          Mutex::_no_safepoint_check_flag);
          _smallLinearAllocBlock._ptr = prevEnd;
          _smallLinearAllocBlock._word_size = newFcSize;
          repairLinearAllocBlock(&_smallLinearAllocBlock);
        }
        // Births of chunks put into a LinAB are not recorded.  Births
        // of chunks as they are allocated out of a LinAB are.
      } else {
        // Add the block to the free lists, if possible coalescing it
        // with the last free block, and update the BOT and census data.
        addChunkToFreeListsAtEndRecordingStats(prevEnd, newFcSize);
      }
    }
  }
}

class FreeListSpace_DCTOC : public Filtering_DCTOC {
  CompactibleFreeListSpace* _cfls;
  CMSCollector* _collector;
protected:
  // Override.
#define walk_mem_region_with_cl_DECL(ClosureType)                       \
  virtual void walk_mem_region_with_cl(MemRegion mr,                    \
                                       HeapWord* bottom, HeapWord* top, \
                                       ClosureType* cl);                \
      void walk_mem_region_with_cl_par(MemRegion mr,                    \
                                       HeapWord* bottom, HeapWord* top, \
                                       ClosureType* cl);                \
    void walk_mem_region_with_cl_nopar(MemRegion mr,                    \
                                       HeapWord* bottom, HeapWord* top, \
                                       ClosureType* cl)
  walk_mem_region_with_cl_DECL(ExtendedOopClosure);
  walk_mem_region_with_cl_DECL(FilteringClosure);

public:
  FreeListSpace_DCTOC(CompactibleFreeListSpace* sp,
                      CMSCollector* collector,
                      ExtendedOopClosure* cl,
                      CardTableModRefBS::PrecisionStyle precision,
                      HeapWord* boundary) :
    Filtering_DCTOC(sp, cl, precision, boundary),
    _cfls(sp), _collector(collector) {}
};

// We de-virtualize the block-related calls below, since we know that our
// space is a CompactibleFreeListSpace.

#define FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(ClosureType)          \
void FreeListSpace_DCTOC::walk_mem_region_with_cl(MemRegion mr,                 \
                                                  HeapWord* bottom,             \
                                                  HeapWord* top,                \
                                                  ClosureType* cl) {            \
  bool is_par = SharedHeap::heap()->n_par_threads() > 0;                        \
  if (is_par) {                                                                 \
    assert(SharedHeap::heap()->n_par_threads() ==                               \
           SharedHeap::heap()->workers()->active_workers(), "Mismatch");        \
    walk_mem_region_with_cl_par(mr, bottom, top, cl);                           \
  } else {                                                                      \
    walk_mem_region_with_cl_nopar(mr, bottom, top, cl);                         \
  }                                                                             \
}                                                                               \
void FreeListSpace_DCTOC::walk_mem_region_with_cl_par(MemRegion mr,             \
                                                      HeapWord* bottom,         \
                                                      HeapWord* top,            \
                                                      ClosureType* cl) {        \
  /* Skip parts that are before "mr", in case "block_start" sent us             \
     back too far. */                                                           \
  HeapWord* mr_start = mr.start();                                              \
  size_t bot_size = _cfls->CompactibleFreeListSpace::block_size(bottom);        \
  HeapWord* next = bottom + bot_size;                                           \
  while (next < mr_start) {                                                     \
    bottom = next;                                                              \
    bot_size = _cfls->CompactibleFreeListSpace::block_size(bottom);             \
    next = bottom + bot_size;                                                   \
  }                                                                             \
                                                                                \
  while (bottom < top) {                                                        \
    if (_cfls->CompactibleFreeListSpace::block_is_obj(bottom) &&                \
        !_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks(       \
                    oop(bottom)) &&                                             \
        !_collector->CMSCollector::is_dead_obj(oop(bottom))) {                  \
      size_t word_sz = oop(bottom)->oop_iterate(cl, mr);                        \
      bottom += _cfls->adjustObjectSize(word_sz);                               \
    } else {                                                                    \
      bottom += _cfls->CompactibleFreeListSpace::block_size(bottom);            \
    }                                                                           \
  }                                                                             \
}                                                                               \
void FreeListSpace_DCTOC::walk_mem_region_with_cl_nopar(MemRegion mr,           \
                                                        HeapWord* bottom,       \
                                                        HeapWord* top,          \
                                                        ClosureType* cl) {      \
  /* Skip parts that are before "mr", in case "block_start" sent us             \
     back too far. */                                                           \
  HeapWord* mr_start = mr.start();                                              \
  size_t bot_size = _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);  \
  HeapWord* next = bottom + bot_size;                                           \
  while (next < mr_start) {                                                     \
    bottom = next;                                                              \
    bot_size = _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);       \
    next = bottom + bot_size;                                                   \
  }                                                                             \
                                                                                \
  while (bottom < top) {                                                        \
    if (_cfls->CompactibleFreeListSpace::block_is_obj_nopar(bottom) &&          \
        !_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks(       \
                    oop(bottom)) &&                                             \
        !_collector->CMSCollector::is_dead_obj(oop(bottom))) {                  \
      size_t word_sz = oop(bottom)->oop_iterate(cl, mr);                        \
      bottom += _cfls->adjustObjectSize(word_sz);                               \
    } else {                                                                    \
      bottom += _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);      \
    }                                                                           \
  }                                                                             \
}
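
// Editor's note on the explicit qualification used throughout the macro
// bodies above (an illustrative sketch; the identifiers are from this file,
// the general rule is standard C++): an unqualified call through a pointer
// dispatches virtually, while naming the class pins the callee at compile
// time:
//
//   _cfls->block_size(p);                             // virtual dispatch
//   _cfls->CompactibleFreeListSpace::block_size(p);   // statically bound
//
// This is the de-virtualization the comment above refers to.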

// (There are only two of these, rather than N, because the split is due
// only to the introduction of the FilteringClosure, a local part of the
// impl of this abstraction.)
FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(ExtendedOopClosure)
FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(FilteringClosure)

DirtyCardToOopClosure*
CompactibleFreeListSpace::new_dcto_cl(ExtendedOopClosure* cl,
                                      CardTableModRefBS::PrecisionStyle precision,
                                      HeapWord* boundary) {
  return new FreeListSpace_DCTOC(this, _collector, cl, precision, boundary);
}

// Note on locking for the space iteration functions:
// since the collector's iteration activities are concurrent with
// allocation activities by mutators, absent a suitable mutual exclusion
// mechanism the iterators may go awry.  For instance, a block being iterated
// may suddenly be allocated or divided up and part of it allocated and
// so on.

// Apply the given closure to each block in the space.
void CompactibleFreeListSpace::blk_iterate_careful(BlkClosureCareful* cl) {
  assert_lock_strong(freelistLock());
  HeapWord *cur, *limit;
  for (cur = bottom(), limit = end(); cur < limit;
       cur += cl->do_blk_careful(cur));
}

// Apply the given closure to each block in the space.
void CompactibleFreeListSpace::blk_iterate(BlkClosure* cl) {
  assert_lock_strong(freelistLock());
  HeapWord *cur, *limit;
  for (cur = bottom(), limit = end(); cur < limit;
       cur += cl->do_blk(cur));
}

// Apply the given closure to each oop in the space.
void CompactibleFreeListSpace::oop_iterate(ExtendedOopClosure* cl) {
  assert_lock_strong(freelistLock());
  HeapWord *cur, *limit;
  size_t curSize;
  for (cur = bottom(), limit = end(); cur < limit;
       cur += curSize) {
    curSize = block_size(cur);
    if (block_is_obj(cur)) {
      oop(cur)->oop_iterate(cl);
    }
  }
}

// Apply the given closure to each oop in the space intersected with the memory region.
void CompactibleFreeListSpace::oop_iterate(MemRegion mr, ExtendedOopClosure* cl) {
  assert_lock_strong(freelistLock());
  if (is_empty()) {
    return;
  }
  MemRegion cur = MemRegion(bottom(), end());
  mr = mr.intersection(cur);
  if (mr.is_empty()) {
    return;
  }
  if (mr.equals(cur)) {
    oop_iterate(cl);
    return;
  }
  assert(mr.end() <= end(), "just took an intersection above");
  HeapWord* obj_addr = block_start(mr.start());
  HeapWord* t = mr.end();

  SpaceMemRegionOopsIterClosure smr_blk(cl, mr);
  if (block_is_obj(obj_addr)) {
    // Handle first object specially.
    oop obj = oop(obj_addr);
    obj_addr += adjustObjectSize(obj->oop_iterate(&smr_blk));
  } else {
    FreeChunk* fc = (FreeChunk*)obj_addr;
    obj_addr += fc->size();
  }
  while (obj_addr < t) {
    HeapWord* obj = obj_addr;
    obj_addr += block_size(obj_addr);
    // If "obj_addr" is not greater than top, then the
    // entire object "obj" is within the region.
    if (obj_addr <= t) {
      if (block_is_obj(obj)) {
        oop(obj)->oop_iterate(cl);
      }
    } else {
      // "obj" extends beyond end of region
      if (block_is_obj(obj)) {
        oop(obj)->oop_iterate(&smr_blk);
      }
      break;
    }
  }
}

// NOTE: In the following methods, in order to safely be able to
// apply the closure to an object, we need to be sure that the
// object has been initialized.  We are guaranteed that an object
// is initialized if we are holding the Heap_lock with the
// world stopped.
void CompactibleFreeListSpace::verify_objects_initialized() const {
  if (is_init_completed()) {
    assert_locked_or_safepoint(Heap_lock);
    if (Universe::is_fully_initialized()) {
      guarantee(SafepointSynchronize::is_at_safepoint(),
                "Required for objects to be initialized");
    }
  } // else make a concession at vm start-up
}

// Apply the given closure to each object in the space
void CompactibleFreeListSpace::object_iterate(ObjectClosure* blk) {
  assert_lock_strong(freelistLock());
  NOT_PRODUCT(verify_objects_initialized());
  HeapWord *cur, *limit;
  size_t curSize;
  for (cur = bottom(), limit = end(); cur < limit;
       cur += curSize) {
    curSize = block_size(cur);
    if (block_is_obj(cur)) {
      blk->do_object(oop(cur));
    }
  }
}

// Apply the given closure to each live object in the space.
// The usage of CompactibleFreeListSpace
// by the ConcurrentMarkSweepGeneration for concurrent GCs allows
// objects in the space with references to objects that are no longer
// valid.  For example, an object may reference another object
// that has already been swept up (collected).  This method uses
// obj_is_alive() to determine whether it is safe to apply the closure to
// an object.  See obj_is_alive() for details on how liveness of an
// object is decided.

void CompactibleFreeListSpace::safe_object_iterate(ObjectClosure* blk) {
  assert_lock_strong(freelistLock());
  NOT_PRODUCT(verify_objects_initialized());
  HeapWord *cur, *limit;
  size_t curSize;
  for (cur = bottom(), limit = end(); cur < limit;
       cur += curSize) {
    curSize = block_size(cur);
    if (block_is_obj(cur) && obj_is_alive(cur)) {
      blk->do_object(oop(cur));
    }
  }
}

void CompactibleFreeListSpace::object_iterate_mem(MemRegion mr,
                                                  UpwardsObjectClosure* cl) {
  assert_locked(freelistLock());
  NOT_PRODUCT(verify_objects_initialized());
  Space::object_iterate_mem(mr, cl);
}

// Callers of this iterator beware: The closure application should
// be robust in the face of uninitialized objects and should (always)
// return a correct size so that the next addr + size below gives us a
// valid block boundary.  [See for instance,
// ScanMarkedObjectsAgainCarefullyClosure::do_object_careful()
// in ConcurrentMarkSweepGeneration.cpp.]
HeapWord*
CompactibleFreeListSpace::object_iterate_careful(ObjectClosureCareful* cl) {
  assert_lock_strong(freelistLock());
  HeapWord *addr, *last;
  size_t size;
  for (addr = bottom(), last = end();
       addr < last; addr += size) {
    FreeChunk* fc = (FreeChunk*)addr;
    if (fc->is_free()) {
      // Since we hold the free list lock, which protects direct
      // allocation in this generation by mutators, a free object
      // will remain free throughout this iteration code.
      size = fc->size();
    } else {
      // Note that the object need not necessarily be initialized,
      // because (for instance) the free list lock does NOT protect
      // object initialization.  The closure application below must
      // therefore be correct in the face of uninitialized objects.
      size = cl->do_object_careful(oop(addr));
      if (size == 0) {
        // An unparsable object found.  Signal early termination.
        return addr;
      }
    }
  }
  return NULL;
}

// Callers of this iterator beware: The closure application should
// be robust in the face of uninitialized objects and should (always)
// return a correct size so that the next addr + size below gives us a
// valid block boundary.  [See for instance,
// ScanMarkedObjectsAgainCarefullyClosure::do_object_careful()
// in ConcurrentMarkSweepGeneration.cpp.]
HeapWord*
CompactibleFreeListSpace::object_iterate_careful_m(MemRegion mr,
                                                   ObjectClosureCareful* cl) {
  assert_lock_strong(freelistLock());
  // Can't use used_region() below because it may not necessarily
  // be the same as [bottom(),end()); although we could
  // use [used_region().start(),round_to(used_region().end(),CardSize)),
  // that appears too cumbersome, so we just do the simpler check
  // in the assertion below.
  assert(!mr.is_empty() && MemRegion(bottom(),end()).contains(mr),
         "mr should be non-empty and within used space");
  HeapWord *addr, *end;
  size_t size;
  for (addr = block_start_careful(mr.start()), end = mr.end();
       addr < end; addr += size) {
    FreeChunk* fc = (FreeChunk*)addr;
    if (fc->is_free()) {
      // Since we hold the free list lock, which protects direct
      // allocation in this generation by mutators, a free object
      // will remain free throughout this iteration code.
      size = fc->size();
    } else {
      // Note that the object need not necessarily be initialized,
      // because (for instance) the free list lock does NOT protect
      // object initialization.  The closure application below must
      // therefore be correct in the face of uninitialized objects.
      size = cl->do_object_careful_m(oop(addr), mr);
      if (size == 0) {
        // An unparsable object found.  Signal early termination.
        return addr;
      }
    }
  }
  return NULL;
}

HeapWord* CompactibleFreeListSpace::block_start_const(const void* p) const {
  NOT_PRODUCT(verify_objects_initialized());
  return _bt.block_start(p);
}

HeapWord* CompactibleFreeListSpace::block_start_careful(const void* p) const {
  return _bt.block_start_careful(p);
}

size_t CompactibleFreeListSpace::block_size(const HeapWord* p) const {
  NOT_PRODUCT(verify_objects_initialized());
  // This must be volatile, or else there is a danger that the compiler
  // will compile the code below into a sometimes-infinite loop, by keeping
  // the value read the first time in a register.
  while (true) {
    // We must do this until we get a consistent view of the object.
    if (FreeChunk::indicatesFreeChunk(p)) {
      volatile FreeChunk* fc = (volatile FreeChunk*)p;
      size_t res = fc->size();
      // If the object is still a free chunk, return the size, else it
      // has been allocated so try again.
      if (FreeChunk::indicatesFreeChunk(p)) {
        assert(res != 0, "Block size should not be 0");
        return res;
      }
    } else {
      // must read from what 'p' points to in each loop.
      Klass* k = ((volatile oopDesc*)p)->klass_or_null();
      if (k != NULL) {
        assert(k->is_klass(), "Should really be klass oop.");
        oop o = (oop)p;
        assert(o->is_oop(true /* ignore mark word */), "Should be an oop.");
        size_t res = o->size_given_klass(k);
        res = adjustObjectSize(res);
        assert(res != 0, "Block size should not be 0");
        return res;
      }
    }
  }
}
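
// Editor's sketch of the race the double check above guards against (the
// interleaving is assumed for illustration only):
//
//   this thread                          mutator thread
//   -----------                          --------------
//   indicatesFreeChunk(p) -> true
//                                        allocates the chunk at p and
//                                        begins installing an object there
//   res = fc->size();                    // may read a stale/torn size
//   indicatesFreeChunk(p) -> false       // so discard res and loop again
//
// Only when the chunk is still free on the second check is res trusted;
// otherwise the loop falls through to the klass_or_null() path.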

// TODO: Now that is_parsable is gone, we should combine these two functions.
// A variant of the above that uses the Printezis bits for
// unparsable but allocated objects. This avoids any possible
// stalls waiting for mutators to initialize objects, and is
// thus potentially faster than the variant above. However,
// this variant may return a zero size for a block that is
// under mutation and for which a consistent size cannot be
// inferred without stalling; see CMSCollector::block_size_if_printezis_bits().
size_t CompactibleFreeListSpace::block_size_no_stall(HeapWord* p,
                                                     const CMSCollector* c)
const {
  assert(MemRegion(bottom(), end()).contains(p), "p not in space");
  // This must be volatile, or else there is a danger that the compiler
  // will compile the code below into a sometimes-infinite loop, by keeping
  // the value read the first time in a register.
  DEBUG_ONLY(uint loops = 0;)
  while (true) {
    // We must do this until we get a consistent view of the object.
    if (FreeChunk::indicatesFreeChunk(p)) {
      volatile FreeChunk* fc = (volatile FreeChunk*)p;
      size_t res = fc->size();
      if (FreeChunk::indicatesFreeChunk(p)) {
        assert(res != 0, "Block size should not be 0");
        assert(loops == 0, "Should be 0");
        return res;
      }
    } else {
      // must read from what 'p' points to in each loop.
      Klass* k = ((volatile oopDesc*)p)->klass_or_null();
      // We trust the size of any object that has a non-NULL
      // klass and (for those in the perm gen) is parsable
      // -- irrespective of its conc_safe-ty.
      if (k != NULL) {
        assert(k->is_klass(), "Should really be klass oop.");
        oop o = (oop)p;
        assert(o->is_oop(), "Should be an oop");
        size_t res = o->size_given_klass(k);
        res = adjustObjectSize(res);
        assert(res != 0, "Block size should not be 0");
        return res;
      } else {
        // May return 0 if P-bits not present.
        return c->block_size_if_printezis_bits(p);
      }
    }
    assert(loops == 0, "Can loop at most once");
    DEBUG_ONLY(loops++;)
  }
}

size_t CompactibleFreeListSpace::block_size_nopar(const HeapWord* p) const {
  NOT_PRODUCT(verify_objects_initialized());
  assert(MemRegion(bottom(), end()).contains(p), "p not in space");
  FreeChunk* fc = (FreeChunk*)p;
  if (fc->is_free()) {
    return fc->size();
  } else {
    // Ignore mark word because this may be a recently promoted
    // object whose mark word is used to chain together grey
    // objects (the last one would have a null value).
    assert(oop(p)->is_oop(true), "Should be an oop");
    return adjustObjectSize(oop(p)->size());
  }
}

// This implementation assumes that the property of "being an object" is
// stable.  But being a free chunk may not be (because of parallel
// promotion).
bool CompactibleFreeListSpace::block_is_obj(const HeapWord* p) const {
  FreeChunk* fc = (FreeChunk*)p;
  assert(is_in_reserved(p), "Should be in space");
  // When doing a mark-sweep-compact of the CMS generation, this
  // assertion may fail because prepare_for_compaction() uses
  // space that is garbage to maintain information on ranges of
  // live objects so that these live ranges can be moved as a whole.
  // Comment out this assertion until that problem can be solved
  // (i.e., the block start calculation may look at objects at
  // addresses below "p" in finding the object that contains "p",
  // and those objects (if garbage) may have been modified to hold
  // live range information).
  // assert(CollectedHeap::use_parallel_gc_threads() || _bt.block_start(p) == p,
  //        "Should be a block boundary");
  if (FreeChunk::indicatesFreeChunk(p)) return false;
  Klass* k = oop(p)->klass_or_null();
  if (k != NULL) {
    // Ignore mark word because it may have been used to
    // chain together promoted objects (the last one
    // would have a null value).
    assert(oop(p)->is_oop(true), "Should be an oop");
    return true;
  } else {
    return false; // Was not an object at the start of collection.
  }
}
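
// Editor's note on the klass_or_null() test relied on above (a sketch of the
// assumed publication order, not new behavior): a thread making a block into
// an object is assumed to write the klass field last, so a non-NULL klass
// implies the rest of the header is already valid:
//
//   // allocating thread                  // concurrent reader (this code)
//   hdr = raw_allocate(size);
//   hdr->init_mark();                     k = oop(p)->klass_or_null();
//   hdr->set_klass(k);   // written last  if (k != NULL) { /* header valid */ }
//
// A NULL klass therefore means "not yet an object when we looked".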

// Check if the object is alive.  This fact is checked either by consulting
// the main marking bitmap in the sweeping phase or, if it's a permanent
// generation and we're not in the sweeping phase, by checking the
// perm_gen_verify_bit_map where we store the "deadness" information if
// we did not sweep the perm gen in the most recent previous GC cycle.
bool CompactibleFreeListSpace::obj_is_alive(const HeapWord* p) const {
  assert(SafepointSynchronize::is_at_safepoint() || !is_init_completed(),
         "Else races are possible");
  assert(block_is_obj(p), "The address should point to an object");

  // If we're sweeping, we use object liveness information from the main bit map
  // for both perm gen and old gen.
  // We don't need to lock the bitmap (live_map or dead_map below), because
  // EITHER we are in the middle of the sweeping phase, and the
  // main marking bit map (live_map below) is locked,
  // OR we're in other phases and perm_gen_verify_bit_map (dead_map below)
  // is stable, because it's mutated only in the sweeping phase.
  // NOTE: This method is also used by jmap where, if class unloading is
  // off, the results can return "false" for legitimate perm objects,
  // when we are not in the midst of a sweeping phase, which can result
  // in jmap not reporting certain perm gen objects. This will be moot
  // if/when the perm gen goes away in the future.
  if (_collector->abstract_state() == CMSCollector::Sweeping) {
    CMSBitMap* live_map = _collector->markBitMap();
    return live_map->par_isMarked((HeapWord*) p);
  }
  return true;
}

bool CompactibleFreeListSpace::block_is_obj_nopar(const HeapWord* p) const {
  FreeChunk* fc = (FreeChunk*)p;
  assert(is_in_reserved(p), "Should be in space");
  assert(_bt.block_start(p) == p, "Should be a block boundary");
  if (!fc->is_free()) {
    // Ignore mark word because it may have been used to
    // chain together promoted objects (the last one
    // would have a null value).
    assert(oop(p)->is_oop(true), "Should be an oop");
    return true;
  }
  return false;
}
1160 // "MT-safe but not guaranteed MT-precise" (TM); you may get an
1161 // approximate answer if you don't hold the freelistlock when you call this.
1162 size_t CompactibleFreeListSpace::totalSizeInIndexedFreeLists() const {
1163 size_t size = 0;
1164 for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
1165 debug_only(
1166 // We may be calling here without the lock in which case we
1167 // won't do this modest sanity check.
1168 if (freelistLock()->owned_by_self()) {
1169 size_t total_list_size = 0;
1170 for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
1171 fc = fc->next()) {
1172 total_list_size += i;
1173 }
1174 assert(total_list_size == i * _indexedFreeList[i].count(),
1175 "Count in list is incorrect");
1176 }
1177 )
1178 size += i * _indexedFreeList[i].count();
1179 }
1180 return size;
1181 }

HeapWord* CompactibleFreeListSpace::par_allocate(size_t size) {
  MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
  return allocate(size);
}

HeapWord*
CompactibleFreeListSpace::getChunkFromSmallLinearAllocBlockRemainder(size_t size) {
  return getChunkFromLinearAllocBlockRemainder(&_smallLinearAllocBlock, size);
}

HeapWord* CompactibleFreeListSpace::allocate(size_t size) {
  assert_lock_strong(freelistLock());
  HeapWord* res = NULL;
  assert(size == adjustObjectSize(size),
         "use adjustObjectSize() before calling into allocate()");

  if (_adaptive_freelists) {
    res = allocate_adaptive_freelists(size);
  } else {  // non-adaptive free lists
    res = allocate_non_adaptive_freelists(size);
  }

  if (res != NULL) {
    // check that res does lie in this space!
    assert(is_in_reserved(res), "Not in this space!");
    assert(is_aligned((void*)res), "alignment check");

    FreeChunk* fc = (FreeChunk*)res;
    fc->markNotFree();
    assert(!fc->is_free(), "shouldn't be marked free");
    assert(oop(fc)->klass_or_null() == NULL, "should look uninitialized");
    // Verify that the block offset table shows this to
    // be a single block, but not one which is unallocated.
    _bt.verify_single_block(res, size);
    _bt.verify_not_unallocated(res, size);
    // mangle a just allocated object with a distinct pattern.
    debug_only(fc->mangleAllocated(size));
  }

  return res;
}

HeapWord* CompactibleFreeListSpace::allocate_non_adaptive_freelists(size_t size) {
  HeapWord* res = NULL;
  // try and use linear allocation for smaller blocks
  if (size < _smallLinearAllocBlock._allocation_size_limit) {
    // if successful, the following also adjusts block offset table
    res = getChunkFromSmallLinearAllocBlock(size);
  }
  // Else triage to indexed lists for smaller sizes
  if (res == NULL) {
    if (size < SmallForDictionary) {
      res = (HeapWord*) getChunkFromIndexedFreeList(size);
    } else {
      // else get it from the big dictionary; if even this doesn't
      // work we are out of luck.
      res = (HeapWord*)getChunkFromDictionaryExact(size);
    }
  }

  return res;
}

HeapWord* CompactibleFreeListSpace::allocate_adaptive_freelists(size_t size) {
  assert_lock_strong(freelistLock());
  HeapWord* res = NULL;
  assert(size == adjustObjectSize(size),
         "use adjustObjectSize() before calling into allocate()");

  // Strategy
  //   if small
  //     exact size from small object indexed list if small
  //     small or large linear allocation block (linAB) as appropriate
  //     take from lists of greater sized chunks
  //   else
  //     dictionary
  //     small or large linear allocation block if it has the space
  // Try allocating exact size from indexTable first
  if (size < IndexSetSize) {
    res = (HeapWord*) getChunkFromIndexedFreeList(size);
    if (res != NULL) {
      assert(res != (HeapWord*)_indexedFreeList[size].head(),
             "Not removed from free list");
      // no block offset table adjustment is necessary on blocks in
      // the indexed lists.

    // Try allocating from the small LinAB
    } else if (size < _smallLinearAllocBlock._allocation_size_limit &&
               (res = getChunkFromSmallLinearAllocBlock(size)) != NULL) {
      // if successful, the above also adjusts block offset table
      // Note that this call will refill the LinAB to
      // satisfy the request.  This is different than evm.
      // Don't record chunk off a LinAB?  smallSplitBirth(size);
    } else {
      // Raid the exact free lists larger than size, even if they are not
      // overpopulated.
      res = (HeapWord*) getChunkFromGreater(size);
    }
  } else {
    // Big objects get allocated directly from the dictionary.
    res = (HeapWord*) getChunkFromDictionaryExact(size);
    if (res == NULL) {
      // Try hard not to fail since an allocation failure will likely
      // trigger a synchronous GC.  Try to get the space from the
      // allocation blocks.
      res = getChunkFromSmallLinearAllocBlockRemainder(size);
    }
  }

  return res;
}

// A worst-case estimate of the space required (in HeapWords) to expand the heap
// when promoting obj.
size_t CompactibleFreeListSpace::expansionSpaceRequired(size_t obj_size) const {
  // Depending on the object size, expansion may require refilling either a
  // bigLAB or a smallLAB plus refilling a PromotionInfo object.  MinChunkSize
  // is added because the dictionary may over-allocate to avoid fragmentation.
  size_t space = obj_size;
  if (!_adaptive_freelists) {
    space = MAX2(space, _smallLinearAllocBlock._refillSize);
  }
  space += _promoInfo.refillSize() + 2 * MinChunkSize;
  return space;
}
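
// Editor's worked example (all numbers assumed purely for illustration):
// for a non-adaptive space with MinChunkSize == 2, a promoInfo refill of
// 128 words, a linAB _refillSize of 1024 words, and obj_size == 6,
//
//   space = MAX2(6, 1024) + 128 + 2*2 = 1024 + 132 = 1156 words,
//
// i.e. the worst-case estimate is dominated by the refill sizes rather
// than by the object being promoted.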

FreeChunk* CompactibleFreeListSpace::getChunkFromGreater(size_t numWords) {
  FreeChunk* ret;

  assert(numWords >= MinChunkSize, "Size is less than minimum");
  assert(linearAllocationWouldFail() || bestFitFirst(),
         "Should not be here");

  size_t i;
  size_t currSize = numWords + MinChunkSize;
  assert(currSize % MinObjAlignment == 0, "currSize should be aligned");
  for (i = currSize; i < IndexSetSize; i += IndexSetStride) {
    AdaptiveFreeList<FreeChunk>* fl = &_indexedFreeList[i];
    if (fl->head()) {
      ret = getFromListGreater(fl, numWords);
      assert(ret == NULL || ret->is_free(), "Should be returning a free chunk");
      return ret;
    }
  }

  currSize = MAX2((size_t)SmallForDictionary,
                  (size_t)(numWords + MinChunkSize));

  /* Try to get a chunk that satisfies request, while avoiding
     fragmentation that can't be handled. */
  {
    ret = dictionary()->get_chunk(currSize);
    if (ret != NULL) {
      assert(ret->size() - numWords >= MinChunkSize,
             "Chunk is too small");
      _bt.allocated((HeapWord*)ret, ret->size());
      /* Carve returned chunk. */
      (void) splitChunkAndReturnRemainder(ret, numWords);
      /* Label this as no longer a free chunk. */
      assert(ret->is_free(), "This chunk should be free");
      ret->link_prev(NULL);
    }
    assert(ret == NULL || ret->is_free(), "Should be returning a free chunk");
    return ret;
  }
  ShouldNotReachHere();
}

bool CompactibleFreeListSpace::verifyChunkInIndexedFreeLists(FreeChunk* fc) const {
  assert(fc->size() < IndexSetSize, "Size of chunk is too large");
  return _indexedFreeList[fc->size()].verify_chunk_in_free_list(fc);
}

bool CompactibleFreeListSpace::verify_chunk_is_linear_alloc_block(FreeChunk* fc) const {
  assert((_smallLinearAllocBlock._ptr != (HeapWord*)fc) ||
         (_smallLinearAllocBlock._word_size == fc->size()),
         "Linear allocation block shows incorrect size");
  return ((_smallLinearAllocBlock._ptr == (HeapWord*)fc) &&
          (_smallLinearAllocBlock._word_size == fc->size()));
}

// Check if the purported free chunk is present either as a linear
// allocation block, the size-indexed table of (smaller) free blocks,
// or the larger free blocks kept in the binary tree dictionary.
bool CompactibleFreeListSpace::verify_chunk_in_free_list(FreeChunk* fc) const {
  if (verify_chunk_is_linear_alloc_block(fc)) {
    return true;
  } else if (fc->size() < IndexSetSize) {
    return verifyChunkInIndexedFreeLists(fc);
  } else {
    return dictionary()->verify_chunk_in_free_list(fc);
  }
}

#ifndef PRODUCT
void CompactibleFreeListSpace::assert_locked() const {
  CMSLockVerifier::assert_locked(freelistLock(), parDictionaryAllocLock());
}

void CompactibleFreeListSpace::assert_locked(const Mutex* lock) const {
  CMSLockVerifier::assert_locked(lock);
}
#endif

FreeChunk* CompactibleFreeListSpace::allocateScratch(size_t size) {
  // In the parallel case, the main thread holds the free list lock
  // on behalf of the parallel threads.
  FreeChunk* fc;
  {
    // If GC is parallel, this might be called by several threads.
    // This should be rare enough that the locking overhead won't affect
    // the sequential code.
    MutexLockerEx x(parDictionaryAllocLock(),
                    Mutex::_no_safepoint_check_flag);
    fc = getChunkFromDictionary(size);
  }
  if (fc != NULL) {
    fc->dontCoalesce();
    assert(fc->is_free(), "Should be free, but not coalescable");
    // Verify that the block offset table shows this to
    // be a single block, but not one which is unallocated.
    _bt.verify_single_block((HeapWord*)fc, fc->size());
    _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
  }
  return fc;
}
1411 oop CompactibleFreeListSpace::promote(oop obj, size_t obj_size) {
1412 assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
1413 assert_locked();
1415 // if we are tracking promotions, then first ensure space for
1416 // promotion (including spooling space for saving header if necessary).
1417 // then allocate and copy, then track promoted info if needed.
1418 // When tracking (see PromotionInfo::track()), the mark word may
1419 // be displaced and in this case restoration of the mark word
1420 // occurs in the (oop_since_save_marks_)iterate phase.
1421 if (_promoInfo.tracking() && !_promoInfo.ensure_spooling_space()) {
1422 return NULL;
1423 }
1424 // Call the allocate(size_t, bool) form directly to avoid the
1425 // additional call through the allocate(size_t) form. Having
1426 // the compiler inline the call is problematic because allocate(size_t)
1427 // is a virtual method.
1428 HeapWord* res = allocate(adjustObjectSize(obj_size));
1429 if (res != NULL) {
1430 Copy::aligned_disjoint_words((HeapWord*)obj, res, obj_size);
1431 // if we should be tracking promotions, do so.
1432 if (_promoInfo.tracking()) {
1433 _promoInfo.track((PromotedObject*)res);
1434 }
1435 }
1436 return oop(res);
1437 }
1439 HeapWord*
1440 CompactibleFreeListSpace::getChunkFromSmallLinearAllocBlock(size_t size) {
1441 assert_locked();
1442 assert(size >= MinChunkSize, "minimum chunk size");
1443 assert(size < _smallLinearAllocBlock._allocation_size_limit,
1444 "maximum from smallLinearAllocBlock");
1445 return getChunkFromLinearAllocBlock(&_smallLinearAllocBlock, size);
1446 }
1448 HeapWord*
1449 CompactibleFreeListSpace::getChunkFromLinearAllocBlock(LinearAllocBlock *blk,
1450 size_t size) {
1451 assert_locked();
1452 assert(size >= MinChunkSize, "too small");
1453 HeapWord* res = NULL;
1454 // Try to do linear allocation from blk, making sure that the block is not empty.
1455 if (blk->_word_size == 0) {
1456 // We have probably been unable to fill this either in the prologue or
1457 // when it was exhausted at the last linear allocation. Bail out until
1458 // next time.
1459 assert(blk->_ptr == NULL, "consistency check");
1460 return NULL;
1461 }
1462 assert(blk->_word_size != 0 && blk->_ptr != NULL, "consistency check");
1463 res = getChunkFromLinearAllocBlockRemainder(blk, size);
1464 if (res != NULL) return res;
1466 // about to exhaust this linear allocation block
1467 if (blk->_word_size == size) { // exactly satisfied
1468 res = blk->_ptr;
1469 _bt.allocated(res, blk->_word_size);
1470 } else if (size + MinChunkSize <= blk->_refillSize) {
1471 size_t sz = blk->_word_size;
1472 // Update _unallocated_block if the size is such that the chunk would be
1473 // returned to the indexed free list. All other chunks in the indexed
1474 // free lists are allocated from the dictionary, so _unallocated_block
1475 // has already been adjusted for them. Do it here so that the cost is
1476 // incurred uniformly for all chunks added back to the indexed free lists.
1477 if (sz < SmallForDictionary) {
1478 _bt.allocated(blk->_ptr, sz);
1479 }
1480 // Return the chunk that isn't big enough, and then refill below.
1481 addChunkToFreeLists(blk->_ptr, sz);
1482 split_birth(sz);
1483 // Don't keep statistics on adding back chunk from a LinAB.
1484 } else {
1485 // A refilled block would not satisfy the request.
1486 return NULL;
1487 }
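// At this point the old contents of the linAB have either been handed
// out (exact fit) or recycled to the free lists above; empty the block
// and attempt to refill it.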
1489 blk->_ptr = NULL; blk->_word_size = 0;
1490 refillLinearAllocBlock(blk);
1491 assert(blk->_ptr == NULL || blk->_word_size >= size + MinChunkSize,
1492 "block was replenished");
1493 if (res != NULL) {
1494 split_birth(size);
1495 repairLinearAllocBlock(blk);
1496 } else if (blk->_ptr != NULL) {
1497 res = blk->_ptr;
1498 size_t blk_size = blk->_word_size;
1499 blk->_word_size -= size;
1500 blk->_ptr += size;
1501 split_birth(size);
1502 repairLinearAllocBlock(blk);
1503 // Update BOT last so that other (parallel) GC threads see a consistent
1504 // view of the BOT and free blocks.
1505 // Above must occur before BOT is updated below.
1506 OrderAccess::storestore();
1507 _bt.split_block(res, blk_size, size); // adjust block offset table
1508 }
1509 return res;
1510 }
1512 HeapWord* CompactibleFreeListSpace::getChunkFromLinearAllocBlockRemainder(
1513 LinearAllocBlock* blk,
1514 size_t size) {
1515 assert_locked();
1516 assert(size >= MinChunkSize, "too small");
1518 HeapWord* res = NULL;
1519 // This is the common case. Keep it simple.
1520 if (blk->_word_size >= size + MinChunkSize) {
1521 assert(blk->_ptr != NULL, "consistency check");
1522 res = blk->_ptr;
1523 // Note that the BOT is up-to-date for the linAB before allocation. It
1524 // indicates the start of the linAB. The split_block() updates the
1525 // BOT for the linAB after the allocation (indicates the start of the
1526 // next chunk to be allocated).
1527 size_t blk_size = blk->_word_size;
1528 blk->_word_size -= size;
1529 blk->_ptr += size;
1530 split_birth(size);
1531 repairLinearAllocBlock(blk);
1532 // Update BOT last so that other (parallel) GC threads see a consistent
1533 // view of the BOT and free blocks.
1534 // Above must occur before BOT is updated below.
1535 OrderAccess::storestore();
1536 _bt.split_block(res, blk_size, size); // adjust block offset table
1537 _bt.allocated(res, size);
1538 }
1539 return res;
1540 }
1542 FreeChunk*
1543 CompactibleFreeListSpace::getChunkFromIndexedFreeList(size_t size) {
1544 assert_locked();
1545 assert(size < SmallForDictionary, "just checking");
1546 FreeChunk* res;
1547 res = _indexedFreeList[size].get_chunk_at_head();
1548 if (res == NULL) {
1549 res = getChunkFromIndexedFreeListHelper(size);
1550 }
1551 _bt.verify_not_unallocated((HeapWord*) res, size);
1552 assert(res == NULL || res->size() == size, "Incorrect block size");
1553 return res;
1554 }
1556 FreeChunk*
1557 CompactibleFreeListSpace::getChunkFromIndexedFreeListHelper(size_t size,
1558 bool replenish) {
1559 assert_locked();
1560 FreeChunk* fc = NULL;
1561 if (size < SmallForDictionary) {
1562 assert(_indexedFreeList[size].head() == NULL ||
1563 _indexedFreeList[size].surplus() <= 0,
1564 "List for this size should be empty or under populated");
1565 // Try best fit in exact lists before replenishing the list
1566 if (!bestFitFirst() || (fc = bestFitSmall(size)) == NULL) {
1567 // Replenish list.
1568 //
1569 // Things tried that failed:
1570 //  - Allocating out of the two LinABs first, before
1571 //    replenishing lists.
1572 //  - Using a small linAB of size 256 (a size in the indexed list)
1573 //    and replenishing the indexed lists from the small linAB.
1574 //
1575 FreeChunk* newFc = NULL;
1576 const size_t replenish_size = CMSIndexedFreeListReplenish * size;
1577 if (replenish_size < SmallForDictionary) {
1578 // Do not replenish from an underpopulated size.
1579 if (_indexedFreeList[replenish_size].surplus() > 0 &&
1580 _indexedFreeList[replenish_size].head() != NULL) {
1581 newFc = _indexedFreeList[replenish_size].get_chunk_at_head();
1582 } else if (bestFitFirst()) {
1583 newFc = bestFitSmall(replenish_size);
1584 }
1585 }
1586 if (newFc == NULL && replenish_size > size) {
1587 assert(CMSIndexedFreeListReplenish > 1, "ctl pt invariant");
1588 newFc = getChunkFromIndexedFreeListHelper(replenish_size, false);
1589 }
1590 // Note: The split death of the block obtained above is recorded
1591 // in the stats below precisely when we know we are actually going
1592 // to split it into more than one piece.
1593 if (newFc != NULL) {
1594 if (replenish || CMSReplenishIntermediate) {
1595 // Replenish this list and return one block to caller.
1596 size_t i;
1597 FreeChunk *curFc, *nextFc;
1598 size_t num_blk = newFc->size() / size;
1599 assert(num_blk >= 1, "Smaller than requested?");
1600 assert(newFc->size() % size == 0, "Should be integral multiple of request");
1601 if (num_blk > 1) {
1602 // we are sure we will be splitting the block just obtained
1603 // into multiple pieces; record the split-death of the original
1604 splitDeath(replenish_size);
1605 }
1606 // Carve up and link blocks 0, ..., num_blk - 2.
1607 // The last chunk is not added to the lists but is returned as the
1608 // free chunk.
1609 for (curFc = newFc, nextFc = (FreeChunk*)((HeapWord*)curFc + size),
1610 i = 0;
1611 i < (num_blk - 1);
1612 curFc = nextFc, nextFc = (FreeChunk*)((HeapWord*)nextFc + size),
1613 i++) {
1614 curFc->set_size(size);
1615 // Don't record this as a return in order to try and
1616 // determine the "returns" from a GC.
1617 _bt.verify_not_unallocated((HeapWord*) curFc, size);
1618 _indexedFreeList[size].return_chunk_at_tail(curFc, false);
1619 _bt.mark_block((HeapWord*)curFc, size);
1620 split_birth(size);
1621 // Don't record the initial population of the indexed list
1622 // as a split birth.
1623 }
1625 // check that the arithmetic was OK above
1626 assert((HeapWord*)nextFc == (HeapWord*)newFc + num_blk*size,
1627 "inconsistency in carving newFc");
1628 curFc->set_size(size);
1629 _bt.mark_block((HeapWord*)curFc, size);
1630 split_birth(size);
1631 fc = curFc;
1632 } else {
1633 // Return entire block to caller
1634 fc = newFc;
1635 }
1636 }
1637 }
1638 } else {
1639 // Get a free chunk from the free chunk dictionary to be returned to
1640 // replenish the indexed free list.
1641 fc = getChunkFromDictionaryExact(size);
1642 }
1643 // assert(fc == NULL || fc->is_free(), "Should be returning a free chunk");
1644 return fc;
1645 }
1647 FreeChunk*
1648 CompactibleFreeListSpace::getChunkFromDictionary(size_t size) {
1649 assert_locked();
1650 FreeChunk* fc = _dictionary->get_chunk(size,
1651 FreeBlockDictionary<FreeChunk>::atLeast);
1652 if (fc == NULL) {
1653 return NULL;
1654 }
1655 _bt.allocated((HeapWord*)fc, fc->size());
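// The dictionary may hand back a chunk larger than requested; if the
// excess is at least MinChunkSize, keep a prefix of the requested size
// and return the remainder to the free lists.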
1656 if (fc->size() >= size + MinChunkSize) {
1657 fc = splitChunkAndReturnRemainder(fc, size);
1658 }
1659 assert(fc->size() >= size, "chunk too small");
1660 assert(fc->size() < size + MinChunkSize, "chunk too big");
1661 _bt.verify_single_block((HeapWord*)fc, fc->size());
1662 return fc;
1663 }
1665 FreeChunk*
1666 CompactibleFreeListSpace::getChunkFromDictionaryExact(size_t size) {
1667 assert_locked();
1668 FreeChunk* fc = _dictionary->get_chunk(size,
1669 FreeBlockDictionary<FreeChunk>::atLeast);
1670 if (fc == NULL) {
1671 return fc;
1672 }
1673 _bt.allocated((HeapWord*)fc, fc->size());
1674 if (fc->size() == size) {
1675 _bt.verify_single_block((HeapWord*)fc, size);
1676 return fc;
1677 }
1678 assert(fc->size() > size, "get_chunk() guarantee");
1679 if (fc->size() < size + MinChunkSize) {
1680 // Return the chunk to the dictionary and go get a bigger one.
1681 returnChunkToDictionary(fc);
1682 fc = _dictionary->get_chunk(size + MinChunkSize,
1683 FreeBlockDictionary<FreeChunk>::atLeast);
1684 if (fc == NULL) {
1685 return NULL;
1686 }
1687 _bt.allocated((HeapWord*)fc, fc->size());
1688 }
1689 assert(fc->size() >= size + MinChunkSize, "tautology");
1690 fc = splitChunkAndReturnRemainder(fc, size);
1691 assert(fc->size() == size, "chunk is wrong size");
1692 _bt.verify_single_block((HeapWord*)fc, size);
1693 return fc;
1694 }
1696 void
1697 CompactibleFreeListSpace::returnChunkToDictionary(FreeChunk* chunk) {
1698 assert_locked();
1700 size_t size = chunk->size();
1701 _bt.verify_single_block((HeapWord*)chunk, size);
1702 // adjust _unallocated_block downward, as necessary
1703 _bt.freed((HeapWord*)chunk, size);
1704 _dictionary->return_chunk(chunk);
1705 #ifndef PRODUCT
1706 if (CMSCollector::abstract_state() != CMSCollector::Sweeping) {
1707 TreeChunk<FreeChunk, AdaptiveFreeList>* tc = TreeChunk<FreeChunk, AdaptiveFreeList>::as_TreeChunk(chunk);
1708 TreeList<FreeChunk, AdaptiveFreeList>* tl = tc->list();
1709 tl->verify_stats();
1710 }
1711 #endif // PRODUCT
1712 }
1714 void
1715 CompactibleFreeListSpace::returnChunkToFreeList(FreeChunk* fc) {
1716 assert_locked();
1717 size_t size = fc->size();
1718 _bt.verify_single_block((HeapWord*) fc, size);
1719 _bt.verify_not_unallocated((HeapWord*) fc, size);
1720 if (_adaptive_freelists) {
1721 _indexedFreeList[size].return_chunk_at_tail(fc);
1722 } else {
1723 _indexedFreeList[size].return_chunk_at_head(fc);
1724 }
1725 #ifndef PRODUCT
1726 if (CMSCollector::abstract_state() != CMSCollector::Sweeping) {
1727 _indexedFreeList[size].verify_stats();
1728 }
1729 #endif // PRODUCT
1730 }
1732 // Add the chunk to the free lists, coalescing it with the largest
1733 // block if that block ends exactly where the chunk begins, and update
1734 // the BOT and census data. We would of course have preferred to
1735 // coalesce it with the last block, but it's currently less expensive
1736 // to find the largest block than it is to find the last.
1737 void
1738 CompactibleFreeListSpace::addChunkToFreeListsAtEndRecordingStats(
1739 HeapWord* chunk, size_t size) {
1740 // check that the chunk does lie in this space!
1741 assert(chunk != NULL && is_in_reserved(chunk), "Not in this space!");
1742 // One of the parallel gc task threads may be here
1743 // whilst others are allocating.
1744 Mutex* lock = NULL;
1745 if (ParallelGCThreads != 0) {
1746 lock = &_parDictionaryAllocLock;
1747 }
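// Probe the dictionary for its current largest block under the
// dictionary lock; if that block is coterminal with 'chunk' (ends
// exactly where 'chunk' begins), the two are coalesced below.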
1748 FreeChunk* ec;
1749 {
1750 MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
1751 ec = dictionary()->find_largest_dict(); // get largest block
1752 if (ec != NULL && ec->end() == (uintptr_t*) chunk) {
1753 // It's a coterminal block - we can coalesce.
1754 size_t old_size = ec->size();
1755 coalDeath(old_size);
1756 removeChunkFromDictionary(ec);
1757 size += old_size;
1758 } else {
1759 ec = (FreeChunk*)chunk;
1760 }
1761 }
1762 ec->set_size(size);
1763 debug_only(ec->mangleFreed(size));
1764 if (size < SmallForDictionary && ParallelGCThreads != 0) {
1765 lock = _indexedFreeListParLocks[size];
1766 }
1767 MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
1768 addChunkAndRepairOffsetTable((HeapWord*)ec, size, true);
1769 // Record the birth under the lock, since the recording involves
1770 // manipulation of the list on which the chunk lives and,
1771 // if the chunk is allocated and is the last on the list,
1772 // the list can go away.
1773 coalBirth(size);
1774 }
1776 void
1777 CompactibleFreeListSpace::addChunkToFreeLists(HeapWord* chunk,
1778 size_t size) {
1779 // check that the chunk does lie in this space!
1780 assert(chunk != NULL && is_in_reserved(chunk), "Not in this space!");
1781 assert_locked();
1782 _bt.verify_single_block(chunk, size);
1784 FreeChunk* fc = (FreeChunk*) chunk;
1785 fc->set_size(size);
1786 debug_only(fc->mangleFreed(size));
1787 if (size < SmallForDictionary) {
1788 returnChunkToFreeList(fc);
1789 } else {
1790 returnChunkToDictionary(fc);
1791 }
1792 }
1794 void
1795 CompactibleFreeListSpace::addChunkAndRepairOffsetTable(HeapWord* chunk,
1796 size_t size, bool coalesced) {
1797 assert_locked();
1798 assert(chunk != NULL, "null chunk");
1799 if (coalesced) {
1800 // repair BOT
1801 _bt.single_block(chunk, size);
1802 }
1803 addChunkToFreeLists(chunk, size);
1804 }
1806 // We _must_ find the purported chunk on our free lists;
1807 // we assert if we don't.
1808 void
1809 CompactibleFreeListSpace::removeFreeChunkFromFreeLists(FreeChunk* fc) {
1810 size_t size = fc->size();
1811 assert_locked();
1812 debug_only(verifyFreeLists());
1813 if (size < SmallForDictionary) {
1814 removeChunkFromIndexedFreeList(fc);
1815 } else {
1816 removeChunkFromDictionary(fc);
1817 }
1818 _bt.verify_single_block((HeapWord*)fc, size);
1819 debug_only(verifyFreeLists());
1820 }
1822 void
1823 CompactibleFreeListSpace::removeChunkFromDictionary(FreeChunk* fc) {
1824 size_t size = fc->size();
1825 assert_locked();
1826 assert(fc != NULL, "null chunk");
1827 _bt.verify_single_block((HeapWord*)fc, size);
1828 _dictionary->remove_chunk(fc);
1829 // adjust _unallocated_block upward, as necessary
1830 _bt.allocated((HeapWord*)fc, size);
1831 }
1833 void
1834 CompactibleFreeListSpace::removeChunkFromIndexedFreeList(FreeChunk* fc) {
1835 assert_locked();
1836 size_t size = fc->size();
1837 _bt.verify_single_block((HeapWord*)fc, size);
1838 NOT_PRODUCT(
1839 if (FLSVerifyIndexTable) {
1840 verifyIndexedFreeList(size);
1841 }
1842 )
1843 _indexedFreeList[size].remove_chunk(fc);
1844 NOT_PRODUCT(
1845 if (FLSVerifyIndexTable) {
1846 verifyIndexedFreeList(size);
1847 }
1848 )
1849 }
1851 FreeChunk* CompactibleFreeListSpace::bestFitSmall(size_t numWords) {
1852 /* A hint is the next larger size that has a surplus.
1853 Start search at a size large enough to guarantee that
1854 the excess is >= MinChunkSize. */
1855 size_t start = align_object_size(numWords + MinChunkSize);
1856 if (start < IndexSetSize) {
1857 AdaptiveFreeList<FreeChunk>* it = _indexedFreeList;
1858 size_t hint = _indexedFreeList[start].hint();
1859 while (hint < IndexSetSize) {
1860 assert(hint % MinObjAlignment == 0, "hint should be aligned");
1861 AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[hint];
1862 if (fl->surplus() > 0 && fl->head() != NULL) {
1863 // Found a list with surplus, reset original hint
1864 // and split out a free chunk which is returned.
1865 _indexedFreeList[start].set_hint(hint);
1866 FreeChunk* res = getFromListGreater(fl, numWords);
1867 assert(res == NULL || res->is_free(),
1868 "Should be returning a free chunk");
1869 return res;
1870 }
1871 hint = fl->hint(); /* keep looking */
1872 }
1873 /* None found. */
1874 it[start].set_hint(IndexSetSize);
1875 }
1876 return NULL;
1877 }
1879 /* Requires fl->size >= numWords + MinChunkSize */
1880 FreeChunk* CompactibleFreeListSpace::getFromListGreater(AdaptiveFreeList<FreeChunk>* fl,
1881 size_t numWords) {
1882 FreeChunk *curr = fl->head();
1883 assert(curr != NULL, "List is empty");
1884 size_t oldNumWords = curr->size();
1885 assert(numWords >= MinChunkSize, "Word size is too small");
1886 assert(oldNumWords >= numWords + MinChunkSize,
1887 "Size of chunks in the list is too small");
1889 fl->remove_chunk(curr);
1890 // The split is recorded indirectly by splitChunkAndReturnRemainder:
1891 // smallSplit(oldNumWords, numWords);
1892 FreeChunk* new_chunk = splitChunkAndReturnRemainder(curr, numWords);
1893 // Does anything have to be done for the remainder in terms of
1894 // fixing the card table?
1895 assert(new_chunk == NULL || new_chunk->is_free(),
1896 "Should be returning a free chunk");
1897 return new_chunk;
1898 }
1900 FreeChunk*
1901 CompactibleFreeListSpace::splitChunkAndReturnRemainder(FreeChunk* chunk,
1902 size_t new_size) {
1903 assert_locked();
1904 size_t size = chunk->size();
1905 assert(size > new_size, "Split from a smaller block?");
1906 assert(is_aligned(chunk), "alignment problem");
1907 assert(size == adjustObjectSize(size), "alignment problem");
1908 size_t rem_size = size - new_size;
1909 assert(rem_size == adjustObjectSize(rem_size), "alignment problem");
1910 assert(rem_size >= MinChunkSize, "Free chunk smaller than minimum");
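// Carve the remainder off the tail of the chunk: ffc addresses the
// rem_size words that follow the new_size-word prefix kept by the caller.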
1911 FreeChunk* ffc = (FreeChunk*)((HeapWord*)chunk + new_size);
1912 assert(is_aligned(ffc), "alignment problem");
1913 ffc->set_size(rem_size);
1914 ffc->link_next(NULL);
1915 ffc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
1916 // Above must occur before BOT is updated below.
1917 // adjust block offset table
1918 OrderAccess::storestore();
1919 assert(chunk->is_free() && ffc->is_free(), "Error");
1920 _bt.split_block((HeapWord*)chunk, chunk->size(), new_size);
1921 if (rem_size < SmallForDictionary) {
1922 bool is_par = (SharedHeap::heap()->n_par_threads() > 0);
1923 if (is_par) _indexedFreeListParLocks[rem_size]->lock();
1924 assert(!is_par ||
1925 (SharedHeap::heap()->n_par_threads() ==
1926 SharedHeap::heap()->workers()->active_workers()), "Mismatch");
1927 returnChunkToFreeList(ffc);
1928 split(size, rem_size);
1929 if (is_par) _indexedFreeListParLocks[rem_size]->unlock();
1930 } else {
1931 returnChunkToDictionary(ffc);
1932 split(size, rem_size);
1933 }
1934 chunk->set_size(new_size);
1935 return chunk;
1936 }
1938 void
1939 CompactibleFreeListSpace::sweep_completed() {
1940 // Now that space is probably plentiful, refill linear
1941 // allocation blocks as needed.
1942 refillLinearAllocBlocksIfNeeded();
1943 }
1945 void
1946 CompactibleFreeListSpace::gc_prologue() {
1947 assert_locked();
1948 if (PrintFLSStatistics != 0) {
1949 gclog_or_tty->print("Before GC:\n");
1950 reportFreeListStatistics();
1951 }
1952 refillLinearAllocBlocksIfNeeded();
1953 }
1955 void
1956 CompactibleFreeListSpace::gc_epilogue() {
1957 assert_locked();
1958 if (PrintGCDetails && Verbose && !_adaptive_freelists) {
1959 if (_smallLinearAllocBlock._word_size == 0)
1960 warning("CompactibleFreeListSpace(epilogue):: Linear allocation failure");
1961 }
1962 assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");
1963 _promoInfo.stopTrackingPromotions();
1964 repairLinearAllocationBlocks();
1965 // Print Space's stats
1966 if (PrintFLSStatistics != 0) {
1967 gclog_or_tty->print("After GC:\n");
1968 reportFreeListStatistics();
1969 }
1970 }
1972 // Iteration support, mostly delegated from a CMS generation
1974 void CompactibleFreeListSpace::save_marks() {
1975 assert(Thread::current()->is_VM_thread(),
1976 "Global variable should only be set when single-threaded");
1977 // Mark the "end" of the used space at the time of this call;
1978 // note, however, that promoted objects from this point
1979 // on are tracked in the _promoInfo below.
1980 set_saved_mark_word(unallocated_block());
1981 #ifdef ASSERT
1982 // Check the sanity of save_marks() etc.
1983 MemRegion ur = used_region();
1984 MemRegion urasm = used_region_at_save_marks();
1985 assert(ur.contains(urasm),
1986 err_msg(" Error at save_marks(): [" PTR_FORMAT "," PTR_FORMAT ")"
1987 " should contain [" PTR_FORMAT "," PTR_FORMAT ")",
1988 ur.start(), ur.end(), urasm.start(), urasm.end()));
1989 #endif
1990 // inform allocator that promotions should be tracked.
1991 assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");
1992 _promoInfo.startTrackingPromotions();
1993 }
1995 bool CompactibleFreeListSpace::no_allocs_since_save_marks() {
1996 assert(_promoInfo.tracking(), "No preceding save_marks?");
1997 assert(SharedHeap::heap()->n_par_threads() == 0,
1998 "Shouldn't be called if using parallel gc.");
1999 return _promoInfo.noPromotions();
2000 }
2002 #define CFLS_OOP_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix) \
2003 \
2004 void CompactibleFreeListSpace:: \
2005 oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk) { \
2006 assert(SharedHeap::heap()->n_par_threads() == 0, \
2007 "Shouldn't be called (yet) during parallel part of gc."); \
2008 _promoInfo.promoted_oops_iterate##nv_suffix(blk); \
2009 /* \
2010 * This also restores any displaced headers and removes the elements from \
2011 * the iteration set as they are processed, so that we have a clean slate \
2012 * at the end of the iteration. Note, thus, that if new objects are \
2013 * promoted as a result of the iteration they are iterated over as well. \
2014 */ \
2015 assert(_promoInfo.noPromotions(), "_promoInfo inconsistency"); \
2016 }
2018 ALL_SINCE_SAVE_MARKS_CLOSURES(CFLS_OOP_SINCE_SAVE_MARKS_DEFN)
2021 void CompactibleFreeListSpace::object_iterate_since_last_GC(ObjectClosure* cl) {
2022 // ugghh... how would one do this efficiently for a non-contiguous space?
2023 guarantee(false, "NYI");
2024 }
2026 bool CompactibleFreeListSpace::linearAllocationWouldFail() const {
2027 return _smallLinearAllocBlock._word_size == 0;
2028 }
2030 void CompactibleFreeListSpace::repairLinearAllocationBlocks() {
2031 // Fix up linear allocation blocks to look like free blocks
2032 repairLinearAllocBlock(&_smallLinearAllocBlock);
2033 }
2035 void CompactibleFreeListSpace::repairLinearAllocBlock(LinearAllocBlock* blk) {
2036 assert_locked();
2037 if (blk->_ptr != NULL) {
2038 assert(blk->_word_size != 0 && blk->_word_size >= MinChunkSize,
2039 "Minimum block size requirement");
2040 FreeChunk* fc = (FreeChunk*)(blk->_ptr);
2041 fc->set_size(blk->_word_size);
2042 fc->link_prev(NULL); // mark as free
2043 fc->dontCoalesce();
2044 assert(fc->is_free(), "just marked it free");
2045 assert(fc->cantCoalesce(), "just marked it uncoalescable");
2046 }
2047 }
2049 void CompactibleFreeListSpace::refillLinearAllocBlocksIfNeeded() {
2050 assert_locked();
2051 if (_smallLinearAllocBlock._ptr == NULL) {
2052 assert(_smallLinearAllocBlock._word_size == 0,
2053 "Size of linAB should be zero if the ptr is NULL");
2054 // Reset the linAB refill and allocation size limit.
2055 _smallLinearAllocBlock.set(0, 0, 1024*SmallForLinearAlloc, SmallForLinearAlloc);
2056 }
2057 refillLinearAllocBlockIfNeeded(&_smallLinearAllocBlock);
2058 }
2060 void
2061 CompactibleFreeListSpace::refillLinearAllocBlockIfNeeded(LinearAllocBlock* blk) {
2062 assert_locked();
2063 assert((blk->_ptr == NULL && blk->_word_size == 0) ||
2064 (blk->_ptr != NULL && blk->_word_size >= MinChunkSize),
2065 "blk invariant");
2066 if (blk->_ptr == NULL) {
2067 refillLinearAllocBlock(blk);
2068 }
2069 if (PrintMiscellaneous && Verbose) {
2070 if (blk->_word_size == 0) {
2071 warning("CompactibleFreeListSpace(prologue):: Linear allocation failure");
2072 }
2073 }
2074 }
2076 void
2077 CompactibleFreeListSpace::refillLinearAllocBlock(LinearAllocBlock* blk) {
2078 assert_locked();
2079 assert(blk->_word_size == 0 && blk->_ptr == NULL,
2080 "linear allocation block should be empty");
2081 FreeChunk* fc;
2082 if (blk->_refillSize < SmallForDictionary &&
2083 (fc = getChunkFromIndexedFreeList(blk->_refillSize)) != NULL) {
2084 // A linAB's strategy might be to use small sizes to reduce
2085 // fragmentation but still get the benefits of allocation from a
2086 // linAB.
2087 } else {
2088 fc = getChunkFromDictionary(blk->_refillSize);
2089 }
2090 if (fc != NULL) {
2091 blk->_ptr = (HeapWord*)fc;
2092 blk->_word_size = fc->size();
2093 fc->dontCoalesce(); // to prevent sweeper from sweeping us up
2094 }
2095 }
2097 // Support for concurrent collection policy decisions.
2098 bool CompactibleFreeListSpace::should_concurrent_collect() const {
2099 // In the future we might want to add in fragmentation stats --
2100 // including erosion of the "mountain" into this decision as well.
2101 return !adaptive_freelists() && linearAllocationWouldFail();
2102 }
2104 // Support for compaction
2106 void CompactibleFreeListSpace::prepare_for_compaction(CompactPoint* cp) {
2107 SCAN_AND_FORWARD(cp,end,block_is_obj,block_size);
2108 // prepare_for_compaction() uses the space between live objects
2109 // so that later phases can skip dead space quickly. As a result,
2110 // verification of the free lists doesn't work after this point.
2111 }
2113 #define obj_size(q) adjustObjectSize(oop(q)->size())
2114 #define adjust_obj_size(s) adjustObjectSize(s)
2116 void CompactibleFreeListSpace::adjust_pointers() {
2117 // In other versions of adjust_pointers(), a bail out
2118 // based on the amount of live data in the generation
2119 // (i.e., if 0, bail out) may be used.
2120 // Cannot test used() == 0 here because the free lists have already
2121 // been mangled by the compaction.
2123 SCAN_AND_ADJUST_POINTERS(adjust_obj_size);
2124 // See note about verification in prepare_for_compaction().
2125 }
2127 void CompactibleFreeListSpace::compact() {
2128 SCAN_AND_COMPACT(obj_size);
2129 }
2131 // fragmentation_metric = 1 - [sum of (fbs**2) / (sum of fbs)**2]
2132 // where fbs are the free block sizes
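// Intuition: a single free block gives a metric of 0 (no fragmentation),
// while n equally sized blocks of s words give
//   1 - (n*s^2)/(n*s)^2 = 1 - 1/n,
// which approaches 1 as the free space shatters into many small pieces.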
2133 double CompactibleFreeListSpace::flsFrag() const {
2134 size_t itabFree = totalSizeInIndexedFreeLists();
2135 double frag = 0.0;
2136 size_t i;
2138 for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2139 double sz = i;
2140 frag += _indexedFreeList[i].count() * (sz * sz);
2141 }
2143 double totFree = itabFree +
2144 _dictionary->total_chunk_size(DEBUG_ONLY(freelistLock()));
2145 if (totFree > 0) {
2146 frag = ((frag + _dictionary->sum_of_squared_block_sizes()) /
2147 (totFree * totFree));
2148 frag = (double)1.0 - frag;
2149 } else {
2150 assert(frag == 0.0, "Follows from totFree == 0");
2151 }
2152 return frag;
2153 }
2155 void CompactibleFreeListSpace::beginSweepFLCensus(
2156 float inter_sweep_current,
2157 float inter_sweep_estimate,
2158 float intra_sweep_estimate) {
2159 assert_locked();
2160 size_t i;
2161 for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2162 AdaptiveFreeList<FreeChunk>* fl = &_indexedFreeList[i];
2163 if (PrintFLSStatistics > 1) {
2164 gclog_or_tty->print("size[%d] : ", i);
2165 }
2166 fl->compute_desired(inter_sweep_current, inter_sweep_estimate, intra_sweep_estimate);
2167 fl->set_coal_desired((ssize_t)((double)fl->desired() * CMSSmallCoalSurplusPercent));
2168 fl->set_before_sweep(fl->count());
2169 fl->set_bfr_surp(fl->surplus());
2170 }
2171 _dictionary->begin_sweep_dict_census(CMSLargeCoalSurplusPercent,
2172 inter_sweep_current,
2173 inter_sweep_estimate,
2174 intra_sweep_estimate);
2175 }
2177 void CompactibleFreeListSpace::setFLSurplus() {
2178 assert_locked();
2179 size_t i;
2180 for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2181 AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
2182 fl->set_surplus(fl->count() -
2183 (ssize_t)((double)fl->desired() * CMSSmallSplitSurplusPercent));
2184 }
2185 }
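// Walk the index set from the largest size downward, recording for each
// size the nearest larger size that currently has a surplus;
// bestFitSmall() follows these hints when looking for a chunk to split.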
2187 void CompactibleFreeListSpace::setFLHints() {
2188 assert_locked();
2189 size_t i;
2190 size_t h = IndexSetSize;
2191 for (i = IndexSetSize - 1; i != 0; i -= IndexSetStride) {
2192 AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
2193 fl->set_hint(h);
2194 if (fl->surplus() > 0) {
2195 h = i;
2196 }
2197 }
2198 }
2200 void CompactibleFreeListSpace::clearFLCensus() {
2201 assert_locked();
2202 size_t i;
2203 for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2204 AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
2205 fl->set_prev_sweep(fl->count());
2206 fl->set_coal_births(0);
2207 fl->set_coal_deaths(0);
2208 fl->set_split_births(0);
2209 fl->set_split_deaths(0);
2210 }
2211 }
2213 void CompactibleFreeListSpace::endSweepFLCensus(size_t sweep_count) {
2214 if (PrintFLSStatistics > 0) {
2215 HeapWord* largestAddr = (HeapWord*) dictionary()->find_largest_dict();
2216 gclog_or_tty->print_cr("CMS: Large block " PTR_FORMAT,
2217 largestAddr);
2218 }
2219 setFLSurplus();
2220 setFLHints();
2221 if (PrintGC && PrintFLSCensus > 0) {
2222 printFLCensus(sweep_count);
2223 }
2224 clearFLCensus();
2225 assert_locked();
2226 _dictionary->end_sweep_dict_census(CMSLargeSplitSurplusPercent);
2227 }
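// A size is considered over-populated for coalescing purposes when its
// free list already holds more chunks than the coalescing target the
// census established (or when no meaningful target was established).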
2229 bool CompactibleFreeListSpace::coalOverPopulated(size_t size) {
2230 if (size < SmallForDictionary) {
2231 AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
2232 return (fl->coal_desired() < 0) ||
2233 ((int)fl->count() > fl->coal_desired());
2234 } else {
2235 return dictionary()->coal_dict_over_populated(size);
2236 }
2237 }
2239 void CompactibleFreeListSpace::smallCoalBirth(size_t size) {
2240 assert(size < SmallForDictionary, "Size too large for indexed list");
2241 AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
2242 fl->increment_coal_births();
2243 fl->increment_surplus();
2244 }
2246 void CompactibleFreeListSpace::smallCoalDeath(size_t size) {
2247 assert(size < SmallForDictionary, "Size too large for indexed list");
2248 AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
2249 fl->increment_coal_deaths();
2250 fl->decrement_surplus();
2251 }
2253 void CompactibleFreeListSpace::coalBirth(size_t size) {
2254 if (size < SmallForDictionary) {
2255 smallCoalBirth(size);
2256 } else {
2257 dictionary()->dict_census_update(size,
2258 false /* split */,
2259 true /* birth */);
2260 }
2261 }
2263 void CompactibleFreeListSpace::coalDeath(size_t size) {
2264 if (size < SmallForDictionary) {
2265 smallCoalDeath(size);
2266 } else {
2267 dictionary()->dict_census_update(size,
2268 false /* split */,
2269 false /* birth */);
2270 }
2271 }
2273 void CompactibleFreeListSpace::smallSplitBirth(size_t size) {
2274 assert(size < SmallForDictionary, "Size too large for indexed list");
2275 AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
2276 fl->increment_split_births();
2277 fl->increment_surplus();
2278 }
2280 void CompactibleFreeListSpace::smallSplitDeath(size_t size) {
2281 assert(size < SmallForDictionary, "Size too large for indexed list");
2282 AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
2283 fl->increment_split_deaths();
2284 fl->decrement_surplus();
2285 }
2287 void CompactibleFreeListSpace::split_birth(size_t size) {
2288 if (size < SmallForDictionary) {
2289 smallSplitBirth(size);
2290 } else {
2291 dictionary()->dict_census_update(size,
2292 true /* split */,
2293 true /* birth */);
2294 }
2295 }
2297 void CompactibleFreeListSpace::splitDeath(size_t size) {
2298 if (size < SmallForDictionary) {
2299 smallSplitDeath(size);
2300 } else {
2301 dictionary()->dict_census_update(size,
2302 true /* split */,
2303 false /* birth */);
2304 }
2305 }
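// Record the census effect of splitting a 'from'-word chunk into pieces
// of to1 and to2 = from - to1 words: one split death for the original
// size and a split birth for each resulting piece.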
2307 void CompactibleFreeListSpace::split(size_t from, size_t to1) {
2308 size_t to2 = from - to1;
2309 splitDeath(from);
2310 split_birth(to1);
2311 split_birth(to2);
2312 }
2314 void CompactibleFreeListSpace::print() const {
2315 print_on(tty);
2316 }
2318 void CompactibleFreeListSpace::prepare_for_verify() {
2319 assert_locked();
2320 repairLinearAllocationBlocks();
2321 // Verify that the SpoolBlocks look like free blocks of
2322 // appropriate sizes... To be done ...
2323 }
2325 class VerifyAllBlksClosure: public BlkClosure {
2326 private:
2327 const CompactibleFreeListSpace* _sp;
2328 const MemRegion _span;
2329 HeapWord* _last_addr;
2330 size_t _last_size;
2331 bool _last_was_obj;
2332 bool _last_was_live;
2334 public:
2335 VerifyAllBlksClosure(const CompactibleFreeListSpace* sp,
2336 MemRegion span) : _sp(sp), _span(span),
2337 _last_addr(NULL), _last_size(0),
2338 _last_was_obj(false), _last_was_live(false) { }
2340 virtual size_t do_blk(HeapWord* addr) {
2341 size_t res;
2342 bool was_obj = false;
2343 bool was_live = false;
2344 if (_sp->block_is_obj(addr)) {
2345 was_obj = true;
2346 oop p = oop(addr);
2347 guarantee(p->is_oop(), "Should be an oop");
2348 res = _sp->adjustObjectSize(p->size());
2349 if (_sp->obj_is_alive(addr)) {
2350 was_live = true;
2351 p->verify();
2352 }
2353 } else {
2354 FreeChunk* fc = (FreeChunk*)addr;
2355 res = fc->size();
2356 if (FLSVerifyLists && !fc->cantCoalesce()) {
2357 guarantee(_sp->verify_chunk_in_free_list(fc),
2358 "Chunk should be on a free list");
2359 }
2360 }
2361 if (res == 0) {
2362 gclog_or_tty->print_cr("Livelock: no rank reduction!");
2363 gclog_or_tty->print_cr(
2364 " Current: addr = " PTR_FORMAT ", size = " SIZE_FORMAT ", obj = %s, live = %s \n"
2365 " Previous: addr = " PTR_FORMAT ", size = " SIZE_FORMAT ", obj = %s, live = %s \n",
2366 addr, res, was_obj ?"true":"false", was_live ?"true":"false",
2367 _last_addr, _last_size, _last_was_obj?"true":"false", _last_was_live?"true":"false");
2368 _sp->print_on(gclog_or_tty);
2369 guarantee(false, "Seppuku!");
2370 }
2371 _last_addr = addr;
2372 _last_size = res;
2373 _last_was_obj = was_obj;
2374 _last_was_live = was_live;
2375 return res;
2376 }
2377 };
2379 class VerifyAllOopsClosure: public OopClosure {
2380 private:
2381 const CMSCollector* _collector;
2382 const CompactibleFreeListSpace* _sp;
2383 const MemRegion _span;
2384 const bool _past_remark;
2385 const CMSBitMap* _bit_map;
2387 protected:
2388 void do_oop(void* p, oop obj) {
2389 if (_span.contains(obj)) { // the interior oop points into CMS heap
2390 if (!_span.contains(p)) { // reference from outside CMS heap
2391 // Should be a valid object; the first disjunct below allows
2392 // us to sidestep an assertion in block_is_obj() that insists
2393 // that p be in _sp. Note that several generations (and spaces)
2394 // are spanned by _span (CMS heap) above.
2395 guarantee(!_sp->is_in_reserved(obj) ||
2396 _sp->block_is_obj((HeapWord*)obj),
2397 "Should be an object");
2398 guarantee(obj->is_oop(), "Should be an oop");
2399 obj->verify();
2400 if (_past_remark) {
2401 // Remark has been completed, the object should be marked
2402 guarantee(_bit_map->isMarked((HeapWord*)obj), "Should be marked");
2403 }
2404 } else { // reference within CMS heap
2405 if (_past_remark) {
2406 // Remark has been completed -- so the referent should have
2407 // been marked, if referring object is.
2408 if (_bit_map->isMarked(_collector->block_start(p))) {
2409 guarantee(_bit_map->isMarked((HeapWord*)obj), "Marking error?");
2410 }
2411 }
2412 }
2413 } else if (_sp->is_in_reserved(p)) {
2414 // the reference is from FLS, and points out of FLS
2415 guarantee(obj->is_oop(), "Should be an oop");
2416 obj->verify();
2417 }
2418 }
2420 template <class T> void do_oop_work(T* p) {
2421 T heap_oop = oopDesc::load_heap_oop(p);
2422 if (!oopDesc::is_null(heap_oop)) {
2423 oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
2424 do_oop(p, obj);
2425 }
2426 }
2428 public:
2429 VerifyAllOopsClosure(const CMSCollector* collector,
2430 const CompactibleFreeListSpace* sp, MemRegion span,
2431 bool past_remark, CMSBitMap* bit_map) :
2432 _collector(collector), _sp(sp), _span(span),
2433 _past_remark(past_remark), _bit_map(bit_map) { }
2435 virtual void do_oop(oop* p) { VerifyAllOopsClosure::do_oop_work(p); }
2436 virtual void do_oop(narrowOop* p) { VerifyAllOopsClosure::do_oop_work(p); }
2437 };
2439 void CompactibleFreeListSpace::verify() const {
2440 assert_lock_strong(&_freelistLock);
2441 verify_objects_initialized();
2442 MemRegion span = _collector->_span;
2443 bool past_remark = (_collector->abstract_state() ==
2444 CMSCollector::Sweeping);
2446 ResourceMark rm;
2447 HandleMark hm;
2449 // Check integrity of CFL data structures
2450 _promoInfo.verify();
2451 _dictionary->verify();
2452 if (FLSVerifyIndexTable) {
2453 verifyIndexedFreeLists();
2454 }
2455 // Check integrity of all objects and free blocks in space
2456 {
2457 VerifyAllBlksClosure cl(this, span);
2458 ((CompactibleFreeListSpace*)this)->blk_iterate(&cl); // cast off const
2459 }
2460 // Check that all references in the heap to FLS
2461 // are to valid objects in FLS or that references in
2462 // FLS are to valid objects elsewhere in the heap
2463 if (FLSVerifyAllHeapReferences)
2464 {
2465 VerifyAllOopsClosure cl(_collector, this, span, past_remark,
2466 _collector->markBitMap());
2467 CollectedHeap* ch = Universe::heap();
2469 // Iterate over all oops in the heap. Uses the _no_header version
2470 // since we are not interested in following the klass pointers.
2471 ch->oop_iterate_no_header(&cl);
2472 }
2474 if (VerifyObjectStartArray) {
2475 // Verify the block offset table
2476 _bt.verify();
2477 }
2478 }
2480 #ifndef PRODUCT
2481 void CompactibleFreeListSpace::verifyFreeLists() const {
2482 if (FLSVerifyLists) {
2483 _dictionary->verify();
2484 verifyIndexedFreeLists();
2485 } else {
2486 if (FLSVerifyDictionary) {
2487 _dictionary->verify();
2488 }
2489 if (FLSVerifyIndexTable) {
2490 verifyIndexedFreeLists();
2491 }
2492 }
2493 }
2494 #endif
2496 void CompactibleFreeListSpace::verifyIndexedFreeLists() const {
2497 size_t i = 0;
2498 for (; i < IndexSetStart; i++) {
2499 guarantee(_indexedFreeList[i].head() == NULL, "should be NULL");
2500 }
2501 for (; i < IndexSetSize; i++) {
2502 verifyIndexedFreeList(i);
2503 }
2504 }
2506 void CompactibleFreeListSpace::verifyIndexedFreeList(size_t size) const {
2507 FreeChunk* fc = _indexedFreeList[size].head();
2508 FreeChunk* tail = _indexedFreeList[size].tail();
2509 size_t num = _indexedFreeList[size].count();
2510 size_t n = 0;
2511 guarantee(((size >= IndexSetStart) && (size % IndexSetStride == 0)) || fc == NULL,
2512 "Slot should have been empty");
2513 for (; fc != NULL; fc = fc->next(), n++) {
2514 guarantee(fc->size() == size, "Size inconsistency");
2515 guarantee(fc->is_free(), "!free?");
2516 guarantee(fc->next() == NULL || fc->next()->prev() == fc, "Broken list");
2517 guarantee((fc->next() == NULL) == (fc == tail), "Incorrect tail");
2518 }
2519 guarantee(n == num, "Incorrect count");
2520 }
2522 #ifndef PRODUCT
2523 void CompactibleFreeListSpace::check_free_list_consistency() const {
2524 assert((TreeChunk<FreeChunk, AdaptiveFreeList>::min_size() <= IndexSetSize),
2525 "Some sizes can't be allocated without recourse to"
2526 " linear allocation buffers");
2527 assert((TreeChunk<FreeChunk, AdaptiveFreeList>::min_size()*HeapWordSize == sizeof(TreeChunk<FreeChunk, AdaptiveFreeList>)),
2528 "else MIN_TREE_CHUNK_SIZE is wrong");
2529 assert(IndexSetStart != 0, "IndexSetStart not initialized");
2530 assert(IndexSetStride != 0, "IndexSetStride not initialized");
2531 }
2532 #endif
2534 void CompactibleFreeListSpace::printFLCensus(size_t sweep_count) const {
2535 assert_lock_strong(&_freelistLock);
2536 AdaptiveFreeList<FreeChunk> total;
2537 gclog_or_tty->print("end sweep# " SIZE_FORMAT "\n", sweep_count);
2538 AdaptiveFreeList<FreeChunk>::print_labels_on(gclog_or_tty, "size");
2539 size_t total_free = 0;
2540 for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2541 const AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
2542 total_free += fl->count() * fl->size();
2543 if (i % (40*IndexSetStride) == 0) {
2544 AdaptiveFreeList<FreeChunk>::print_labels_on(gclog_or_tty, "size");
2545 }
2546 fl->print_on(gclog_or_tty);
2547 total.set_bfr_surp( total.bfr_surp() + fl->bfr_surp() );
2548 total.set_surplus( total.surplus() + fl->surplus() );
2549 total.set_desired( total.desired() + fl->desired() );
2550 total.set_prev_sweep( total.prev_sweep() + fl->prev_sweep() );
2551 total.set_before_sweep(total.before_sweep() + fl->before_sweep());
2552 total.set_count( total.count() + fl->count() );
2553 total.set_coal_births( total.coal_births() + fl->coal_births() );
2554 total.set_coal_deaths( total.coal_deaths() + fl->coal_deaths() );
2555 total.set_split_births(total.split_births() + fl->split_births());
2556 total.set_split_deaths(total.split_deaths() + fl->split_deaths());
2557 }
2558 total.print_on(gclog_or_tty, "TOTAL");
2559 gclog_or_tty->print_cr("Total free in indexed lists "
2560 SIZE_FORMAT " words", total_free);
2561 gclog_or_tty->print("growth: %8.5f deficit: %8.5f\n",
2562 (double)(total.split_births()+total.coal_births()-total.split_deaths()-total.coal_deaths())/
2563 (total.prev_sweep() != 0 ? (double)total.prev_sweep() : 1.0),
2564 (double)(total.desired() - total.count())/(total.desired() != 0 ? (double)total.desired() : 1.0));
2565 _dictionary->print_dict_census();
2566 }
2568 ///////////////////////////////////////////////////////////////////////////
2569 // CFLS_LAB
2570 ///////////////////////////////////////////////////////////////////////////
2572 #define VECTOR_257(x) \
2573 /* 1 2 3 4 5 6 7 8 9 1x 11 12 13 14 15 16 17 18 19 2x 21 22 23 24 25 26 27 28 29 3x 31 32 */ \
2574 { x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, \
2575 x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, \
2576 x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, \
2577 x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, \
2578 x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, \
2579 x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, \
2580 x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, \
2581 x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, \
2582 x }
2584 // Initialize with default setting of CMSParPromoteBlocksToClaim, _not_
2585 // OldPLABSize, whose static default is different; if overridden on the
2586 // command line, this will get reinitialized via a call to
2587 // modify_initialization() below.
2588 AdaptiveWeightedAverage CFLS_LAB::_blocks_to_claim[] =
2589 VECTOR_257(AdaptiveWeightedAverage(OldPLABWeight, (float)CMSParPromoteBlocksToClaim));
2590 size_t CFLS_LAB::_global_num_blocks[] = VECTOR_257(0);
2591 uint CFLS_LAB::_global_num_workers[] = VECTOR_257(0);
2593 CFLS_LAB::CFLS_LAB(CompactibleFreeListSpace* cfls) :
2594 _cfls(cfls)
2595 {
2596 assert(CompactibleFreeListSpace::IndexSetSize == 257, "Modify VECTOR_257() macro above");
2597 for (size_t i = CompactibleFreeListSpace::IndexSetStart;
2598 i < CompactibleFreeListSpace::IndexSetSize;
2599 i += CompactibleFreeListSpace::IndexSetStride) {
2600 _indexedFreeList[i].set_size(i);
2601 _num_blocks[i] = 0;
2602 }
2603 }
2605 static bool _CFLS_LAB_modified = false;
2607 void CFLS_LAB::modify_initialization(size_t n, unsigned wt) {
2608 assert(!_CFLS_LAB_modified, "Call only once");
2609 _CFLS_LAB_modified = true;
2610 for (size_t i = CompactibleFreeListSpace::IndexSetStart;
2611 i < CompactibleFreeListSpace::IndexSetSize;
2612 i += CompactibleFreeListSpace::IndexSetStride) {
2613 _blocks_to_claim[i].modify(n, wt, true /* force */);
2614 }
2615 }
2617 HeapWord* CFLS_LAB::alloc(size_t word_sz) {
2618 FreeChunk* res;
2619 assert(word_sz == _cfls->adjustObjectSize(word_sz), "Error");
2620 if (word_sz >= CompactibleFreeListSpace::IndexSetSize) {
2621 // This locking manages sync with other large object allocations.
2622 MutexLockerEx x(_cfls->parDictionaryAllocLock(),
2623 Mutex::_no_safepoint_check_flag);
2624 res = _cfls->getChunkFromDictionaryExact(word_sz);
2625 if (res == NULL) return NULL;
2626 } else {
2627 AdaptiveFreeList<FreeChunk>* fl = &_indexedFreeList[word_sz];
2628 if (fl->count() == 0) {
2629 // Attempt to refill this local free list.
2630 get_from_global_pool(word_sz, fl);
2631 // If it didn't work, give up.
2632 if (fl->count() == 0) return NULL;
2633 }
2634 res = fl->get_chunk_at_head();
2635 assert(res != NULL, "Why was count non-zero?");
2636 }
2637 res->markNotFree();
2638 assert(!res->is_free(), "shouldn't be marked free");
2639 assert(oop(res)->klass_or_null() == NULL, "should look uninitialized");
2640 // mangle a just allocated object with a distinct pattern.
2641 debug_only(res->mangleAllocated(word_sz));
2642 return (HeapWord*)res;
2643 }
2645 // Get a chunk of blocks of the right size and update related
2646 // book-keeping stats
2647 void CFLS_LAB::get_from_global_pool(size_t word_sz, AdaptiveFreeList<FreeChunk>* fl) {
2648 // Get the #blocks we want to claim
2649 size_t n_blks = (size_t)_blocks_to_claim[word_sz].average();
2650 assert(n_blks > 0, "Error");
2651 assert(ResizeOldPLAB || n_blks == OldPLABSize, "Error");
2652 // In some cases, when the application has a phase change,
2653 // there may be a sudden and sharp shift in the object survival
2654 // profile, and updating the counts at the end of a scavenge
2655 // may not be quick enough, giving rise to large scavenge pauses
2656 // during these phase changes. It is beneficial to detect such
2657 // changes on-the-fly during a scavenge and avoid such a phase-change
2658 // pothole. The following code is a heuristic attempt to do that.
2659 // It is protected by a product flag until we have gained
2660 // enough experience with this heuristic and fine-tuned its behaviour.
2661 // WARNING: This might increase fragmentation if we overreact to
2662 // small spikes, so some kind of historical smoothing based on
2663 // previous experience with the greater reactivity might be useful.
2664 // Lacking sufficient experience, CMSOldPLABResizeQuicker is disabled by
2665 // default.
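// Concretely: while the blocks consumed so far this scavenge stay within
// CMSOldPLABToleranceFactor * CMSOldPLABNumRefills * n_blks, 'multiple'
// computes to 0 and n_blks is unchanged; each whole multiple of that
// bound inflates n_blks by CMSOldPLABReactivityFactor times itself,
// capped at CMSOldPLABMax.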
2666 if (ResizeOldPLAB && CMSOldPLABResizeQuicker) {
2667 size_t multiple = _num_blocks[word_sz]/(CMSOldPLABToleranceFactor*CMSOldPLABNumRefills*n_blks);
2668 n_blks += CMSOldPLABReactivityFactor*multiple*n_blks;
2669 n_blks = MIN2(n_blks, CMSOldPLABMax);
2670 }
2671 assert(n_blks > 0, "Error");
2672 _cfls->par_get_chunk_of_blocks(word_sz, n_blks, fl);
2673 // Update stats table entry for this block size
2674 _num_blocks[word_sz] += fl->count();
2675 }
2677 void CFLS_LAB::compute_desired_plab_size() {
2678 for (size_t i = CompactibleFreeListSpace::IndexSetStart;
2679 i < CompactibleFreeListSpace::IndexSetSize;
2680 i += CompactibleFreeListSpace::IndexSetStride) {
2681 assert((_global_num_workers[i] == 0) == (_global_num_blocks[i] == 0),
2682 "Counter inconsistency");
2683 if (_global_num_workers[i] > 0) {
2684 // Need to smooth wrt historical average
2685 if (ResizeOldPLAB) {
2686 _blocks_to_claim[i].sample(
2687 MAX2((size_t)CMSOldPLABMin,
2688 MIN2((size_t)CMSOldPLABMax,
2689 _global_num_blocks[i]/(_global_num_workers[i]*CMSOldPLABNumRefills))));
2690 }
2691 // Reset counters for next round
2692 _global_num_workers[i] = 0;
2693 _global_num_blocks[i] = 0;
2694 if (PrintOldPLAB) {
2695 gclog_or_tty->print_cr("[" SIZE_FORMAT "]: " SIZE_FORMAT, i, (size_t)_blocks_to_claim[i].average());
2696 }
2697 }
2698 }
2699 }
2701 // If this is changed in the future to allow parallel
2702 // access, one would need to take the FL locks and,
2703 // depending on how it is used, stagger access from
2704 // parallel threads to reduce contention.
2705 void CFLS_LAB::retire(int tid) {
2706 // We run this single threaded with the world stopped;
2707 // so no need for locks and such.
2708 NOT_PRODUCT(Thread* t = Thread::current();)
2709 assert(Thread::current()->is_VM_thread(), "Error");
2710 for (size_t i = CompactibleFreeListSpace::IndexSetStart;
2711 i < CompactibleFreeListSpace::IndexSetSize;
2712 i += CompactibleFreeListSpace::IndexSetStride) {
2713 assert(_num_blocks[i] >= (size_t)_indexedFreeList[i].count(),
2714 "Can't retire more than what we obtained");
2715 if (_num_blocks[i] > 0) {
2716 size_t num_retire = _indexedFreeList[i].count();
2717 assert(_num_blocks[i] > num_retire, "Should have used at least one");
2718 {
2719 // MutexLockerEx x(_cfls->_indexedFreeListParLocks[i],
2720 // Mutex::_no_safepoint_check_flag);
2722 // Update global stats for num_blocks used
2723 _global_num_blocks[i] += (_num_blocks[i] - num_retire);
2724 _global_num_workers[i]++;
2725 assert(_global_num_workers[i] <= ParallelGCThreads, "Too big");
2726 if (num_retire > 0) {
2727 _cfls->_indexedFreeList[i].prepend(&_indexedFreeList[i]);
2728 // Reset this list.
2729 _indexedFreeList[i] = AdaptiveFreeList<FreeChunk>();
2730 _indexedFreeList[i].set_size(i);
2731 }
2732 }
2733 if (PrintOldPLAB) {
2734 gclog_or_tty->print_cr("%d[" SIZE_FORMAT "]: " SIZE_FORMAT "/" SIZE_FORMAT "/" SIZE_FORMAT,
2735 tid, i, num_retire, _num_blocks[i], (size_t)_blocks_to_claim[i].average());
2736 }
2737 // Reset stats for next round
2738 _num_blocks[i] = 0;
2739 }
2740 }
2741 }
2743 void CompactibleFreeListSpace:: par_get_chunk_of_blocks(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl) {
2744 assert(fl->count() == 0, "Precondition.");
2745 assert(word_sz < CompactibleFreeListSpace::IndexSetSize,
2746 "Precondition");
2748 // We'll try all multiples of word_sz in the indexed set, starting with
2749 // word_sz itself and, if CMSSplitIndexedFreeListBlocks, larger multiples;
2750 // failing that, we'll try getting a big chunk and splitting it.
2751 {
2752 bool found;
2753 int k;
2754 size_t cur_sz;
2755 for (k = 1, cur_sz = k * word_sz, found = false;
2756 (cur_sz < CompactibleFreeListSpace::IndexSetSize) &&
2757 (CMSSplitIndexedFreeListBlocks || k <= 1);
2758 k++, cur_sz = k * word_sz) {
2759 AdaptiveFreeList<FreeChunk> fl_for_cur_sz; // Empty.
2760 fl_for_cur_sz.set_size(cur_sz);
2761 {
2762 MutexLockerEx x(_indexedFreeListParLocks[cur_sz],
2763 Mutex::_no_safepoint_check_flag);
2764 AdaptiveFreeList<FreeChunk>* gfl = &_indexedFreeList[cur_sz];
2765 if (gfl->count() != 0) {
2766 // nn is the number of chunks of size cur_sz that
2767 // we'd need to split k-ways each, in order to create
2768 // "n" chunks of size word_sz each.
2769 const size_t nn = MAX2(n/k, (size_t)1);
2770 gfl->getFirstNChunksFromList(nn, &fl_for_cur_sz);
2771 found = true;
2772 if (k > 1) {
2773 // Update split death stats for the cur_sz-size blocks list:
2774 // we increment the split death count by the number of blocks
2775 // we just took from the cur_sz-size blocks list and which
2776 // we will be splitting below.
2777 ssize_t deaths = gfl->split_deaths() +
2778 fl_for_cur_sz.count();
2779 gfl->set_split_deaths(deaths);
2780 }
2781 }
2782 }
2783 // Now transfer fl_for_cur_sz to fl. Common case, we hope, is k = 1.
2784 if (found) {
2785 if (k == 1) {
2786 fl->prepend(&fl_for_cur_sz);
2787 } else {
2788 // Divide each block on fl_for_cur_sz up k ways.
2789 FreeChunk* fc;
2790 while ((fc = fl_for_cur_sz.get_chunk_at_head()) != NULL) {
2791 // Must do this in reverse order, so that anybody attempting to
2792 // access the main chunk sees it as a single free block until we
2793 // change it.
2794 size_t fc_size = fc->size();
2795 assert(fc->is_free(), "Error");
2796 for (int i = k-1; i >= 0; i--) {
2797 FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz);
2798 assert((i != 0) ||
2799 ((fc == ffc) && ffc->is_free() &&
2800 (ffc->size() == k*word_sz) && (fc_size == word_sz)),
2801 "Counting error");
2802 ffc->set_size(word_sz);
2803 ffc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
2804 ffc->link_next(NULL);
2805 // Above must occur before BOT is updated below.
2806 OrderAccess::storestore();
2807 // splitting from the right, fc_size == i * word_sz
2808 _bt.mark_block((HeapWord*)ffc, word_sz, true /* reducing */);
2809 fc_size -= word_sz;
2810 assert(fc_size == i*word_sz, "Error");
2811 _bt.verify_not_unallocated((HeapWord*)ffc, word_sz);
2812 _bt.verify_single_block((HeapWord*)fc, fc_size);
2813 _bt.verify_single_block((HeapWord*)ffc, word_sz);
2814 // Push this on "fl".
2815 fl->return_chunk_at_head(ffc);
2816 }
2817 // TRAP
2818 assert(fl->tail()->next() == NULL, "List invariant.");
2819 }
2820 }
2821 // Update birth stats for this block size.
2822 size_t num = fl->count();
2823 MutexLockerEx x(_indexedFreeListParLocks[word_sz],
2824 Mutex::_no_safepoint_check_flag);
2825 ssize_t births = _indexedFreeList[word_sz].split_births() + num;
2826 _indexedFreeList[word_sz].set_split_births(births);
2827 return;
2828 }
2829 }
2830 }
2831 // Otherwise, we'll split a block from the dictionary.
2832 FreeChunk* fc = NULL;
2833 FreeChunk* rem_fc = NULL;
2834 size_t rem;
2835 {
2836 MutexLockerEx x(parDictionaryAllocLock(),
2837 Mutex::_no_safepoint_check_flag);
2838 while (n > 0) {
2839 fc = dictionary()->get_chunk(MAX2(n * word_sz, _dictionary->min_size()),
2840 FreeBlockDictionary<FreeChunk>::atLeast);
2841 if (fc != NULL) {
2842 _bt.allocated((HeapWord*)fc, fc->size(), true /* reducing */); // update _unallocated_blk
2843 dictionary()->dict_census_update(fc->size(),
2844 true /*split*/,
2845 false /*birth*/);
2846 break;
2847 } else {
2848 n--;
2849 }
2850 }
2851 if (fc == NULL) return;
2852 // Otherwise, split up that block.
2853 assert((ssize_t)n >= 1, "Control point invariant");
2854 assert(fc->is_free(), "Error: should be a free block");
2855 _bt.verify_single_block((HeapWord*)fc, fc->size());
2856 const size_t nn = fc->size() / word_sz;
2857 n = MIN2(nn, n);
2858 assert((ssize_t)n >= 1, "Control point invariant");
2859 rem = fc->size() - n * word_sz;
2860 // If there is a remainder, and it's too small, allocate one fewer.
2861 if (rem > 0 && rem < MinChunkSize) {
2862 n--; rem += word_sz;
2863 }
2864 // Note that at this point we may have n == 0.
2865 assert((ssize_t)n >= 0, "Control point invariant");
2867 // If n is 0, the chunk fc that was found is not large
2868 // enough to leave a viable remainder. We are unable to
2869 // allocate even one block. Return fc to the
2870 // dictionary and return, leaving "fl" empty.
2871 if (n == 0) {
2872 returnChunkToDictionary(fc);
2873 assert(fl->count() == 0, "We never allocated any blocks");
2874 return;
2875 }
2877 // First return the remainder, if any.
2878 // Note that we hold the lock until we decide if we're going to give
2879 // back the remainder to the dictionary, since a concurrent allocation
2880 // may otherwise see the heap as empty. (We're willing to take that
2881 // hit if the block is a small block.)
2882 if (rem > 0) {
2883 size_t prefix_size = n * word_sz;
2884 rem_fc = (FreeChunk*)((HeapWord*)fc + prefix_size);
2885 rem_fc->set_size(rem);
2886 rem_fc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
2887 rem_fc->link_next(NULL);
2888 // Above must occur before BOT is updated below.
2889 assert((ssize_t)n > 0 && prefix_size > 0 && rem_fc > fc, "Error");
2890 OrderAccess::storestore();
2891 _bt.split_block((HeapWord*)fc, fc->size(), prefix_size);
2892 assert(fc->is_free(), "Error");
2893 fc->set_size(prefix_size);
2894 if (rem >= IndexSetSize) {
2895 returnChunkToDictionary(rem_fc);
2896 dictionary()->dict_census_update(rem, true /*split*/, true /*birth*/);
2897 rem_fc = NULL;
2898 }
2899 // Otherwise, return it to the small list below.
2900 }
2901 }
2902 if (rem_fc != NULL) {
2903 MutexLockerEx x(_indexedFreeListParLocks[rem],
2904 Mutex::_no_safepoint_check_flag);
2905 _bt.verify_not_unallocated((HeapWord*)rem_fc, rem_fc->size());
2906 _indexedFreeList[rem].return_chunk_at_head(rem_fc);
2907 smallSplitBirth(rem);
2908 }
2909 assert((ssize_t)n > 0 && fc != NULL, "Consistency");
2910 // Now do the splitting up.
2911 // Must do this in reverse order, so that anybody attempting to
2912 // access the main chunk sees it as a single free block until we
2913 // change it.
2914 size_t fc_size = n * word_sz;
2915 // All but first chunk in this loop
2916 for (ssize_t i = n-1; i > 0; i--) {
2917 FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz);
2918 ffc->set_size(word_sz);
2919 ffc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
2920 ffc->link_next(NULL);
2921 // Above must occur before BOT is updated below.
2922 OrderAccess::storestore();
2923 // splitting from the right: after the decrement below, fc_size == i * word_sz
    _bt.mark_block((HeapWord*)ffc, word_sz, true /* reducing */);
    fc_size -= word_sz;
    _bt.verify_not_unallocated((HeapWord*)ffc, ffc->size());
    _bt.verify_single_block((HeapWord*)ffc, ffc->size());
    _bt.verify_single_block((HeapWord*)fc, fc_size);
    // Push this on "fl".
    fl->return_chunk_at_head(ffc);
  }
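  // Illustrative picture for n == 4 (each slot word_sz words wide):
  // starting from [ fc | b1 | b2 | b3 ], the loop above carves off b3,
  // b2, b1 right-to-left and pushes each on "fl"; fc's own size field
  // still claims the entire n * word_sz run until it is reset just below.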
  // First chunk
  assert(fc->is_free() && fc->size() == n*word_sz, "Error: should still be a free block");
  // The blocks above should show their new sizes before the first block below
  fc->set_size(word_sz);
  fc->link_prev(NULL); // idempotent wrt free-ness, see assert above
  fc->link_next(NULL);
  _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
  _bt.verify_single_block((HeapWord*)fc, fc->size());
  fl->return_chunk_at_head(fc);

  assert((ssize_t)n > 0 && (ssize_t)n == fl->count(), "Incorrect number of blocks");
  {
    // Update the stats for this block size.
    MutexLockerEx x(_indexedFreeListParLocks[word_sz],
                    Mutex::_no_safepoint_check_flag);
    const ssize_t births = _indexedFreeList[word_sz].split_births() + n;
    _indexedFreeList[word_sz].set_split_births(births);
  }

  assert(fl->tail()->next() == NULL, "List invariant.");
}

// Set up the space's par_seq_tasks structure for work claiming
// for parallel rescan. See CMSParRemarkTask where this is currently used.
// XXX Need to suitably abstract and generalize this and the next
// method into one.
void
CompactibleFreeListSpace::
initialize_sequential_subtasks_for_rescan(int n_threads) {
  // The "size" of each task is fixed according to rescan_task_size.
  assert(n_threads > 0, "Unexpected n_threads argument");
  const size_t task_size = rescan_task_size();
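  // Round up, so that a partial strip at the end of the used region
  // still gets its own task; e.g. (illustrative numbers) a 10000-word
  // used region with task_size == 4096 yields n_tasks == 3.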
  size_t n_tasks = (used_region().word_size() + task_size - 1)/task_size;
  assert((n_tasks == 0) == used_region().is_empty(), "n_tasks incorrect");
  assert(n_tasks == 0 ||
         ((used_region().start() + (n_tasks - 1)*task_size < used_region().end()) &&
          (used_region().start() + n_tasks*task_size >= used_region().end())),
         "n_tasks calculation incorrect");
  SequentialSubTasksDone* pst = conc_par_seq_tasks();
  assert(!pst->valid(), "Clobbering existing data?");
  // Sets the condition for completion of the subtask (how many threads
  // need to finish in order to be done).
  pst->set_n_threads(n_threads);
  pst->set_n_tasks((int)n_tasks);
}
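
// A rough sketch of how a worker consumes the tasks set up above, using
// the SequentialSubTasksDone claiming protocol (see the actual consumer,
// CMSParRemarkTask, for the authoritative version):
//
//   SequentialSubTasksDone* pst = space->conc_par_seq_tasks();
//   uint nth_task = 0;
//   while (!pst->is_task_claimed(/* reference */ nth_task)) {
//     // ... rescan the nth_task'th task_size-word strip ...
//   }
//   pst->all_tasks_completed(); // Retire pst once every strip is done.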

// Set up the space's par_seq_tasks structure for work claiming
// for parallel concurrent marking. See CMSConcMarkingTask where this is currently used.
void
CompactibleFreeListSpace::
initialize_sequential_subtasks_for_marking(int n_threads,
                                           HeapWord* low) {
  // The "size" of each task is fixed according to marking_task_size.
  assert(n_threads > 0, "Unexpected n_threads argument");
  const size_t task_size = marking_task_size();
  assert(task_size > CardTableModRefBS::card_size_in_words &&
         (task_size % CardTableModRefBS::card_size_in_words == 0),
         "Otherwise arithmetic below would be incorrect");
  MemRegion span = _gen->reserved();
  if (low != NULL) {
    if (span.contains(low)) {
      // Align low down to a card boundary so that
      // we can use block_offset_careful() on span boundaries.
      HeapWord* aligned_low = (HeapWord*)align_size_down((uintptr_t)low,
                                                         CardTableModRefBS::card_size);
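      // With the usual 512-byte cards this rounds low down to the
      // enclosing 512-byte boundary.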
      // Clip span prefix at aligned_low
      span = span.intersection(MemRegion(aligned_low, span.end()));
    } else if (low > span.end()) {
      span = MemRegion(low, low);  // Null region
    } // else use entire span
  }
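  // At this point span is the entire reserved region, its suffix
  // starting at the card-aligned low, or empty if low was beyond the
  // end of the region.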
  assert(span.is_empty() ||
         ((uintptr_t)span.start() % CardTableModRefBS::card_size == 0),
         "span should start at a card boundary");
  size_t n_tasks = (span.word_size() + task_size - 1)/task_size;
  assert((n_tasks == 0) == span.is_empty(), "Inconsistency");
  assert(n_tasks == 0 ||
         ((span.start() + (n_tasks - 1)*task_size < span.end()) &&
          (span.start() + n_tasks*task_size >= span.end())),
         "n_tasks calculation incorrect");
  SequentialSubTasksDone* pst = conc_par_seq_tasks();
  assert(!pst->valid(), "Clobbering existing data?");
  // Sets the condition for completion of the subtask (how many threads
  // need to finish in order to be done).
  pst->set_n_threads(n_threads);
  pst->set_n_tasks((int)n_tasks);
}