Thu, 27 May 2010 19:08:38 -0700
6941466: Oracle rebranding changes for Hotspot repositories
Summary: Change all the Sun copyrights to Oracle copyrights
Reviewed-by: ohair
1 /*
2 * Copyright (c) 2001, 2009, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
25 # include "incls/_precompiled.incl"
26 # include "incls/_compactibleFreeListSpace.cpp.incl"
/////////////////////////////////////////////////////////////////////////
//// CompactibleFreeListSpace
/////////////////////////////////////////////////////////////////////////

// highest ranked free list lock rank
// NOTE: this is decremented by each CFLspace constructor (see the
// _freelistLock initializer below), so spaces constructed later get
// successively lower lock ranks.
int CompactibleFreeListSpace::_lockRank = Mutex::leaf + 3;
// Constructor.
// Wires the space to its block-offset table, selects and creates the
// free-block dictionary, initializes the indexed (small-size) free lists,
// sets up the linear allocation block, and — when running with parallel
// GC threads — creates the per-free-list locks.
CompactibleFreeListSpace::CompactibleFreeListSpace(BlockOffsetSharedArray* bs,
  MemRegion mr, bool use_adaptive_freelists,
  FreeBlockDictionary::DictionaryChoice dictionaryChoice) :
  _dictionaryChoice(dictionaryChoice),
  _adaptive_freelists(use_adaptive_freelists),
  _bt(bs, mr),
  // free list locks are in the range of values taken by _lockRank
  // This range currently is [_leaf+2, _leaf+3]
  // Note: this requires that CFLspace c'tors
  // are called serially in the order in which the locks
  // are acquired in the program text. This is true today.
  _freelistLock(_lockRank--, "CompactibleFreeListSpace._lock", true),
  _parDictionaryAllocLock(Mutex::leaf - 1,  // == rank(ExpandHeap_lock) - 1
                          "CompactibleFreeListSpace._dict_par_lock", true),
  _rescan_task_size(CardTableModRefBS::card_size_in_words * BitsPerWord *
                    CMSRescanMultiple),
  _marking_task_size(CardTableModRefBS::card_size_in_words * BitsPerWord *
                    CMSConcMarkMultiple),
  _collector(NULL)
{
  _bt.set_space(this);
  initialize(mr, SpaceDecorator::Clear, SpaceDecorator::Mangle);
  // We have all of "mr", all of which we place in the dictionary
  // as one big chunk. We'll need to decide here which of several
  // possible alternative dictionary implementations to use. For
  // now the choice is easy, since we have only one working
  // implementation, namely, the simple binary tree (splaying
  // temporarily disabled).
  switch (dictionaryChoice) {
    case FreeBlockDictionary::dictionarySplayTree:
    case FreeBlockDictionary::dictionarySkipList:
    default:
      warning("dictionaryChoice: selected option not understood; using"
              " default BinaryTreeDictionary implementation instead.");
      // Deliberate fall-through to the supported implementation.
    case FreeBlockDictionary::dictionaryBinaryTree:
      _dictionary = new BinaryTreeDictionary(mr);
      break;
  }
  assert(_dictionary != NULL, "CMS dictionary initialization");
  // The indexed free lists are initially all empty and are lazily
  // filled in on demand. Initialize the array elements to NULL.
  initializeIndexedFreeListArray();

  // Not using adaptive free lists assumes that allocation is first
  // from the linAB's.  Also a cms perm gen which can be compacted
  // has to have the klass's klassKlass allocated at a lower
  // address in the heap than the klass so that the klassKlass is
  // moved to its new location before the klass is moved.
  // Set the _refillSize for the linear allocation blocks
  if (!use_adaptive_freelists) {
    // Seed the small linear allocation block with the whole of "mr".
    FreeChunk* fc = _dictionary->getChunk(mr.word_size());
    // The small linAB initially has all the space and will allocate
    // a chunk of any size.
    HeapWord* addr = (HeapWord*) fc;
    _smallLinearAllocBlock.set(addr, fc->size() ,
      1024*SmallForLinearAlloc, fc->size());
    // Note that _unallocated_block is not updated here.
    // Allocations from the linear allocation block should
    // update it.
  } else {
    _smallLinearAllocBlock.set(0, 0, 1024*SmallForLinearAlloc,
                               SmallForLinearAlloc);
  }
  // CMSIndexedFreeListReplenish should be at least 1
  CMSIndexedFreeListReplenish = MAX2((uintx)1, CMSIndexedFreeListReplenish);
  _promoInfo.setSpace(this);
  if (UseCMSBestFit) {
    _fitStrategy = FreeBlockBestFitFirst;
  } else {
    _fitStrategy = FreeBlockStrategyNone;
  }
  checkFreeListConsistency();

  // Initialize locks for parallel case.
  if (ParallelGCThreads > 0) {
    for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
      _indexedFreeListParLocks[i] = new Mutex(Mutex::leaf - 1, // == ExpandHeap_lock - 1
                                              "a freelist par lock",
                                              true);
      if (_indexedFreeListParLocks[i] == NULL)
        vm_exit_during_initialization("Could not allocate a par lock");
      DEBUG_ONLY(
        _indexedFreeList[i].set_protecting_lock(_indexedFreeListParLocks[i]);
      )
    }
    _dictionary->set_par_lock(&_parDictionaryAllocLock);
  }
}
// Like CompactibleSpace forward() but always calls cross_threshold() to
// update the block offset table.  Removed initialize_threshold call because
// CFLS does not use a block offset array for contiguous spaces.
//
// Computes the forwarding address for live object "q" of "size" words,
// switching to the next compaction space (possibly in an older generation)
// when "q" would leave a residual fragment smaller than MinChunkSize.
// Returns the updated compact_top.
HeapWord* CompactibleFreeListSpace::forward(oop q, size_t size,
                                    CompactPoint* cp, HeapWord* compact_top) {
  // q is alive
  // First check if we should switch compaction space
  assert(this == cp->space, "'this' should be current compaction space.");
  size_t compaction_max_size = pointer_delta(end(), compact_top);
  assert(adjustObjectSize(size) == cp->space->adjust_object_size_v(size),
    "virtual adjustObjectSize_v() method is not correct");
  size_t adjusted_size = adjustObjectSize(size);
  assert(compaction_max_size >= MinChunkSize || compaction_max_size == 0,
    "no small fragments allowed");
  assert(minimum_free_block_size() == MinChunkSize,
    "for de-virtualized reference below");
  // Can't leave a nonzero size, residual fragment smaller than MinChunkSize
  if (adjusted_size + MinChunkSize > compaction_max_size &&
      adjusted_size != compaction_max_size) {
    do {
      // switch to next compaction space
      cp->space->set_compaction_top(compact_top);
      cp->space = cp->space->next_compaction_space();
      if (cp->space == NULL) {
        // Ran out of spaces in this generation; move to the previous one.
        cp->gen = GenCollectedHeap::heap()->prev_gen(cp->gen);
        assert(cp->gen != NULL, "compaction must succeed");
        cp->space = cp->gen->first_compaction_space();
        assert(cp->space != NULL, "generation must have a first compaction space");
      }
      compact_top = cp->space->bottom();
      cp->space->set_compaction_top(compact_top);
      // The correct adjusted_size may not be the same as that for this method
      // (i.e., cp->space may no longer be "this"), so adjust the size again.
      // Use the virtual method which is not used above to save the virtual
      // dispatch.
      adjusted_size = cp->space->adjust_object_size_v(size);
      compaction_max_size = pointer_delta(cp->space->end(), compact_top);
      assert(cp->space->minimum_free_block_size() == 0, "just checking");
    } while (adjusted_size > compaction_max_size);
  }

  // store the forwarding pointer into the mark word
  if ((HeapWord*)q != compact_top) {
    q->forward_to(oop(compact_top));
    assert(q->is_gc_marked(), "encoding the pointer should preserve the mark");
  } else {
    // if the object isn't moving we can just set the mark to the default
    // mark and handle it specially later on.
    q->init_mark();
    assert(q->forwardee() == NULL, "should be forwarded to NULL");
  }

  VALIDATE_MARK_SWEEP_ONLY(MarkSweep::register_live_oop(q, adjusted_size));
  compact_top += adjusted_size;

  // we need to update the offset table so that the beginnings of objects can be
  // found during scavenge.  Note that we are updating the offset table based on
  // where the object will be once the compaction phase finishes.

  // Always call cross_threshold().  A contiguous space can only call it when
  // the compaction_top exceeds the current threshold but not for an
  // non-contiguous space.
  cp->threshold =
    cp->space->cross_threshold(compact_top - adjusted_size, compact_top);
  return compact_top;
}
// A modified copy of OffsetTableContigSpace::cross_threshold() with _offsets -> _bt
// and use of single_block instead of alloc_block.  The name here is not really
// appropriate - maybe a more general name could be invented for both the
// contiguous and noncontiguous spaces.
//
// Records [start, the_end) as a single block in the block offset table and
// returns the space's end (there is no per-card threshold to advance here).
HeapWord* CompactibleFreeListSpace::cross_threshold(HeapWord* start, HeapWord* the_end) {
  _bt.single_block(start, the_end);
  return end();
}
// Initialize them to NULL.
// Each indexed free list i holds chunks of exactly i words; after reset()
// each list is empty (no head/tail, zero count) with its hint set to
// IndexSetSize (i.e., "no larger list known to be non-empty").
void CompactibleFreeListSpace::initializeIndexedFreeListArray() {
  for (size_t i = 0; i < IndexSetSize; i++) {
    // Note that on platforms where objects are double word aligned,
    // the odd array elements are not used.  It is convenient, however,
    // to map directly from the object size to the array element.
    _indexedFreeList[i].reset(IndexSetSize);
    _indexedFreeList[i].set_size(i);
    assert(_indexedFreeList[i].count() == 0, "reset check failed");
    assert(_indexedFreeList[i].head() == NULL, "reset check failed");
    assert(_indexedFreeList[i].tail() == NULL, "reset check failed");
    assert(_indexedFreeList[i].hint() == IndexSetSize, "reset check failed");
  }
}
217 void CompactibleFreeListSpace::resetIndexedFreeListArray() {
218 for (int i = 1; i < IndexSetSize; i++) {
219 assert(_indexedFreeList[i].size() == (size_t) i,
220 "Indexed free list sizes are incorrect");
221 _indexedFreeList[i].reset(IndexSetSize);
222 assert(_indexedFreeList[i].count() == 0, "reset check failed");
223 assert(_indexedFreeList[i].head() == NULL, "reset check failed");
224 assert(_indexedFreeList[i].tail() == NULL, "reset check failed");
225 assert(_indexedFreeList[i].hint() == IndexSetSize, "reset check failed");
226 }
227 }
// Reset the space so that "mr" becomes its only free chunk: empty the
// indexed free lists and the dictionary, then return the single chunk
// covering "mr" (if non-empty) to the appropriate free-chunk structure.
void CompactibleFreeListSpace::reset(MemRegion mr) {
  resetIndexedFreeListArray();
  dictionary()->reset();
  if (BlockOffsetArrayUseUnallocatedBlock) {
    assert(end() == mr.end(), "We are compacting to the bottom of CMS gen");
    // Everything's allocated until proven otherwise.
    _bt.set_unallocated_block(end());
  }
  if (!mr.is_empty()) {
    assert(mr.word_size() >= MinChunkSize, "Chunk size is too small");
    _bt.single_block(mr.start(), mr.word_size());
    FreeChunk* fc = (FreeChunk*) mr.start();
    fc->setSize(mr.word_size());
    // Chunks of IndexSetSize words or more live in the dictionary;
    // smaller ones go on the indexed free list for their exact size.
    if (mr.word_size() >= IndexSetSize ) {
      returnChunkToDictionary(fc);
    } else {
      _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
      _indexedFreeList[mr.word_size()].returnChunkAtHead(fc);
    }
  }
  _promoInfo.reset();
  _smallLinearAllocBlock._ptr = NULL;
  _smallLinearAllocBlock._word_size = 0;
}
// After compaction the space is one contiguous free region
// [compaction_top(), end()); rebuild the free data structures around it
// and refill the linear allocation block(s).
void CompactibleFreeListSpace::reset_after_compaction() {
  // Reset the space to the new reality - one free chunk.
  MemRegion mr(compaction_top(), end());
  reset(mr);
  // Now refill the linear allocation block(s) if possible.
  if (_adaptive_freelists) {
    refillLinearAllocBlocksIfNeeded();
  } else {
    // Place as much of mr in the linAB as we can get,
    // provided it was big enough to go into the dictionary.
    FreeChunk* fc = dictionary()->findLargestDict();
    if (fc != NULL) {
      // reset() put all of mr into the dictionary as one chunk, so the
      // largest dictionary chunk must be that whole region.
      assert(fc->size() == mr.word_size(),
        "Why was the chunk broken up?");
      removeChunkFromDictionary(fc);
      HeapWord* addr = (HeapWord*) fc;
      _smallLinearAllocBlock.set(addr, fc->size() ,
        1024*SmallForLinearAlloc, fc->size());
      // Note that _unallocated_block is not updated here.
    }
  }
}
// Walks the entire dictionary, returning a coterminal
// chunk, if it exists. Use with caution since it involves
// a potentially complete walk of a potentially large tree.
// Returns NULL if no free chunk ends exactly at end().
FreeChunk* CompactibleFreeListSpace::find_chunk_at_end() {

  assert_lock_strong(&_freelistLock);

  return dictionary()->find_chunk_ends_at(end());
}
288 #ifndef PRODUCT
// Zero the "returned bytes" allocation statistic on every in-use indexed
// free list (debug/statistics support; compiled out in PRODUCT builds).
void CompactibleFreeListSpace::initializeIndexedFreeListArrayReturnedBytes() {
  for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
    _indexedFreeList[i].allocation_stats()->set_returnedBytes(0);
  }
}
// Sum the "returned bytes" statistic over all in-use indexed free lists
// (debug/statistics support; compiled out in PRODUCT builds).
size_t CompactibleFreeListSpace::sumIndexedFreeListArrayReturnedBytes() {
  size_t sum = 0;
  for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
    sum += _indexedFreeList[i].allocation_stats()->returnedBytes();
  }
  return sum;
}
303 size_t CompactibleFreeListSpace::totalCountInIndexedFreeLists() const {
304 size_t count = 0;
305 for (int i = MinChunkSize; i < IndexSetSize; i++) {
306 debug_only(
307 ssize_t total_list_count = 0;
308 for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
309 fc = fc->next()) {
310 total_list_count++;
311 }
312 assert(total_list_count == _indexedFreeList[i].count(),
313 "Count in list is incorrect");
314 )
315 count += _indexedFreeList[i].count();
316 }
317 return count;
318 }
// Total number of free chunks in the space: indexed lists + dictionary,
// plus one for the linear allocation block if it currently holds space.
size_t CompactibleFreeListSpace::totalCount() {
  size_t num = totalCountInIndexedFreeLists();
  num += dictionary()->totalCount();
  if (_smallLinearAllocBlock._word_size != 0) {
    num++;
  }
  return num;
}
328 #endif
// A block at address p is free iff the FreeChunk header there says so.
// NOTE(review): assumes p is a block boundary; not validated here.
bool CompactibleFreeListSpace::is_free_block(const HeapWord* p) const {
  FreeChunk* fc = (FreeChunk*) p;
  return fc->isFree();
}
// Used space is whatever part of the capacity is not free (in bytes).
size_t CompactibleFreeListSpace::used() const {
  return capacity() - free();
}
// Free space in bytes: dictionary chunks + indexed-list chunks + the
// linear allocation block.  See the caveat below about precision.
size_t CompactibleFreeListSpace::free() const {
  // "MT-safe, but not MT-precise"(TM), if you will: i.e.
  // if you do this while the structures are in flux you
  // may get an approximate answer only; for instance
  // because there is concurrent allocation either
  // directly by mutators or for promotion during a GC.
  // It's "MT-safe", however, in the sense that you are guaranteed
  // not to crash and burn, for instance, because of walking
  // pointers that could disappear as you were walking them.
  // The approximation is because the various components
  // that are read below are not read atomically (and
  // further the computation of totalSizeInIndexedFreeLists()
  // is itself a non-atomic computation. The normal use of
  // this is during a resize operation at the end of GC
  // and at that time you are guaranteed to get the
  // correct actual value. However, for instance, this is
  // also read completely asynchronously by the "perf-sampler"
  // that supports jvmstat, and you are apt to see the values
  // flicker in such cases.
  assert(_dictionary != NULL, "No _dictionary?");
  return (_dictionary->totalChunkSize(DEBUG_ONLY(freelistLock())) +
          totalSizeInIndexedFreeLists() +
          _smallLinearAllocBlock._word_size) * HeapWordSize;
}
// Largest allocation (in words) currently satisfiable from this space:
// the max of the biggest dictionary chunk, the usable part of the linAB,
// and the largest non-empty indexed free list.
size_t CompactibleFreeListSpace::max_alloc_in_words() const {
  assert(_dictionary != NULL, "No _dictionary?");
  assert_locked();
  size_t res = _dictionary->maxChunkSize();
  // The linAB can satisfy at most SmallForLinearAlloc - 1 words.
  res = MAX2(res, MIN2(_smallLinearAllocBlock._word_size,
                       (size_t) SmallForLinearAlloc - 1));
  // XXX the following could potentially be pretty slow;
  // should one, pessimally for the rare cases when res
  // calculated above is less than IndexSetSize,
  // just return res calculated above? My reasoning was that
  // those cases will be so rare that the extra time spent doesn't
  // really matter....
  // Note: do not change the loop test i >= res + IndexSetStride
  // to i > res below, because i is unsigned and res may be zero.
  for (size_t i = IndexSetSize - 1; i >= res + IndexSetStride;
       i -= IndexSetStride) {
    if (_indexedFreeList[i].head() != NULL) {
      assert(_indexedFreeList[i].count() != 0, "Inconsistent FreeList");
      return i;
    }
  }
  return res;
}
388 void CompactibleFreeListSpace::print_indexed_free_lists(outputStream* st)
389 const {
390 reportIndexedFreeListStatistics();
391 gclog_or_tty->print_cr("Layout of Indexed Freelists");
392 gclog_or_tty->print_cr("---------------------------");
393 FreeList::print_labels_on(st, "size");
394 for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
395 _indexedFreeList[i].print_on(gclog_or_tty);
396 for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
397 fc = fc->next()) {
398 gclog_or_tty->print_cr("\t[" PTR_FORMAT "," PTR_FORMAT ") %s",
399 fc, (HeapWord*)fc + i,
400 fc->cantCoalesce() ? "\t CC" : "");
401 }
402 }
403 }
// Print the promotion-info blocks; delegates entirely to PromotionInfo.
void CompactibleFreeListSpace::print_promo_info_blocks(outputStream* st)
const {
  _promoInfo.print_on(st);
}
// Print statistics and the per-size free lists held in the dictionary tree.
void CompactibleFreeListSpace::print_dictionary_free_lists(outputStream* st)
const {
  _dictionary->reportStatistics();
  st->print_cr("Layout of Freelists in Tree");
  st->print_cr("---------------------------");
  _dictionary->print_free_lists(st);
}
// Block closure that prints a description of every block it visits
// (object or free chunk) to the given stream; used by
// dump_at_safepoint_with_locks() below.
class BlkPrintingClosure: public BlkClosure {
  const CMSCollector*             _collector;
  const CompactibleFreeListSpace* _sp;
  const CMSBitMap*                _live_bit_map;
  // True once the collector is past final marking, at which point the
  // live bit map can be consulted to distinguish live from dead objects.
  const bool                      _post_remark;
  outputStream*                   _st;
public:
  BlkPrintingClosure(const CMSCollector* collector,
                     const CompactibleFreeListSpace* sp,
                     const CMSBitMap* live_bit_map,
                     outputStream* st):
    _collector(collector),
    _sp(sp),
    _live_bit_map(live_bit_map),
    _post_remark(collector->abstract_state() > CMSCollector::FinalMarking),
    _st(st) { }
  // Prints the block at addr and returns its size in words.
  size_t do_blk(HeapWord* addr);
};
// Print a one-line description of the block at addr (live/dead object or
// free chunk), optionally followed by a full dump of its contents when
// CMSPrintObjectsInDump / CMSPrintChunksInDump is set.  Returns the block
// size in words so the caller can advance to the next block.
size_t BlkPrintingClosure::do_blk(HeapWord* addr) {
  size_t sz = _sp->block_size_no_stall(addr, _collector);
  assert(sz != 0, "Should always be able to compute a size");
  if (_sp->block_is_obj(addr)) {
    // Liveness can only be judged after remark (see _post_remark).
    const bool dead = _post_remark && !_live_bit_map->isMarked(addr);
    _st->print_cr(PTR_FORMAT ": %s object of size " SIZE_FORMAT "%s",
      addr,
      dead ? "dead" : "live",
      sz,
      (!dead && CMSPrintObjectsInDump) ? ":" : ".");
    if (CMSPrintObjectsInDump && !dead) {
      oop(addr)->print_on(_st);
      _st->print_cr("--------------------------------------");
    }
  } else { // free block
    _st->print_cr(PTR_FORMAT ": free block of size " SIZE_FORMAT "%s",
      addr, sz, CMSPrintChunksInDump ? ":" : ".");
    if (CMSPrintChunksInDump) {
      ((FreeChunk*)addr)->print_on(_st);
      _st->print_cr("--------------------------------------");
    }
  }
  return sz;
}
462 void CompactibleFreeListSpace::dump_at_safepoint_with_locks(CMSCollector* c,
463 outputStream* st) {
464 st->print_cr("\n=========================");
465 st->print_cr("Block layout in CMS Heap:");
466 st->print_cr("=========================");
467 BlkPrintingClosure bpcl(c, this, c->markBitMap(), st);
468 blk_iterate(&bpcl);
470 st->print_cr("\n=======================================");
471 st->print_cr("Order & Layout of Promotion Info Blocks");
472 st->print_cr("=======================================");
473 print_promo_info_blocks(st);
475 st->print_cr("\n===========================");
476 st->print_cr("Order of Indexed Free Lists");
477 st->print_cr("=========================");
478 print_indexed_free_lists(st);
480 st->print_cr("\n=================================");
481 st->print_cr("Order of Free Lists in Dictionary");
482 st->print_cr("=================================");
483 print_dictionary_free_lists(st);
484 }
487 void CompactibleFreeListSpace::reportFreeListStatistics() const {
488 assert_lock_strong(&_freelistLock);
489 assert(PrintFLSStatistics != 0, "Reporting error");
490 _dictionary->reportStatistics();
491 if (PrintFLSStatistics > 1) {
492 reportIndexedFreeListStatistics();
493 size_t totalSize = totalSizeInIndexedFreeLists() +
494 _dictionary->totalChunkSize(DEBUG_ONLY(freelistLock()));
495 gclog_or_tty->print(" free=%ld frag=%1.4f\n", totalSize, flsFrag());
496 }
497 }
499 void CompactibleFreeListSpace::reportIndexedFreeListStatistics() const {
500 assert_lock_strong(&_freelistLock);
501 gclog_or_tty->print("Statistics for IndexedFreeLists:\n"
502 "--------------------------------\n");
503 size_t totalSize = totalSizeInIndexedFreeLists();
504 size_t freeBlocks = numFreeBlocksInIndexedFreeLists();
505 gclog_or_tty->print("Total Free Space: %d\n", totalSize);
506 gclog_or_tty->print("Max Chunk Size: %d\n", maxChunkSizeInIndexedFreeLists());
507 gclog_or_tty->print("Number of Blocks: %d\n", freeBlocks);
508 if (freeBlocks != 0) {
509 gclog_or_tty->print("Av. Block Size: %d\n", totalSize/freeBlocks);
510 }
511 }
// Number of free chunks held across all in-use indexed free lists.
// In debug builds each list's cached count is cross-checked by walking it.
size_t CompactibleFreeListSpace::numFreeBlocksInIndexedFreeLists() const {
  size_t res = 0;
  for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
    debug_only(
      ssize_t recount = 0;
      for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
         fc = fc->next()) {
        recount += 1;
      }
      assert(recount == _indexedFreeList[i].count(),
        "Incorrect count in list");
    )
    res += _indexedFreeList[i].count();
  }
  return res;
}
// Size (in words) of the largest non-empty indexed free list, scanning
// from the top; 0 if all indexed lists are empty.
// NOTE(review): the "i != 0" test relies on IndexSetSize - 1 being an
// exact multiple of IndexSetStride; otherwise the unsigned index would
// wrap past zero — confirm the invariant holds for all configurations.
size_t CompactibleFreeListSpace::maxChunkSizeInIndexedFreeLists() const {
  for (size_t i = IndexSetSize - 1; i != 0; i -= IndexSetStride) {
    if (_indexedFreeList[i].head() != NULL) {
      assert(_indexedFreeList[i].count() != 0, "Inconsistent FreeList");
      return (size_t)i;
    }
  }
  return 0;
}
// Change the space's end to "value", resizing the block offset table and,
// when the space grows, handing the newly exposed region either to the
// linear allocation block or to the free lists.
void CompactibleFreeListSpace::set_end(HeapWord* value) {
  HeapWord* prevEnd = end();
  assert(prevEnd != value, "unnecessary set_end call");
  assert(prevEnd == NULL || value >= unallocated_block(), "New end is below unallocated block");
  _end = value;
  if (prevEnd != NULL) {
    // Resize the underlying block offset table.
    _bt.resize(pointer_delta(value, bottom()));
    if (value <= prevEnd) {
      // Shrinking: nothing to return to the free structures; just check
      // we have not cut into the unallocated block.
      assert(value >= unallocated_block(), "New end is below unallocated block");
    } else {
      // Now, take this new chunk and add it to the free blocks.
      // Note that the BOT has not yet been updated for this block.
      size_t newFcSize = pointer_delta(value, prevEnd);
      // XXX This is REALLY UGLY and should be fixed up. XXX
      if (!_adaptive_freelists && _smallLinearAllocBlock._ptr == NULL) {
        // Mark the boundary of the new block in BOT
        _bt.mark_block(prevEnd, value);
        // put it all in the linAB
        if (ParallelGCThreads == 0) {
          _smallLinearAllocBlock._ptr = prevEnd;
          _smallLinearAllocBlock._word_size = newFcSize;
          repairLinearAllocBlock(&_smallLinearAllocBlock);
        } else { // ParallelGCThreads > 0
          // Par allocations from the linAB are protected by this lock.
          MutexLockerEx x(parDictionaryAllocLock(),
                          Mutex::_no_safepoint_check_flag);
          _smallLinearAllocBlock._ptr = prevEnd;
          _smallLinearAllocBlock._word_size = newFcSize;
          repairLinearAllocBlock(&_smallLinearAllocBlock);
        }
        // Births of chunks put into a LinAB are not recorded.  Births
        // of chunks as they are allocated out of a LinAB are.
      } else {
        // Add the block to the free lists, if possible coalescing it
        // with the last free block, and update the BOT and census data.
        addChunkToFreeListsAtEndRecordingStats(prevEnd, newFcSize);
      }
    }
  }
}
// Dirty-card-to-oop closure specialized for CompactibleFreeListSpace.
// Overrides the mem-region walkers with parallel ("_par") and serial
// ("_nopar") variants; the macro below declares the trio for each of the
// two closure types used (see the DEFN macro further down for the bodies).
class FreeListSpace_DCTOC : public Filtering_DCTOC {
  CompactibleFreeListSpace* _cfls;
  CMSCollector* _collector;
protected:
  // Override.
#define walk_mem_region_with_cl_DECL(ClosureType)                       \
  virtual void walk_mem_region_with_cl(MemRegion mr,                    \
                                       HeapWord* bottom, HeapWord* top, \
                                       ClosureType* cl);                \
      void walk_mem_region_with_cl_par(MemRegion mr,                    \
                                       HeapWord* bottom, HeapWord* top, \
                                       ClosureType* cl);                \
      void walk_mem_region_with_cl_nopar(MemRegion mr,                  \
                                         HeapWord* bottom, HeapWord* top, \
                                         ClosureType* cl)
  walk_mem_region_with_cl_DECL(OopClosure);
  walk_mem_region_with_cl_DECL(FilteringClosure);

public:
  FreeListSpace_DCTOC(CompactibleFreeListSpace* sp,
                      CMSCollector* collector,
                      OopClosure* cl,
                      CardTableModRefBS::PrecisionStyle precision,
                      HeapWord* boundary) :
    Filtering_DCTOC(sp, cl, precision, boundary),
    _cfls(sp), _collector(collector) {}
};
// We de-virtualize the block-related calls below, since we know that our
// space is a CompactibleFreeListSpace.
//
// The macro expands to three definitions per closure type:
//  - walk_mem_region_with_cl: dispatches to the par or nopar variant
//    depending on whether parallel GC worker threads are active;
//  - ..._par / ..._nopar: walk [bottom, top), applying the closure to
//    each live object not allocated since save-marks and not dead, using
//    the parallel-safe or serial block-size/block-is-obj queries
//    respectively.
#define FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(ClosureType)          \
void FreeListSpace_DCTOC::walk_mem_region_with_cl(MemRegion mr,                 \
                                                 HeapWord* bottom,              \
                                                 HeapWord* top,                 \
                                                 ClosureType* cl) {             \
   if (SharedHeap::heap()->n_par_threads() > 0) {                               \
     walk_mem_region_with_cl_par(mr, bottom, top, cl);                          \
   } else {                                                                     \
     walk_mem_region_with_cl_nopar(mr, bottom, top, cl);                        \
   }                                                                            \
}                                                                               \
void FreeListSpace_DCTOC::walk_mem_region_with_cl_par(MemRegion mr,             \
                                                      HeapWord* bottom,         \
                                                      HeapWord* top,            \
                                                      ClosureType* cl) {        \
  /* Skip parts that are before "mr", in case "block_start" sent us             \
     back too far. */                                                           \
  HeapWord* mr_start = mr.start();                                              \
  size_t bot_size = _cfls->CompactibleFreeListSpace::block_size(bottom);        \
  HeapWord* next = bottom + bot_size;                                           \
  while (next < mr_start) {                                                     \
    bottom = next;                                                              \
    bot_size = _cfls->CompactibleFreeListSpace::block_size(bottom);             \
    next = bottom + bot_size;                                                   \
  }                                                                             \
                                                                                \
  while (bottom < top) {                                                        \
    if (_cfls->CompactibleFreeListSpace::block_is_obj(bottom) &&                \
        !_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks(       \
                    oop(bottom)) &&                                             \
        !_collector->CMSCollector::is_dead_obj(oop(bottom))) {                  \
      size_t word_sz = oop(bottom)->oop_iterate(cl, mr);                        \
      bottom += _cfls->adjustObjectSize(word_sz);                               \
    } else {                                                                    \
      bottom += _cfls->CompactibleFreeListSpace::block_size(bottom);            \
    }                                                                           \
  }                                                                             \
}                                                                               \
void FreeListSpace_DCTOC::walk_mem_region_with_cl_nopar(MemRegion mr,           \
                                                        HeapWord* bottom,       \
                                                        HeapWord* top,          \
                                                        ClosureType* cl) {      \
  /* Skip parts that are before "mr", in case "block_start" sent us             \
     back too far. */                                                           \
  HeapWord* mr_start = mr.start();                                              \
  size_t bot_size = _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);  \
  HeapWord* next = bottom + bot_size;                                           \
  while (next < mr_start) {                                                     \
    bottom = next;                                                              \
    bot_size = _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);       \
    next = bottom + bot_size;                                                   \
  }                                                                             \
                                                                                \
  while (bottom < top) {                                                        \
    if (_cfls->CompactibleFreeListSpace::block_is_obj_nopar(bottom) &&          \
        !_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks(       \
                    oop(bottom)) &&                                             \
        !_collector->CMSCollector::is_dead_obj(oop(bottom))) {                  \
      size_t word_sz = oop(bottom)->oop_iterate(cl, mr);                        \
      bottom += _cfls->adjustObjectSize(word_sz);                               \
    } else {                                                                    \
      bottom += _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);      \
    }                                                                           \
  }                                                                             \
}

// (There are only two of these, rather than N, because the split is due
// only to the introduction of the FilteringClosure, a local part of the
// impl of this abstraction.)
FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(OopClosure)
FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(FilteringClosure)
// Factory for the dirty-card-to-oop closure specialized to this space
// (see FreeListSpace_DCTOC above).  Caller owns the returned closure.
DirtyCardToOopClosure*
CompactibleFreeListSpace::new_dcto_cl(OopClosure* cl,
                                      CardTableModRefBS::PrecisionStyle precision,
                                      HeapWord* boundary) {
  return new FreeListSpace_DCTOC(this, _collector, cl, precision, boundary);
}
// Note on locking for the space iteration functions:
// since the collector's iteration activities are concurrent with
// allocation activities by mutators, absent a suitable mutual exclusion
// mechanism the iterators may go awry. For instance a block being iterated
// may suddenly be allocated or divided up and part of it allocated and
// so on.

// Apply the given closure to each block in the space.
// "Careful" variant: the closure's do_blk_careful() returns the step size,
// allowing it to cope with blocks whose parsability is uncertain.
void CompactibleFreeListSpace::blk_iterate_careful(BlkClosureCareful* cl) {
  assert_lock_strong(freelistLock());
  HeapWord *cur, *limit;
  for (cur = bottom(), limit = end(); cur < limit;
       cur += cl->do_blk_careful(cur));
}
// Apply the given closure to each block in the space.
// The closure's do_blk() returns the block size, used as the step.
void CompactibleFreeListSpace::blk_iterate(BlkClosure* cl) {
  assert_lock_strong(freelistLock());
  HeapWord *cur, *limit;
  for (cur = bottom(), limit = end(); cur < limit;
       cur += cl->do_blk(cur));
}
// Apply the given closure to each oop in the space.
// Free blocks are skipped; only blocks recognized as objects are iterated.
void CompactibleFreeListSpace::oop_iterate(OopClosure* cl) {
  assert_lock_strong(freelistLock());
  HeapWord *cur, *limit;
  size_t curSize;
  for (cur = bottom(), limit = end(); cur < limit;
       cur += curSize) {
    curSize = block_size(cur);
    if (block_is_obj(cur)) {
      oop(cur)->oop_iterate(cl);
    }
  }
}
// Apply the given closure to each oop in the space \intersect memory region.
// The first and last objects, which may straddle the region boundary, are
// handled with a region-restricted closure (smr_blk); fully contained
// objects get the unrestricted closure.
void CompactibleFreeListSpace::oop_iterate(MemRegion mr, OopClosure* cl) {
  assert_lock_strong(freelistLock());
  if (is_empty()) {
    return;
  }
  MemRegion cur = MemRegion(bottom(), end());
  mr = mr.intersection(cur);
  if (mr.is_empty()) {
    return;
  }
  if (mr.equals(cur)) {
    // Whole space requested; use the simpler full iteration.
    oop_iterate(cl);
    return;
  }
  assert(mr.end() <= end(), "just took an intersection above");
  HeapWord* obj_addr = block_start(mr.start());
  HeapWord* t = mr.end();

  SpaceMemRegionOopsIterClosure smr_blk(cl, mr);
  if (block_is_obj(obj_addr)) {
    // Handle first object specially.
    oop obj = oop(obj_addr);
    obj_addr += adjustObjectSize(obj->oop_iterate(&smr_blk));
  } else {
    FreeChunk* fc = (FreeChunk*)obj_addr;
    obj_addr += fc->size();
  }
  while (obj_addr < t) {
    HeapWord* obj = obj_addr;
    obj_addr += block_size(obj_addr);
    // If "obj_addr" is not greater than top, then the
    // entire object "obj" is within the region.
    if (obj_addr <= t) {
      if (block_is_obj(obj)) {
        oop(obj)->oop_iterate(cl);
      }
    } else {
      // "obj" extends beyond end of region
      if (block_is_obj(obj)) {
        oop(obj)->oop_iterate(&smr_blk);
      }
      break;
    }
  }
}
// NOTE: In the following methods, in order to safely be able to
// apply the closure to an object, we need to be sure that the
// object has been initialized. We are guaranteed that an object
// is initialized if we are holding the Heap_lock with the
// world stopped.
//
// Debug check (see callers' NOT_PRODUCT usage): once the VM and universe
// are fully initialized, object iteration requires being at a safepoint.
void CompactibleFreeListSpace::verify_objects_initialized() const {
  if (is_init_completed()) {
    assert_locked_or_safepoint(Heap_lock);
    if (Universe::is_fully_initialized()) {
      guarantee(SafepointSynchronize::is_at_safepoint(),
                "Required for objects to be initialized");
    }
  } // else make a concession at vm start-up
}
// Apply the given closure to each object in the space.
// Free blocks are skipped; liveness is NOT checked (contrast with
// safe_object_iterate() below).
void CompactibleFreeListSpace::object_iterate(ObjectClosure* blk) {
  assert_lock_strong(freelistLock());
  NOT_PRODUCT(verify_objects_initialized());
  HeapWord *cur, *limit;
  size_t curSize;
  for (cur = bottom(), limit = end(); cur < limit;
       cur += curSize) {
    curSize = block_size(cur);
    if (block_is_obj(cur)) {
      blk->do_object(oop(cur));
    }
  }
}
// Apply the given closure to each live object in the space
// The usage of CompactibleFreeListSpace
// by the ConcurrentMarkSweepGeneration for concurrent GC's allows
// objects in the space with references to objects that are no longer
// valid.  For example, an object may reference another object
// that has already been swept up (collected).  This method uses
// obj_is_alive() to determine whether it is safe to apply the closure to
// an object.  See obj_is_alive() for details on how liveness of an
// object is decided.

void CompactibleFreeListSpace::safe_object_iterate(ObjectClosure* blk) {
  assert_lock_strong(freelistLock());
  NOT_PRODUCT(verify_objects_initialized());
  HeapWord *cur, *limit;
  size_t curSize;
  for (cur = bottom(), limit = end(); cur < limit;
       cur += curSize) {
    curSize = block_size(cur);
    // Only apply the closure to blocks that are objects AND still alive.
    if (block_is_obj(cur) && obj_is_alive(cur)) {
      blk->do_object(oop(cur));
    }
  }
}
// Apply "cl" to the objects intersecting the given memory region.
// After checking the lock and (debug-only) object initialization,
// delegates the actual walk to the Space superclass implementation.
829 void CompactibleFreeListSpace::object_iterate_mem(MemRegion mr,
830 UpwardsObjectClosure* cl) {
831 assert_locked(freelistLock());
832 NOT_PRODUCT(verify_objects_initialized());
833 Space::object_iterate_mem(mr, cl);
834 }
836 // Callers of this iterator beware: The closure application should
837 // be robust in the face of uninitialized objects and should (always)
838 // return a correct size so that the next addr + size below gives us a
839 // valid block boundary. [See for instance,
840 // ScanMarkedObjectsAgainCarefullyClosure::do_object_careful()
841 // in ConcurrentMarkSweepGeneration.cpp.]
// Returns the address of the first unparsable object encountered
// (signalled by the closure returning size 0), or NULL if the entire
// space was traversed successfully.
842 HeapWord*
843 CompactibleFreeListSpace::object_iterate_careful(ObjectClosureCareful* cl) {
844 assert_lock_strong(freelistLock());
845 HeapWord *addr, *last;
846 size_t size;
847 for (addr = bottom(), last = end();
848 addr < last; addr += size) {
849 FreeChunk* fc = (FreeChunk*)addr;
850 if (fc->isFree()) {
851 // Since we hold the free list lock, which protects direct
852 // allocation in this generation by mutators, a free object
853 // will remain free throughout this iteration code.
854 size = fc->size();
855 } else {
856 // Note that the object need not necessarily be initialized,
857 // because (for instance) the free list lock does NOT protect
858 // object initialization. The closure application below must
859 // therefore be correct in the face of uninitialized objects.
860 size = cl->do_object_careful(oop(addr));
861 if (size == 0) {
862 // An unparsable object found. Signal early termination.
863 return addr;
864 }
865 }
866 }
867 return NULL;
868 }
870 // Callers of this iterator beware: The closure application should
871 // be robust in the face of uninitialized objects and should (always)
872 // return a correct size so that the next addr + size below gives us a
873 // valid block boundary. [See for instance,
874 // ScanMarkedObjectsAgainCarefullyClosure::do_object_careful()
875 // in ConcurrentMarkSweepGeneration.cpp.]
// Region-restricted variant of object_iterate_careful(): walks only the
// blocks intersecting "mr" (which must be non-empty and within the
// space), starting from the block containing mr.start(). Returns the
// address of the first unparsable object, or NULL on full traversal.
876 HeapWord*
877 CompactibleFreeListSpace::object_iterate_careful_m(MemRegion mr,
878 ObjectClosureCareful* cl) {
879 assert_lock_strong(freelistLock());
880 // Can't use used_region() below because it may not necessarily
881 // be the same as [bottom(),end()); although we could
882 // use [used_region().start(),round_to(used_region().end(),CardSize)),
883 // that appears too cumbersome, so we just do the simpler check
884 // in the assertion below.
885 assert(!mr.is_empty() && MemRegion(bottom(),end()).contains(mr),
886 "mr should be non-empty and within used space");
887 HeapWord *addr, *end;
888 size_t size;
// block_start_careful() backs addr up to a block boundary at or before
// mr.start(), so the walk begins on a valid block.
889 for (addr = block_start_careful(mr.start()), end = mr.end();
890 addr < end; addr += size) {
891 FreeChunk* fc = (FreeChunk*)addr;
892 if (fc->isFree()) {
893 // Since we hold the free list lock, which protects direct
894 // allocation in this generation by mutators, a free object
895 // will remain free throughout this iteration code.
896 size = fc->size();
897 } else {
898 // Note that the object need not necessarily be initialized,
899 // because (for instance) the free list lock does NOT protect
900 // object initialization. The closure application below must
901 // therefore be correct in the face of uninitialized objects.
902 size = cl->do_object_careful_m(oop(addr), mr);
903 if (size == 0) {
904 // An unparsable object found. Signal early termination.
905 return addr;
906 }
907 }
908 }
909 return NULL;
910 }
// Return the start of the block containing "p", as computed by the
// block offset table (_bt).
913 HeapWord* CompactibleFreeListSpace::block_start_const(const void* p) const {
914 NOT_PRODUCT(verify_objects_initialized());
915 return _bt.block_start(p);
916 }
// "Careful" variant of block_start_const(), delegating to the BOT's
// block_start_careful(). NOTE(review): presumably this may return a
// conservative (earlier) boundary rather than the exact block start --
// confirm against the BlockOffsetArray contract.
918 HeapWord* CompactibleFreeListSpace::block_start_careful(const void* p) const {
919 return _bt.block_start_careful(p);
920 }
// Return the size in HeapWords of the block starting at "p", which may
// concurrently change from a free chunk into an allocated object.
// Lock-free: retries until it observes a consistent view, and may spin
// waiting for an allocating mutator to publish the object's klass.
922 size_t CompactibleFreeListSpace::block_size(const HeapWord* p) const {
923 NOT_PRODUCT(verify_objects_initialized());
924 assert(MemRegion(bottom(), end()).contains(p), "p not in space");
925 // This must be volatile, or else there is a danger that the compiler
926 // will compile the code below into a sometimes-infinite loop, by keeping
927 // the value read the first time in a register.
928 while (true) {
929 // We must do this until we get a consistent view of the object.
930 if (FreeChunk::indicatesFreeChunk(p)) {
931 volatile FreeChunk* fc = (volatile FreeChunk*)p;
932 size_t res = fc->size();
933 // If the object is still a free chunk, return the size, else it
934 // has been allocated so try again.
// Re-check after reading the size: only if the block still looks free
// is the size we read known to be valid.
935 if (FreeChunk::indicatesFreeChunk(p)) {
936 assert(res != 0, "Block size should not be 0");
937 return res;
938 }
939 } else {
940 // must read from what 'p' points to in each loop.
// A NULL klass means the object header is not yet published; loop
// (i.e. stall) until the initializing thread writes it.
941 klassOop k = ((volatile oopDesc*)p)->klass_or_null();
942 if (k != NULL) {
943 assert(k->is_oop(true /* ignore mark word */), "Should really be klass oop.");
944 oop o = (oop)p;
945 assert(o->is_parsable(), "Should be parsable");
946 assert(o->is_oop(true /* ignore mark word */), "Should be an oop.");
947 size_t res = o->size_given_klass(k->klass_part());
948 res = adjustObjectSize(res);
949 assert(res != 0, "Block size should not be 0");
950 return res;
951 }
952 }
953 }
954 }
956 // A variant of the above that uses the Printezis bits for
957 // unparsable but allocated objects. This avoids any possible
958 // stalls waiting for mutators to initialize objects, and is
959 // thus potentially faster than the variant above. However,
960 // this variant may return a zero size for a block that is
961 // under mutation and for which a consistent size cannot be
962 // inferred without stalling; see CMSCollector::block_size_if_printezis_bits().
963 size_t CompactibleFreeListSpace::block_size_no_stall(HeapWord* p,
964 const CMSCollector* c)
965 const {
966 assert(MemRegion(bottom(), end()).contains(p), "p not in space");
967 // This must be volatile, or else there is a danger that the compiler
968 // will compile the code below into a sometimes-infinite loop, by keeping
969 // the value read the first time in a register.
// Debug-only iteration counter: unlike block_size(), this loop is
// asserted to run at most once -- every path either returns or
// trips the loop assertions below.
970 DEBUG_ONLY(uint loops = 0;)
971 while (true) {
972 // We must do this until we get a consistent view of the object.
973 if (FreeChunk::indicatesFreeChunk(p)) {
974 volatile FreeChunk* fc = (volatile FreeChunk*)p;
975 size_t res = fc->size();
// Same double-check pattern as block_size(): the size read above is
// only trusted if the block still looks like a free chunk.
976 if (FreeChunk::indicatesFreeChunk(p)) {
977 assert(res != 0, "Block size should not be 0");
978 assert(loops == 0, "Should be 0");
979 return res;
980 }
981 } else {
982 // must read from what 'p' points to in each loop.
983 klassOop k = ((volatile oopDesc*)p)->klass_or_null();
// Instead of stalling on an unpublished/unparsable object, fall back
// to the Printezis-bit estimate (which may legitimately be 0).
984 if (k != NULL &&
985 ((oopDesc*)p)->is_parsable() &&
986 ((oopDesc*)p)->is_conc_safe()) {
987 assert(k->is_oop(), "Should really be klass oop.");
988 oop o = (oop)p;
989 assert(o->is_oop(), "Should be an oop");
990 size_t res = o->size_given_klass(k->klass_part());
991 res = adjustObjectSize(res);
992 assert(res != 0, "Block size should not be 0");
993 return res;
994 } else {
995 return c->block_size_if_printezis_bits(p);
996 }
997 }
998 assert(loops == 0, "Can loop at most once");
999 DEBUG_ONLY(loops++;)
1000 }
1001 }
// Non-parallel ("nopar") variant of block_size(): assumes no concurrent
// mutation of the block, so a single read of the free/allocated state
// suffices and no retry loop is needed.
1003 size_t CompactibleFreeListSpace::block_size_nopar(const HeapWord* p) const {
1004 NOT_PRODUCT(verify_objects_initialized());
1005 assert(MemRegion(bottom(), end()).contains(p), "p not in space");
1006 FreeChunk* fc = (FreeChunk*)p;
1007 if (fc->isFree()) {
1008 return fc->size();
1009 } else {
1010 // Ignore mark word because this may be a recently promoted
1011 // object whose mark word is used to chain together grey
1012 // objects (the last one would have a null value).
1013 assert(oop(p)->is_oop(true), "Should be an oop");
1014 return adjustObjectSize(oop(p)->size());
1015 }
1016 }
1018 // This implementation assumes that the property of "being an object" is
1019 // stable. But being a free chunk may not be (because of parallel
1020 // promotion.)
// Returns true iff the block starting at "p" is an initialized object
// (a non-NULL klass has been published); free chunks and not-yet-
// initialized allocations return false.
1021 bool CompactibleFreeListSpace::block_is_obj(const HeapWord* p) const {
1022 FreeChunk* fc = (FreeChunk*)p;
1023 assert(is_in_reserved(p), "Should be in space");
1024 // When doing a mark-sweep-compact of the CMS generation, this
1025 // assertion may fail because prepare_for_compaction() uses
1026 // space that is garbage to maintain information on ranges of
1027 // live objects so that these live ranges can be moved as a whole.
1028 // Comment out this assertion until that problem can be solved
1029 // (i.e., that the block start calculation may look at objects
1030 // at address below "p" in finding the object that contains "p"
1031 // and those objects (if garbage) may have been modified to hold
1032 // live range information.
1033 // assert(ParallelGCThreads > 0 || _bt.block_start(p) == p, "Should be a block boundary");
1034 if (FreeChunk::indicatesFreeChunk(p)) return false;
// Racy read is acceptable here: per the comment above, once a block
// becomes an object it stays an object for the duration of interest.
1035 klassOop k = oop(p)->klass_or_null();
1036 if (k != NULL) {
1037 // Ignore mark word because it may have been used to
1038 // chain together promoted objects (the last one
1039 // would have a null value).
1040 assert(oop(p)->is_oop(true), "Should be an oop");
1041 return true;
1042 } else {
1043 return false; // Was not an object at the start of collection.
1044 }
1045 }
1047 // Check if the object is alive. This fact is checked either by consulting
1048 // the main marking bitmap in the sweeping phase or, if it's a permanent
1049 // generation and we're not in the sweeping phase, by checking the
1050 // perm_gen_verify_bit_map where we store the "deadness" information if
1051 // we did not sweep the perm gen in the most recent previous GC cycle.
1052 bool CompactibleFreeListSpace::obj_is_alive(const HeapWord* p) const {
1053 assert (block_is_obj(p), "The address should point to an object");
1055 // If we're sweeping, we use object liveness information from the main bit map
1056 // for both perm gen and old gen.
1057 // We don't need to lock the bitmap (live_map or dead_map below), because
1058 // EITHER we are in the middle of the sweeping phase, and the
1059 // main marking bit map (live_map below) is locked,
1060 // OR we're in other phases and perm_gen_verify_bit_map (dead_map below)
1061 // is stable, because it's mutated only in the sweeping phase.
1062 if (_collector->abstract_state() == CMSCollector::Sweeping) {
1063 CMSBitMap* live_map = _collector->markBitMap();
1064 return live_map->isMarked((HeapWord*) p);
1065 } else {
1066 // If we're not currently sweeping and we haven't swept the perm gen in
1067 // the previous concurrent cycle then we may have dead but unswept objects
1068 // in the perm gen. In this case, we use the "deadness" information
1069 // that we had saved in perm_gen_verify_bit_map at the last sweep.
1070 if (!CMSClassUnloadingEnabled && _collector->_permGen->reserved().contains(p)) {
1071 if (_collector->verifying()) {
1072 CMSBitMap* dead_map = _collector->perm_gen_verify_bit_map();
1073 // Object is marked in the dead_map bitmap at the previous sweep
1074 // when we know that it's dead; if the bitmap is not allocated then
1075 // the object is alive.
1076 return (dead_map->sizeInBits() == 0) // bit_map has been allocated
1077 || !dead_map->par_isMarked((HeapWord*) p);
1078 } else {
1079 return false; // We can't say for sure if it's live, so we say that it's dead.
1080 }
1081 }
1082 }
// Default: outside the sweeping phase, any object not handled by the
// perm-gen special case above is considered alive.
1083 return true;
1084 }
// Non-parallel variant of block_is_obj(): with no concurrent mutation a
// single isFree() test distinguishes free chunks from objects, and "p"
// is required to be an exact block boundary.
1086 bool CompactibleFreeListSpace::block_is_obj_nopar(const HeapWord* p) const {
1087 FreeChunk* fc = (FreeChunk*)p;
1088 assert(is_in_reserved(p), "Should be in space");
1089 assert(_bt.block_start(p) == p, "Should be a block boundary");
1090 if (!fc->isFree()) {
1091 // Ignore mark word because it may have been used to
1092 // chain together promoted objects (the last one
1093 // would have a null value).
1094 assert(oop(p)->is_oop(true), "Should be an oop");
1095 return true;
1096 }
1097 return false;
1098 }
1100 // "MT-safe but not guaranteed MT-precise" (TM); you may get an
1101 // approximate answer if you don't hold the freelistlock when you call this.
// Sum of (chunk size * chunk count) over all indexed free lists, in
// HeapWords.
1102 size_t CompactibleFreeListSpace::totalSizeInIndexedFreeLists() const {
1103 size_t size = 0;
1104 for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
1105 debug_only(
1106 // We may be calling here without the lock in which case we
1107 // won't do this modest sanity check.
1108 if (freelistLock()->owned_by_self()) {
1109 size_t total_list_size = 0;
// Each chunk on list i has size i, so walking the list and adding i
// per node must agree with i * count().
1110 for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
1111 fc = fc->next()) {
1112 total_list_size += i;
1113 }
1114 assert(total_list_size == i * _indexedFreeList[i].count(),
1115 "Count in list is incorrect");
1116 }
1117 )
1118 size += i * _indexedFreeList[i].count();
1119 }
1120 return size;
1121 }
// Thread-safe entry point for allocation: acquires the free list lock
// (without a safepoint check) and delegates to allocate().
1123 HeapWord* CompactibleFreeListSpace::par_allocate(size_t size) {
1124 MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
1125 return allocate(size);
1126 }
// Convenience wrapper: carve "size" words from the remainder of the
// small linear allocation block, without refilling it.
1128 HeapWord*
1129 CompactibleFreeListSpace::getChunkFromSmallLinearAllocBlockRemainder(size_t size) {
1130 return getChunkFromLinearAllocBlockRemainder(&_smallLinearAllocBlock, size);
1131 }
// Allocate "size" words (already adjusted via adjustObjectSize()) from
// this space, dispatching to the adaptive or non-adaptive free list
// policy. Returns NULL on failure. Caller must hold the free list lock.
1133 HeapWord* CompactibleFreeListSpace::allocate(size_t size) {
1134 assert_lock_strong(freelistLock());
1135 HeapWord* res = NULL;
1136 assert(size == adjustObjectSize(size),
1137 "use adjustObjectSize() before calling into allocate()");
1139 if (_adaptive_freelists) {
1140 res = allocate_adaptive_freelists(size);
1141 } else { // non-adaptive free lists
1142 res = allocate_non_adaptive_freelists(size);
1143 }
1145 if (res != NULL) {
1146 // check that res does lie in this space!
1147 assert(is_in_reserved(res), "Not in this space!");
1148 assert(is_aligned((void*)res), "alignment check");
// Clear the free-chunk indication in the block's header so concurrent
// readers (e.g. block_size()) no longer see it as free.
1150 FreeChunk* fc = (FreeChunk*)res;
1151 fc->markNotFree();
1152 assert(!fc->isFree(), "shouldn't be marked free");
1153 assert(oop(fc)->klass_or_null() == NULL, "should look uninitialized");
1154 // Verify that the block offset table shows this to
1155 // be a single block, but not one which is unallocated.
1156 _bt.verify_single_block(res, size);
1157 _bt.verify_not_unallocated(res, size);
1158 // mangle a just allocated object with a distinct pattern.
1159 debug_only(fc->mangleAllocated(size));
1160 }
1162 return res;
1163 }
// Non-adaptive allocation policy. Triage order:
//   1. small linear allocation block (for sizes below its limit),
//   2. indexed free lists (for sizes below SmallForDictionary),
//   3. the dictionary (everything else).
// Returns NULL if all three fail.
1165 HeapWord* CompactibleFreeListSpace::allocate_non_adaptive_freelists(size_t size) {
1166 HeapWord* res = NULL;
1167 // try and use linear allocation for smaller blocks
1168 if (size < _smallLinearAllocBlock._allocation_size_limit) {
1169 // if successful, the following also adjusts block offset table
1170 res = getChunkFromSmallLinearAllocBlock(size);
1171 }
1172 // Else triage to indexed lists for smaller sizes
1173 if (res == NULL) {
1174 if (size < SmallForDictionary) {
1175 res = (HeapWord*) getChunkFromIndexedFreeList(size);
1176 } else {
1177 // else get it from the big dictionary; if even this doesn't
1178 // work we are out of luck.
1179 res = (HeapWord*)getChunkFromDictionaryExact(size);
1180 }
1181 }
1183 return res;
1184 }
// Adaptive allocation policy; see the "Strategy" comment below for the
// triage order. Returns NULL if no source can satisfy the request.
1186 HeapWord* CompactibleFreeListSpace::allocate_adaptive_freelists(size_t size) {
1187 assert_lock_strong(freelistLock());
1188 HeapWord* res = NULL;
1189 assert(size == adjustObjectSize(size),
1190 "use adjustObjectSize() before calling into allocate()");
1192 // Strategy
1193 // if small
1194 // exact size from small object indexed list if small
1195 // small or large linear allocation block (linAB) as appropriate
1196 // take from lists of greater sized chunks
1197 // else
1198 // dictionary
1199 // small or large linear allocation block if it has the space
1200 // Try allocating exact size from indexTable first
1201 if (size < IndexSetSize) {
1202 res = (HeapWord*) getChunkFromIndexedFreeList(size);
1203 if(res != NULL) {
1204 assert(res != (HeapWord*)_indexedFreeList[size].head(),
1205 "Not removed from free list");
1206 // no block offset table adjustment is necessary on blocks in
1207 // the indexed lists.
1209 // Try allocating from the small LinAB
1210 } else if (size < _smallLinearAllocBlock._allocation_size_limit &&
1211 (res = getChunkFromSmallLinearAllocBlock(size)) != NULL) {
1212 // if successful, the above also adjusts block offset table
1213 // Note that this call will refill the LinAB to
1214 // satisfy the request. This is different that
1215 // evm.
1216 // Don't record chunk off a LinAB? smallSplitBirth(size);
1218 } else {
1219 // Raid the exact free lists larger than size, even if they are not
1220 // overpopulated.
1221 res = (HeapWord*) getChunkFromGreater(size);
1222 }
1223 } else {
1224 // Big objects get allocated directly from the dictionary.
1225 res = (HeapWord*) getChunkFromDictionaryExact(size);
1226 if (res == NULL) {
1227 // Try hard not to fail since an allocation failure will likely
1228 // trigger a synchronous GC. Try to get the space from the
1229 // allocation blocks.
1230 res = getChunkFromSmallLinearAllocBlockRemainder(size);
1231 }
1232 }
1234 return res;
1235 }
1237 // A worst-case estimate of the space required (in HeapWords) to expand the heap
1238 // when promoting obj.
1239 size_t CompactibleFreeListSpace::expansionSpaceRequired(size_t obj_size) const {
1240 // Depending on the object size, expansion may require refilling either a
1241 // bigLAB or a smallLAB plus refilling a PromotionInfo object. MinChunkSize
1242 // is added because the dictionary may over-allocate to avoid fragmentation.
1243 size_t space = obj_size;
1244 if (!_adaptive_freelists) {
// With non-adaptive lists the LinAB may need a full refill.
1245 space = MAX2(space, _smallLinearAllocBlock._refillSize);
1246 }
1247 space += _promoInfo.refillSize() + 2 * MinChunkSize;
1248 return space;
1249 }
// Satisfy a request of "numWords" from a larger chunk: first scan the
// indexed free lists for any non-empty list of size >= numWords +
// MinChunkSize, else carve a chunk from the dictionary, returning the
// remainder to the free lists. Returns NULL if nothing fits.
1251 FreeChunk* CompactibleFreeListSpace::getChunkFromGreater(size_t numWords) {
1252 FreeChunk* ret;
1254 assert(numWords >= MinChunkSize, "Size is less than minimum");
1255 assert(linearAllocationWouldFail() || bestFitFirst(),
1256 "Should not be here");
1258 size_t i;
// Start at numWords + MinChunkSize so the remainder after the split is
// itself at least a minimum-sized chunk.
1259 size_t currSize = numWords + MinChunkSize;
1260 assert(currSize % MinObjAlignment == 0, "currSize should be aligned");
1261 for (i = currSize; i < IndexSetSize; i += IndexSetStride) {
1262 FreeList* fl = &_indexedFreeList[i];
1263 if (fl->head()) {
1264 ret = getFromListGreater(fl, numWords);
1265 assert(ret == NULL || ret->isFree(), "Should be returning a free chunk");
1266 return ret;
1267 }
1268 }
1270 currSize = MAX2((size_t)SmallForDictionary,
1271 (size_t)(numWords + MinChunkSize));
1273 /* Try to get a chunk that satisfies request, while avoiding
1274 fragmentation that can't be handled. */
1275 {
1276 ret = dictionary()->getChunk(currSize);
1277 if (ret != NULL) {
1278 assert(ret->size() - numWords >= MinChunkSize,
1279 "Chunk is too small");
1280 _bt.allocated((HeapWord*)ret, ret->size());
1281 /* Carve returned chunk. */
1282 (void) splitChunkAndReturnRemainder(ret, numWords);
1283 /* Label this as no longer a free chunk. */
1284 assert(ret->isFree(), "This chunk should be free");
1285 ret->linkPrev(NULL);
1286 }
1287 assert(ret == NULL || ret->isFree(), "Should be returning a free chunk");
1288 return ret;
1289 }
// Unreachable: the block above always returns.
1290 ShouldNotReachHere();
1291 }
// Verify that "fc" is on the indexed free list of its own size.
1293 bool CompactibleFreeListSpace::verifyChunkInIndexedFreeLists(FreeChunk* fc)
1294 const {
1295 assert(fc->size() < IndexSetSize, "Size of chunk is too large");
1296 return _indexedFreeList[fc->size()].verifyChunkInFreeLists(fc);
1297 }
// Verify that "fc" is on a free list, dispatching by size to either the
// dictionary (large chunks) or the indexed free lists (small chunks).
1299 bool CompactibleFreeListSpace::verifyChunkInFreeLists(FreeChunk* fc) const {
1300 if (fc->size() >= IndexSetSize) {
1301 return dictionary()->verifyChunkInFreeLists(fc);
1302 } else {
1303 return verifyChunkInIndexedFreeLists(fc);
1304 }
1305 }
1307 #ifndef PRODUCT
// Debug-only lock checks: the no-arg form verifies both the free list
// lock and the parallel dictionary allocation lock; the one-arg form
// verifies an arbitrary caller-supplied lock.
1308 void CompactibleFreeListSpace::assert_locked() const {
1309 CMSLockVerifier::assert_locked(freelistLock(), parDictionaryAllocLock());
1310 }
1312 void CompactibleFreeListSpace::assert_locked(const Mutex* lock) const {
1313 CMSLockVerifier::assert_locked(lock);
1314 }
1315 #endif
// Allocate a scratch chunk from the dictionary for GC-internal use.
// The chunk is marked "don't coalesce" so it stays intact; returns NULL
// if the dictionary cannot supply one.
1317 FreeChunk* CompactibleFreeListSpace::allocateScratch(size_t size) {
1318 // In the parallel case, the main thread holds the free list lock
1319 // on behalf the parallel threads.
1320 FreeChunk* fc;
1321 {
1322 // If GC is parallel, this might be called by several threads.
1323 // This should be rare enough that the locking overhead won't affect
1324 // the sequential code.
1325 MutexLockerEx x(parDictionaryAllocLock(),
1326 Mutex::_no_safepoint_check_flag);
1327 fc = getChunkFromDictionary(size);
1328 }
1329 if (fc != NULL) {
// Note the chunk stays on no free list but remains marked free; the
// dontCoalesce() bit prevents sweeping from merging it away.
1330 fc->dontCoalesce();
1331 assert(fc->isFree(), "Should be free, but not coalescable");
1332 // Verify that the block offset table shows this to
1333 // be a single block, but not one which is unallocated.
1334 _bt.verify_single_block((HeapWord*)fc, fc->size());
1335 _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
1336 }
1337 return fc;
1338 }
// Promote "obj" (of "obj_size" words) into this space: allocate, copy
// the object's words, and record the promotion if tracking is enabled.
// Returns the new oop, or NULL if allocation or spooling space fails.
1340 oop CompactibleFreeListSpace::promote(oop obj, size_t obj_size) {
1341 assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
1342 assert_locked();
1344 // if we are tracking promotions, then first ensure space for
1345 // promotion (including spooling space for saving header if necessary).
1346 // then allocate and copy, then track promoted info if needed.
1347 // When tracking (see PromotionInfo::track()), the mark word may
1348 // be displaced and in this case restoration of the mark word
1349 // occurs in the (oop_since_save_marks_)iterate phase.
1350 if (_promoInfo.tracking() && !_promoInfo.ensure_spooling_space()) {
1351 return NULL;
1352 }
1353 // Call the allocate(size_t, bool) form directly to avoid the
1354 // additional call through the allocate(size_t) form. Having
1355 // the compile inline the call is problematic because allocate(size_t)
1356 // is a virtual method.
1357 HeapWord* res = allocate(adjustObjectSize(obj_size));
1358 if (res != NULL) {
1359 Copy::aligned_disjoint_words((HeapWord*)obj, res, obj_size);
1360 // if we should be tracking promotions, do so.
1361 if (_promoInfo.tracking()) {
1362 _promoInfo.track((PromotedObject*)res);
1363 }
1364 }
// NULL here means allocation failed; caller handles promotion failure.
1365 return oop(res);
1366 }
// Carve "size" words from the small linear allocation block, refilling
// it if necessary; delegates to the generic LinAB routine.
1368 HeapWord*
1369 CompactibleFreeListSpace::getChunkFromSmallLinearAllocBlock(size_t size) {
1370 assert_locked();
1371 assert(size >= MinChunkSize, "minimum chunk size");
1372 assert(size < _smallLinearAllocBlock._allocation_size_limit,
1373 "maximum from smallLinearAllocBlock");
1374 return getChunkFromLinearAllocBlock(&_smallLinearAllocBlock, size);
1375 }
// Allocate "size" words from linear allocation block "blk". First tries
// the current remainder; if that fails, returns any unusable remainder
// to the free lists, refills the block, and allocates from the refill.
// Returns NULL if the block is empty or a refill would not satisfy the
// request.
1377 HeapWord*
1378 CompactibleFreeListSpace::getChunkFromLinearAllocBlock(LinearAllocBlock *blk,
1379 size_t size) {
1380 assert_locked();
1381 assert(size >= MinChunkSize, "too small");
1382 HeapWord* res = NULL;
1383 // Try to do linear allocation from blk, making sure that
// the block is non-empty first; an empty block means a refill already
// failed, so bail out until next time.
1384 if (blk->_word_size == 0) {
1385 // We have probably been unable to fill this either in the prologue or
1386 // when it was exhausted at the last linear allocation. Bail out until
1387 // next time.
1388 assert(blk->_ptr == NULL, "consistency check");
1389 return NULL;
1390 }
1391 assert(blk->_word_size != 0 && blk->_ptr != NULL, "consistency check");
1392 res = getChunkFromLinearAllocBlockRemainder(blk, size);
1393 if (res != NULL) return res;
1395 // about to exhaust this linear allocation block
1396 if (blk->_word_size == size) { // exactly satisfied
1397 res = blk->_ptr;
1398 _bt.allocated(res, blk->_word_size);
1399 } else if (size + MinChunkSize <= blk->_refillSize) {
1400 size_t sz = blk->_word_size;
1401 // Update _unallocated_block if the size is such that chunk would be
1402 // returned to the indexed free list. All other chunks in the indexed
1403 // free lists are allocated from the dictionary so that _unallocated_block
1404 // has already been adjusted for them. Do it here so that the cost
1405 // for all chunks added back to the indexed free lists.
1406 if (sz < SmallForDictionary) {
1407 _bt.allocated(blk->_ptr, sz);
1408 }
1409 // Return the chunk that isn't big enough, and then refill below.
1410 addChunkToFreeLists(blk->_ptr, sz);
1411 splitBirth(sz);
1412 // Don't keep statistics on adding back chunk from a LinAB.
1413 } else {
1414 // A refilled block would not satisfy the request.
1415 return NULL;
1416 }
// Empty the block and attempt a refill; after this, either res was set
// above (exact fit) or we allocate from the freshly refilled block.
1418 blk->_ptr = NULL; blk->_word_size = 0;
1419 refillLinearAllocBlock(blk);
1420 assert(blk->_ptr == NULL || blk->_word_size >= size + MinChunkSize,
1421 "block was replenished");
1422 if (res != NULL) {
1423 splitBirth(size);
1424 repairLinearAllocBlock(blk);
1425 } else if (blk->_ptr != NULL) {
1426 res = blk->_ptr;
1427 size_t blk_size = blk->_word_size;
1428 blk->_word_size -= size;
1429 blk->_ptr += size;
1430 splitBirth(size);
1431 repairLinearAllocBlock(blk);
1432 // Update BOT last so that other (parallel) GC threads see a consistent
1433 // view of the BOT and free blocks.
1434 // Above must occur before BOT is updated below.
1435 _bt.split_block(res, blk_size, size); // adjust block offset table
1436 }
1437 return res;
1438 }
// Fast path: carve "size" words off the front of "blk" when the block's
// remainder can both satisfy the request and leave at least MinChunkSize
// behind. Returns NULL otherwise (no refill is attempted here).
1440 HeapWord* CompactibleFreeListSpace::getChunkFromLinearAllocBlockRemainder(
1441 LinearAllocBlock* blk,
1442 size_t size) {
1443 assert_locked();
1444 assert(size >= MinChunkSize, "too small");
1446 HeapWord* res = NULL;
1447 // This is the common case. Keep it simple.
1448 if (blk->_word_size >= size + MinChunkSize) {
1449 assert(blk->_ptr != NULL, "consistency check");
1450 res = blk->_ptr;
1451 // Note that the BOT is up-to-date for the linAB before allocation. It
1452 // indicates the start of the linAB. The split_block() updates the
1453 // BOT for the linAB after the allocation (indicates the start of the
1454 // next chunk to be allocated).
1455 size_t blk_size = blk->_word_size;
1456 blk->_word_size -= size;
1457 blk->_ptr += size;
1458 splitBirth(size);
1459 repairLinearAllocBlock(blk);
1460 // Update BOT last so that other (parallel) GC threads see a consistent
1461 // view of the BOT and free blocks.
1462 // Above must occur before BOT is updated below.
1463 _bt.split_block(res, blk_size, size); // adjust block offset table
1464 _bt.allocated(res, size);
1465 }
1466 return res;
1467 }
// Take a chunk of exactly "size" words from the indexed free list of
// that size, replenishing the list via the helper if it is empty.
// Returns NULL if no chunk could be obtained.
1469 FreeChunk*
1470 CompactibleFreeListSpace::getChunkFromIndexedFreeList(size_t size) {
1471 assert_locked();
1472 assert(size < SmallForDictionary, "just checking");
1473 FreeChunk* res;
1474 res = _indexedFreeList[size].getChunkAtHead();
1475 if (res == NULL) {
// NOTE(review): the helper's second parameter (replenish) presumably
// defaults to true in the declaration -- confirm against the header.
1476 res = getChunkFromIndexedFreeListHelper(size);
1477 }
1478 _bt.verify_not_unallocated((HeapWord*) res, size);
1479 assert(res == NULL || res->size() == size, "Incorrect block size");
1480 return res;
1481 }
// Replenish the indexed free list for "size": obtain a larger block
// (from an overpopulated list, best-fit, a recursive call at
// replenish_size, or the dictionary for large sizes), carve it into
// size-sized pieces, put all but the last on the list, and return the
// last piece to the caller. "replenish" limits recursion: the recursive
// call at replenish_size passes false so the intermediate list is not
// itself replenished (unless CMSReplenishIntermediate is set).
1483 FreeChunk*
1484 CompactibleFreeListSpace::getChunkFromIndexedFreeListHelper(size_t size,
1485 bool replenish) {
1486 assert_locked();
1487 FreeChunk* fc = NULL;
1488 if (size < SmallForDictionary) {
1489 assert(_indexedFreeList[size].head() == NULL ||
1490 _indexedFreeList[size].surplus() <= 0,
1491 "List for this size should be empty or under populated");
1492 // Try best fit in exact lists before replenishing the list
1493 if (!bestFitFirst() || (fc = bestFitSmall(size)) == NULL) {
1494 // Replenish list.
1495 //
1496 // Things tried that failed.
1497 // Tried allocating out of the two LinAB's first before
1498 // replenishing lists.
1499 // Tried small linAB of size 256 (size in indexed list)
1500 // and replenishing indexed lists from the small linAB.
1501 //
1502 FreeChunk* newFc = NULL;
1503 const size_t replenish_size = CMSIndexedFreeListReplenish * size;
1504 if (replenish_size < SmallForDictionary) {
1505 // Do not replenish from an underpopulated size.
1506 if (_indexedFreeList[replenish_size].surplus() > 0 &&
1507 _indexedFreeList[replenish_size].head() != NULL) {
1508 newFc = _indexedFreeList[replenish_size].getChunkAtHead();
1509 } else if (bestFitFirst()) {
1510 newFc = bestFitSmall(replenish_size);
1511 }
1512 }
1513 if (newFc == NULL && replenish_size > size) {
1514 assert(CMSIndexedFreeListReplenish > 1, "ctl pt invariant");
1515 newFc = getChunkFromIndexedFreeListHelper(replenish_size, false);
1516 }
1517 // Note: The stats update re split-death of block obtained above
1518 // will be recorded below precisely when we know we are going to
1519 // be actually splitting it into more than one pieces below.
1520 if (newFc != NULL) {
1521 if (replenish || CMSReplenishIntermediate) {
1522 // Replenish this list and return one block to caller.
1523 size_t i;
1524 FreeChunk *curFc, *nextFc;
1525 size_t num_blk = newFc->size() / size;
1526 assert(num_blk >= 1, "Smaller than requested?");
1527 assert(newFc->size() % size == 0, "Should be integral multiple of request");
1528 if (num_blk > 1) {
1529 // we are sure we will be splitting the block just obtained
1530 // into multiple pieces; record the split-death of the original
1531 splitDeath(replenish_size);
1532 }
1533 // carve up and link blocks 0, ..., num_blk - 2
1534 // The last chunk is not added to the lists but is returned as the
1535 // free chunk.
1536 for (curFc = newFc, nextFc = (FreeChunk*)((HeapWord*)curFc + size),
1537 i = 0;
1538 i < (num_blk - 1);
1539 curFc = nextFc, nextFc = (FreeChunk*)((HeapWord*)nextFc + size),
1540 i++) {
1541 curFc->setSize(size);
1542 // Don't record this as a return in order to try and
1543 // determine the "returns" from a GC.
// NOTE(review): 'fc' is still NULL at this point; 'curFc' looks like
// the intended argument of this debug check -- confirm.
1544 _bt.verify_not_unallocated((HeapWord*) fc, size);
1545 _indexedFreeList[size].returnChunkAtTail(curFc, false);
1546 _bt.mark_block((HeapWord*)curFc, size);
1547 splitBirth(size);
1548 // Don't record the initial population of the indexed list
1549 // as a split birth.
1550 }
1552 // check that the arithmetic was OK above
1553 assert((HeapWord*)nextFc == (HeapWord*)newFc + num_blk*size,
1554 "inconsistency in carving newFc");
// The final piece (curFc) is sized and marked but kept off the list;
// it is the chunk handed back to the caller.
1555 curFc->setSize(size);
1556 _bt.mark_block((HeapWord*)curFc, size);
1557 splitBirth(size);
1558 fc = curFc;
1559 } else {
1560 // Return entire block to caller
1561 fc = newFc;
1562 }
1563 }
1564 }
1565 } else {
1566 // Get a free chunk from the free chunk dictionary to be returned to
1567 // replenish the indexed free list.
1568 fc = getChunkFromDictionaryExact(size);
1569 }
1570 // assert(fc == NULL || fc->isFree(), "Should be returning a free chunk");
1571 return fc;
1572 }
// Take a chunk of at least "size" words from the dictionary, splitting
// off and returning the remainder to the free lists if the chunk is at
// least MinChunkSize larger than requested. The returned chunk has
// size in [size, size + MinChunkSize). Returns NULL on failure.
1574 FreeChunk*
1575 CompactibleFreeListSpace::getChunkFromDictionary(size_t size) {
1576 assert_locked();
1577 FreeChunk* fc = _dictionary->getChunk(size);
1578 if (fc == NULL) {
1579 return NULL;
1580 }
1581 _bt.allocated((HeapWord*)fc, fc->size());
1582 if (fc->size() >= size + MinChunkSize) {
1583 fc = splitChunkAndReturnRemainder(fc, size);
1584 }
1585 assert(fc->size() >= size, "chunk too small");
1586 assert(fc->size() < size + MinChunkSize, "chunk too big");
1587 _bt.verify_single_block((HeapWord*)fc, fc->size());
1588 return fc;
1589 }
// Like getChunkFromDictionary(), but the returned chunk is EXACTLY
// "size" words. If the dictionary hands back a chunk too small to split
// (remainder would be under MinChunkSize), it is returned and a larger
// chunk is requested instead. Returns NULL on failure.
1591 FreeChunk*
1592 CompactibleFreeListSpace::getChunkFromDictionaryExact(size_t size) {
1593 assert_locked();
1594 FreeChunk* fc = _dictionary->getChunk(size);
1595 if (fc == NULL) {
1596 return fc;
1597 }
1598 _bt.allocated((HeapWord*)fc, fc->size());
1599 if (fc->size() == size) {
1600 _bt.verify_single_block((HeapWord*)fc, size);
1601 return fc;
1602 }
1603 assert(fc->size() > size, "getChunk() guarantee");
1604 if (fc->size() < size + MinChunkSize) {
1605 // Return the chunk to the dictionary and go get a bigger one.
1606 returnChunkToDictionary(fc);
1607 fc = _dictionary->getChunk(size + MinChunkSize);
1608 if (fc == NULL) {
1609 return NULL;
1610 }
1611 _bt.allocated((HeapWord*)fc, fc->size());
1612 }
1613 assert(fc->size() >= size + MinChunkSize, "tautology");
1614 fc = splitChunkAndReturnRemainder(fc, size);
1615 assert(fc->size() == size, "chunk is wrong size");
1616 _bt.verify_single_block((HeapWord*)fc, size);
1617 return fc;
1618 }
// Return "chunk" to the dictionary, updating the BOT to reflect that
// the words are free again.
1620 void
1621 CompactibleFreeListSpace::returnChunkToDictionary(FreeChunk* chunk) {
1622 assert_locked();
1624 size_t size = chunk->size();
1625 _bt.verify_single_block((HeapWord*)chunk, size);
1626 // adjust _unallocated_block downward, as necessary
1627 _bt.freed((HeapWord*)chunk, size);
1628 _dictionary->returnChunk(chunk);
1629 #ifndef PRODUCT
// Census statistics are known to be inconsistent during sweeping, so
// only verify them outside that phase.
1630 if (CMSCollector::abstract_state() != CMSCollector::Sweeping) {
1631 TreeChunk::as_TreeChunk(chunk)->list()->verify_stats();
1632 }
1633 #endif // PRODUCT
1634 }
// Return a (small) free chunk to its indexed free list. With adaptive
// free lists the chunk is appended at the tail, otherwise it is pushed
// on at the head.
void
CompactibleFreeListSpace::returnChunkToFreeList(FreeChunk* fc) {
  assert_locked();
  size_t size = fc->size();
  _bt.verify_single_block((HeapWord*) fc, size);
  _bt.verify_not_unallocated((HeapWord*) fc, size);
  if (_adaptive_freelists) {
    _indexedFreeList[size].returnChunkAtTail(fc);
  } else {
    _indexedFreeList[size].returnChunkAtHead(fc);
  }
#ifndef PRODUCT
  // Stats are not kept consistent during sweeping; skip verification then.
  if (CMSCollector::abstract_state() != CMSCollector::Sweeping) {
     _indexedFreeList[size].verify_stats();
  }
#endif // PRODUCT
}
// Add chunk to end of last block -- if it's the largest
// block -- and update BOT and census data. We would
// of course have preferred to coalesce it with the
// last block, but it's currently less expensive to find the
// largest block than it is to find the last.
void
CompactibleFreeListSpace::addChunkToFreeListsAtEndRecordingStats(
  HeapWord* chunk, size_t size) {
  // check that the chunk does lie in this space!
  assert(chunk != NULL && is_in_reserved(chunk), "Not in this space!");
  // One of the parallel gc task threads may be here
  // whilst others are allocating.
  Mutex* lock = NULL;
  if (ParallelGCThreads != 0) {
    lock = &_parDictionaryAllocLock;
  }
  FreeChunk* ec;
  {
    // Hold the dictionary lock while probing for a coterminal block.
    MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
    ec = dictionary()->findLargestDict();  // get largest block
    if (ec != NULL && ec->end() == chunk) {
      // It's a coterminal block - we can coalesce.
      size_t old_size = ec->size();
      coalDeath(old_size);
      removeChunkFromDictionary(ec);
      size += old_size;
    } else {
      ec = (FreeChunk*)chunk;
    }
  }
  ec->setSize(size);
  debug_only(ec->mangleFreed(size));
  if (size < SmallForDictionary) {
    // Small result goes on an indexed list; switch to that list's lock.
    lock = _indexedFreeListParLocks[size];
  }
  MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
  addChunkAndRepairOffsetTable((HeapWord*)ec, size, true);
  // record the birth under the lock since the recording involves
  // manipulation of the list on which the chunk lives and
  // if the chunk is allocated and is the last on the list,
  // the list can go away.
  coalBirth(size);
}
1698 void
1699 CompactibleFreeListSpace::addChunkToFreeLists(HeapWord* chunk,
1700 size_t size) {
1701 // check that the chunk does lie in this space!
1702 assert(chunk != NULL && is_in_reserved(chunk), "Not in this space!");
1703 assert_locked();
1704 _bt.verify_single_block(chunk, size);
1706 FreeChunk* fc = (FreeChunk*) chunk;
1707 fc->setSize(size);
1708 debug_only(fc->mangleFreed(size));
1709 if (size < SmallForDictionary) {
1710 returnChunkToFreeList(fc);
1711 } else {
1712 returnChunkToDictionary(fc);
1713 }
1714 }
1716 void
1717 CompactibleFreeListSpace::addChunkAndRepairOffsetTable(HeapWord* chunk,
1718 size_t size, bool coalesced) {
1719 assert_locked();
1720 assert(chunk != NULL, "null chunk");
1721 if (coalesced) {
1722 // repair BOT
1723 _bt.single_block(chunk, size);
1724 }
1725 addChunkToFreeLists(chunk, size);
1726 }
// We _must_ find the purported chunk on our free lists;
// we assert if we don't.
// Removes "fc" from whichever free data structure holds it: the
// indexed free lists for small sizes, the dictionary otherwise.
void
CompactibleFreeListSpace::removeFreeChunkFromFreeLists(FreeChunk* fc) {
  size_t size = fc->size();
  assert_locked();
  debug_only(verifyFreeLists());
  if (size < SmallForDictionary) {
    removeChunkFromIndexedFreeList(fc);
  } else {
    removeChunkFromDictionary(fc);
  }
  _bt.verify_single_block((HeapWord*)fc, size);
  debug_only(verifyFreeLists());
}
// Remove "fc" from the dictionary and mark it allocated in the block
// offset table.
void
CompactibleFreeListSpace::removeChunkFromDictionary(FreeChunk* fc) {
  size_t size = fc->size();
  assert_locked();
  assert(fc != NULL, "null chunk");
  _bt.verify_single_block((HeapWord*)fc, size);
  _dictionary->removeChunk(fc);
  // adjust _unallocated_block upward, as necessary
  _bt.allocated((HeapWord*)fc, size);
}
// Remove "fc" from its indexed free list, clearing its link fields in
// debug builds so a stale chunk is easier to spot.
void
CompactibleFreeListSpace::removeChunkFromIndexedFreeList(FreeChunk* fc) {
  assert_locked();
  size_t size = fc->size();
  _bt.verify_single_block((HeapWord*)fc, size);
  NOT_PRODUCT(
    if (FLSVerifyIndexTable) {
      verifyIndexedFreeList(size);
    }
  )
  _indexedFreeList[size].removeChunk(fc);
  debug_only(fc->clearNext());
  debug_only(fc->clearPrev());
  NOT_PRODUCT(
    if (FLSVerifyIndexTable) {
      verifyIndexedFreeList(size);
    }
  )
}
// Satisfy a small request of "numWords" by splitting a larger chunk
// off one of the indexed free lists, using each list's "hint" (the
// next larger size believed to have a surplus) to direct the search.
// Returns NULL if no list with a surplus is found; in that case the
// start list's hint is reset to IndexSetSize.
FreeChunk* CompactibleFreeListSpace::bestFitSmall(size_t numWords) {
  /* A hint is the next larger size that has a surplus.
     Start search at a size large enough to guarantee that
     the excess is >= MIN_CHUNK. */
  size_t start = align_object_size(numWords + MinChunkSize);
  if (start < IndexSetSize) {
    FreeList* it   = _indexedFreeList;  // alias for the index table
    size_t    hint = _indexedFreeList[start].hint();
    while (hint < IndexSetSize) {
      assert(hint % MinObjAlignment == 0, "hint should be aligned");
      FreeList *fl = &_indexedFreeList[hint];
      if (fl->surplus() > 0 && fl->head() != NULL) {
        // Found a list with surplus, reset original hint
        // and split out a free chunk which is returned.
        _indexedFreeList[start].set_hint(hint);
        FreeChunk* res = getFromListGreater(fl, numWords);
        assert(res == NULL || res->isFree(),
          "Should be returning a free chunk");
        return res;
      }
      hint = fl->hint(); /* keep looking */
    }
    /* None found. */
    it[start].set_hint(IndexSetSize);
  }
  return NULL;
}
1803 /* Requires fl->size >= numWords + MinChunkSize */
1804 FreeChunk* CompactibleFreeListSpace::getFromListGreater(FreeList* fl,
1805 size_t numWords) {
1806 FreeChunk *curr = fl->head();
1807 size_t oldNumWords = curr->size();
1808 assert(numWords >= MinChunkSize, "Word size is too small");
1809 assert(curr != NULL, "List is empty");
1810 assert(oldNumWords >= numWords + MinChunkSize,
1811 "Size of chunks in the list is too small");
1813 fl->removeChunk(curr);
1814 // recorded indirectly by splitChunkAndReturnRemainder -
1815 // smallSplit(oldNumWords, numWords);
1816 FreeChunk* new_chunk = splitChunkAndReturnRemainder(curr, numWords);
1817 // Does anything have to be done for the remainder in terms of
1818 // fixing the card table?
1819 assert(new_chunk == NULL || new_chunk->isFree(),
1820 "Should be returning a free chunk");
1821 return new_chunk;
1822 }
// Split "chunk" into a front block of "new_size" words (returned) and
// a remainder that is put back on the free lists/dictionary. The
// ordering of operations matters for parallel GC threads: the
// remainder must look like a free block before the BOT is updated.
FreeChunk*
CompactibleFreeListSpace::splitChunkAndReturnRemainder(FreeChunk* chunk,
  size_t new_size) {
  assert_locked();
  size_t size = chunk->size();
  assert(size > new_size, "Split from a smaller block?");
  assert(is_aligned(chunk), "alignment problem");
  assert(size == adjustObjectSize(size), "alignment problem");
  size_t rem_size = size - new_size;
  assert(rem_size == adjustObjectSize(rem_size), "alignment problem");
  assert(rem_size >= MinChunkSize, "Free chunk smaller than minimum");
  FreeChunk* ffc = (FreeChunk*)((HeapWord*)chunk + new_size);
  assert(is_aligned(ffc), "alignment problem");
  ffc->setSize(rem_size);
  ffc->linkNext(NULL);
  ffc->linkPrev(NULL); // Mark as a free block for other (parallel) GC threads.
  // Above must occur before BOT is updated below.
  // adjust block offset table
  _bt.split_block((HeapWord*)chunk, chunk->size(), new_size);
  if (rem_size < SmallForDictionary) {
    // Small remainder goes on an indexed list; take that list's
    // parallel lock if other GC threads may be active.
    bool is_par = (SharedHeap::heap()->n_par_threads() > 0);
    if (is_par) _indexedFreeListParLocks[rem_size]->lock();
    returnChunkToFreeList(ffc);
    split(size, rem_size);
    if (is_par) _indexedFreeListParLocks[rem_size]->unlock();
  } else {
    returnChunkToDictionary(ffc);
    split(size ,rem_size);
  }
  chunk->setSize(new_size);
  return chunk;
}
// Called at the end of a sweep, when free space is likely plentiful:
// a good time to refill the linear allocation blocks.
void
CompactibleFreeListSpace::sweep_completed() {
  // Now that space is probably plentiful, refill linear
  // allocation blocks as needed.
  refillLinearAllocBlocksIfNeeded();
}
// GC prologue: optionally report free list statistics, then make sure
// the linear allocation blocks are refilled.
void
CompactibleFreeListSpace::gc_prologue() {
  assert_locked();
  if (PrintFLSStatistics != 0) {
    gclog_or_tty->print("Before GC:\n");
    reportFreeListStatistics();
  }
  refillLinearAllocBlocksIfNeeded();
}
// GC epilogue: stop tracking promotions, restore linear allocation
// blocks to look like ordinary free blocks, and optionally report
// free list statistics.
void
CompactibleFreeListSpace::gc_epilogue() {
  assert_locked();
  if (PrintGCDetails && Verbose && !_adaptive_freelists) {
    if (_smallLinearAllocBlock._word_size == 0)
      warning("CompactibleFreeListSpace(epilogue):: Linear allocation failure");
  }
  assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");
  _promoInfo.stopTrackingPromotions();
  repairLinearAllocationBlocks();
  // Print Space's stats
  if (PrintFLSStatistics != 0) {
    gclog_or_tty->print("After GC:\n");
    reportFreeListStatistics();
  }
}
// Iteration support, mostly delegated from a CMS generation

// Record the current "top" of used space and begin tracking promotions
// so that oop_since_save_marks_iterate* can later visit exactly the
// objects promoted after this point.
void CompactibleFreeListSpace::save_marks() {
  // mark the "end" of the used space at the time of this call;
  // note, however, that promoted objects from this point
  // on are tracked in the _promoInfo below.
  set_saved_mark_word(BlockOffsetArrayUseUnallocatedBlock ?
                      unallocated_block() : end());
  // inform allocator that promotions should be tracked.
  assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");
  _promoInfo.startTrackingPromotions();
}
// Returns true iff no objects have been promoted into this space since
// the last save_marks(). Must not be called during the parallel phase
// of a GC.
bool CompactibleFreeListSpace::no_allocs_since_save_marks() {
  assert(_promoInfo.tracking(), "No preceding save_marks?");
  guarantee(SharedHeap::heap()->n_par_threads() == 0,
            "Shouldn't be called (yet) during parallel part of gc.");
  return _promoInfo.noPromotions();
}
// Generates one oop_since_save_marks_iterate##nv_suffix definition per
// closure type; each iterates (via _promoInfo) over objects promoted
// since the last save_marks(). Instantiated for all closure types by
// ALL_SINCE_SAVE_MARKS_CLOSURES below.
#define CFLS_OOP_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix)          \
                                                                            \
void CompactibleFreeListSpace::                                             \
oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk) {              \
  assert(SharedHeap::heap()->n_par_threads() == 0,                          \
         "Shouldn't be called (yet) during parallel part of gc.");          \
  _promoInfo.promoted_oops_iterate##nv_suffix(blk);                         \
  /*                                                                        \
   * This also restores any displaced headers and removes the elements from \
   * the iteration set as they are processed, so that we have a clean slate \
   * at the end of the iteration. Note, thus, that if new objects are       \
   * promoted as a result of the iteration they are iterated over as well.  \
   */                                                                       \
  assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");            \
}

ALL_SINCE_SAVE_MARKS_CLOSURES(CFLS_OOP_SINCE_SAVE_MARKS_DEFN)
// Not yet implemented for this (non-contiguous) space; always fails.
void CompactibleFreeListSpace::object_iterate_since_last_GC(ObjectClosure* cl) {
  // ugghh... how would one do this efficiently for a non-contiguous space?
  guarantee(false, "NYI");
}
// True iff the small linear allocation block is currently empty.
bool CompactibleFreeListSpace::linearAllocationWouldFail() const {
  return _smallLinearAllocBlock._word_size == 0;
}
// Make all linear allocation blocks parsable as free blocks.
void CompactibleFreeListSpace::repairLinearAllocationBlocks() {
  // Fix up linear allocation blocks to look like free blocks
  repairLinearAllocBlock(&_smallLinearAllocBlock);
}
// Make the unconsumed tail of a linear allocation block look like a
// free (but uncoalescable) chunk, so the heap stays parsable.
void CompactibleFreeListSpace::repairLinearAllocBlock(LinearAllocBlock* blk) {
  assert_locked();
  if (blk->_ptr != NULL) {
    assert(blk->_word_size != 0 && blk->_word_size >= MinChunkSize,
           "Minimum block size requirement");
    FreeChunk* fc = (FreeChunk*)(blk->_ptr);
    fc->setSize(blk->_word_size);
    fc->linkPrev(NULL);   // mark as free
    fc->dontCoalesce();   // keep the sweeper from absorbing the linAB
    assert(fc->isFree(), "just marked it free");
    assert(fc->cantCoalesce(), "just marked it uncoalescable");
  }
}
// Reset the small linear allocation block's refill parameters when it
// is empty, then delegate the actual refill decision.
void CompactibleFreeListSpace::refillLinearAllocBlocksIfNeeded() {
  assert_locked();
  if (_smallLinearAllocBlock._ptr == NULL) {
    assert(_smallLinearAllocBlock._word_size == 0,
      "Size of linAB should be zero if the ptr is NULL");
    // Reset the linAB refill and allocation size limit.
    _smallLinearAllocBlock.set(0, 0, 1024*SmallForLinearAlloc, SmallForLinearAlloc);
  }
  refillLinearAllocBlockIfNeeded(&_smallLinearAllocBlock);
}
// Refill "blk" if it is empty, warning (when verbose) if the refill
// failed and the block remains empty.
void
CompactibleFreeListSpace::refillLinearAllocBlockIfNeeded(LinearAllocBlock* blk) {
  assert_locked();
  // Either both fields are clear (empty linAB) or both are set.
  assert((blk->_ptr == NULL && blk->_word_size == 0) ||
         (blk->_ptr != NULL && blk->_word_size >= MinChunkSize),
         "blk invariant");
  if (blk->_ptr == NULL) {
    refillLinearAllocBlock(blk);
  }
  if (PrintMiscellaneous && Verbose) {
    if (blk->_word_size == 0) {
      warning("CompactibleFreeListSpace(prologue):: Linear allocation failure");
    }
  }
}
// Refill an empty linear allocation block with a chunk of _refillSize
// words, preferring the indexed free lists when the size qualifies.
void
CompactibleFreeListSpace::refillLinearAllocBlock(LinearAllocBlock* blk) {
  assert_locked();
  assert(blk->_word_size == 0 && blk->_ptr == NULL,
         "linear allocation block should be empty");
  FreeChunk* fc;
  if (blk->_refillSize < SmallForDictionary &&
      (fc = getChunkFromIndexedFreeList(blk->_refillSize)) != NULL) {
    // Got a chunk from the indexed lists; nothing more to do here.
    // A linAB's strategy might be to use small sizes to reduce
    // fragmentation but still get the benefits of allocation from a
    // linAB.
  } else {
    fc = getChunkFromDictionary(blk->_refillSize);
  }
  if (fc != NULL) {
    blk->_ptr  = (HeapWord*)fc;
    blk->_word_size = fc->size();
    fc->dontCoalesce();   // to prevent sweeper from sweeping us up
  }
}
// Support for concurrent collection policy decisions.
// With non-adaptive free lists, a failed linear allocation is taken as
// a signal that a concurrent collection should be started.
bool CompactibleFreeListSpace::should_concurrent_collect() const {
  // In the future we might want to add in fragmentation stats --
  // including erosion of the "mountain" into this decision as well.
  return !adaptive_freelists() && linearAllocationWouldFail();
}
// Support for compaction

// Compute forwarding addresses for all live objects (mark-compact
// phase 1), threading the dead space between them.
void CompactibleFreeListSpace::prepare_for_compaction(CompactPoint* cp) {
  SCAN_AND_FORWARD(cp,end,block_is_obj,block_size);
  // prepare_for_compaction() uses the space between live objects
  // so that later phase can skip dead space quickly.  So verification
  // of the free lists doesn't work after.
}
// Size helpers used by the SCAN_AND_* compaction macros below.
#define obj_size(q) adjustObjectSize(oop(q)->size())
#define adjust_obj_size(s) adjustObjectSize(s)

// Mark-compact phase: update all interior pointers to refer to the
// objects' forwarding addresses.
void CompactibleFreeListSpace::adjust_pointers() {
  // In other versions of adjust_pointers(), a bail out
  // based on the amount of live data in the generation
  // (i.e., if 0, bail out) may be used.
  // Cannot test used() == 0 here because the free lists have already
  // been mangled by the compaction.

  SCAN_AND_ADJUST_POINTERS(adjust_obj_size);
  // See note about verification in prepare_for_compaction().
}
// Mark-compact phase: slide live objects to their forwarding addresses.
void CompactibleFreeListSpace::compact() {
  SCAN_AND_COMPACT(obj_size);
}
// fragmentation_metric = 1 - [sum of (fbs**2) / (sum of fbs)**2]
// where fbs is free block sizes
// Result is in [0, 1]: 0 means all free space is in one block,
// values approaching 1 mean many small blocks.
double CompactibleFreeListSpace::flsFrag() const {
  size_t itabFree = totalSizeInIndexedFreeLists();
  double frag = 0.0;
  size_t i;

  // Accumulate sum of squared block sizes over the indexed lists.
  for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
    double sz  = i;
    frag      += _indexedFreeList[i].count() * (sz * sz);
  }

  double totFree = itabFree +
                   _dictionary->totalChunkSize(DEBUG_ONLY(freelistLock()));
  if (totFree > 0) {
    frag = ((frag + _dictionary->sum_of_squared_block_sizes()) /
            (totFree * totFree));
    frag = (double)1.0  - frag;
  } else {
    assert(frag == 0.0, "Follows from totFree == 0");
  }
  return frag;
}
2064 void CompactibleFreeListSpace::beginSweepFLCensus(
2065 float inter_sweep_current,
2066 float inter_sweep_estimate,
2067 float intra_sweep_estimate) {
2068 assert_locked();
2069 size_t i;
2070 for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2071 FreeList* fl = &_indexedFreeList[i];
2072 if (PrintFLSStatistics > 1) {
2073 gclog_or_tty->print("size[%d] : ", i);
2074 }
2075 fl->compute_desired(inter_sweep_current, inter_sweep_estimate, intra_sweep_estimate);
2076 fl->set_coalDesired((ssize_t)((double)fl->desired() * CMSSmallCoalSurplusPercent));
2077 fl->set_beforeSweep(fl->count());
2078 fl->set_bfrSurp(fl->surplus());
2079 }
2080 _dictionary->beginSweepDictCensus(CMSLargeCoalSurplusPercent,
2081 inter_sweep_current,
2082 inter_sweep_estimate,
2083 intra_sweep_estimate);
2084 }
2086 void CompactibleFreeListSpace::setFLSurplus() {
2087 assert_locked();
2088 size_t i;
2089 for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2090 FreeList *fl = &_indexedFreeList[i];
2091 fl->set_surplus(fl->count() -
2092 (ssize_t)((double)fl->desired() * CMSSmallSplitSurplusPercent));
2093 }
2094 }
// Recompute hints: walking the index table from largest to smallest,
// each list's hint is set to the index of the nearest larger list that
// currently has a surplus (IndexSetSize if none).
void CompactibleFreeListSpace::setFLHints() {
  assert_locked();
  size_t i;
  size_t h = IndexSetSize;   // nearest larger surplus seen so far
  for (i = IndexSetSize - 1; i != 0; i -= IndexSetStride) {
    FreeList *fl = &_indexedFreeList[i];
    fl->set_hint(h);
    if (fl->surplus() > 0) {
      h = i;
    }
  }
}
2109 void CompactibleFreeListSpace::clearFLCensus() {
2110 assert_locked();
2111 int i;
2112 for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2113 FreeList *fl = &_indexedFreeList[i];
2114 fl->set_prevSweep(fl->count());
2115 fl->set_coalBirths(0);
2116 fl->set_coalDeaths(0);
2117 fl->set_splitBirths(0);
2118 fl->set_splitDeaths(0);
2119 }
2120 }
// Finish the per-sweep census: recompute surpluses and hints, optionally
// print the census, clear the per-sweep counters, and close out the
// dictionary's census.
void CompactibleFreeListSpace::endSweepFLCensus(size_t sweep_count) {
  if (PrintFLSStatistics > 0) {
    HeapWord* largestAddr = (HeapWord*) dictionary()->findLargestDict();
    gclog_or_tty->print_cr("CMS: Large block " PTR_FORMAT,
                           largestAddr);
  }
  setFLSurplus();
  setFLHints();
  if (PrintGC && PrintFLSCensus > 0) {
    printFLCensus(sweep_count);
  }
  clearFLCensus();
  assert_locked();
  _dictionary->endSweepDictCensus(CMSLargeSplitSurplusPercent);
}
// Returns true if there are already more blocks of the given size than
// the census deems desirable for coalescing purposes.
bool CompactibleFreeListSpace::coalOverPopulated(size_t size) {
  if (size < SmallForDictionary) {
    FreeList *fl = &_indexedFreeList[size];
    // A negative coalDesired also counts as over-populated.
    return (fl->coalDesired() < 0) ||
           ((int)fl->count() > fl->coalDesired());
  } else {
    return dictionary()->coalDictOverPopulated(size);
  }
}
// Census update: a block of "size" was created by coalescing.
void CompactibleFreeListSpace::smallCoalBirth(size_t size) {
  assert(size < SmallForDictionary, "Size too large for indexed list");
  FreeList *fl = &_indexedFreeList[size];
  fl->increment_coalBirths();
  fl->increment_surplus();
}
// Census update: a block of "size" was consumed by coalescing.
void CompactibleFreeListSpace::smallCoalDeath(size_t size) {
  assert(size <  SmallForDictionary, "Size too large for indexed list");
  FreeList *fl = &_indexedFreeList[size];
  fl->increment_coalDeaths();
  fl->decrement_surplus();
}
// Census update: record a coalescing birth, dispatching to the indexed
// lists or the dictionary based on size.
void CompactibleFreeListSpace::coalBirth(size_t size) {
  if (size  < SmallForDictionary) {
    smallCoalBirth(size);
  } else {
    dictionary()->dictCensusUpdate(size,
                                   false /* split */,
                                   true /* birth */);
  }
}
2172 void CompactibleFreeListSpace::coalDeath(size_t size) {
2173 if(size < SmallForDictionary) {
2174 smallCoalDeath(size);
2175 } else {
2176 dictionary()->dictCensusUpdate(size,
2177 false /* split */,
2178 false /* birth */);
2179 }
2180 }
// Census update: a block of "size" was created by splitting.
void CompactibleFreeListSpace::smallSplitBirth(size_t size) {
  assert(size < SmallForDictionary, "Size too large for indexed list");
  FreeList *fl = &_indexedFreeList[size];
  fl->increment_splitBirths();
  fl->increment_surplus();
}
// Census update: a block of "size" was consumed by splitting.
void CompactibleFreeListSpace::smallSplitDeath(size_t size) {
  assert(size < SmallForDictionary, "Size too large for indexed list");
  FreeList *fl = &_indexedFreeList[size];
  fl->increment_splitDeaths();
  fl->decrement_surplus();
}
// Census update: record a split birth, dispatching to the indexed
// lists or the dictionary based on size.
void CompactibleFreeListSpace::splitBirth(size_t size) {
  if (size < SmallForDictionary) {
    smallSplitBirth(size);
  } else {
    dictionary()->dictCensusUpdate(size,
                                   true /* split */,
                                   true /* birth */);
  }
}
// Census update: record a split death, dispatching to the indexed
// lists or the dictionary based on size.
void CompactibleFreeListSpace::splitDeath(size_t size) {
  if (size < SmallForDictionary) {
    smallSplitDeath(size);
  } else {
    dictionary()->dictCensusUpdate(size,
                                   true /* split */,
                                   false /* birth */);
  }
}
// Census update for a split of a "from"-sized block into blocks of
// to1 and (from - to1) words: one death, two births.
void CompactibleFreeListSpace::split(size_t from, size_t to1) {
  size_t to2 = from - to1;
  splitDeath(from);
  splitBirth(to1);
  splitBirth(to2);
}
// Print a one-line description of this space (delegates to Space).
void CompactibleFreeListSpace::print() const {
  tty->print(" CompactibleFreeListSpace");
  Space::print();
}
// Put the space in a state where verify() can parse it: make linear
// allocation blocks look like free blocks.
void CompactibleFreeListSpace::prepare_for_verify() {
  assert_locked();
  repairLinearAllocationBlocks();
  // Verify that the SpoolBlocks look like free blocks of
  // appropriate sizes... To be done ...
}
// Walks every block in the space, verifying that each is either a
// valid object (verified if alive) or a free chunk that, when
// coalescable and FLSVerifyLists is set, appears on some free list.
class VerifyAllBlksClosure: public BlkClosure {
 private:
  const CompactibleFreeListSpace* _sp;
  const MemRegion                 _span;

 public:
  VerifyAllBlksClosure(const CompactibleFreeListSpace* sp,
    MemRegion span) :  _sp(sp), _span(span) { }

  // Returns the size of the block at addr; a zero return would mean
  // no forward progress, hence the livelock guarantee below.
  virtual size_t do_blk(HeapWord* addr) {
    size_t res;
    if (_sp->block_is_obj(addr)) {
      oop p = oop(addr);
      guarantee(p->is_oop(), "Should be an oop");
      res = _sp->adjustObjectSize(p->size());
      if (_sp->obj_is_alive(addr)) {
        p->verify();
      }
    } else {
      FreeChunk* fc = (FreeChunk*)addr;
      res = fc->size();
      if (FLSVerifyLists && !fc->cantCoalesce()) {
        guarantee(_sp->verifyChunkInFreeLists(fc),
                  "Chunk should be on a free list");
      }
    }
    guarantee(res != 0, "Livelock: no rank reduction!");
    return res;
  }
};
// Verifies every oop reference with respect to the CMS space: checks
// validity of referents, and -- once remark is past -- consistency of
// mark-bitmap information between referrers and referents.
class VerifyAllOopsClosure: public OopClosure {
 private:
  const CMSCollector*             _collector;
  const CompactibleFreeListSpace* _sp;
  const MemRegion                 _span;
  const bool                      _past_remark;
  const CMSBitMap*                _bit_map;

 protected:
  // p is the location holding the reference; obj is the referent.
  void do_oop(void* p, oop obj) {
    if (_span.contains(obj)) { // the interior oop points into CMS heap
      if (!_span.contains(p)) { // reference from outside CMS heap
        // Should be a valid object; the first disjunct below allows
        // us to sidestep an assertion in block_is_obj() that insists
        // that p be in _sp.  Note that several generations (and spaces)
        // are spanned by _span (CMS heap) above.
        guarantee(!_sp->is_in_reserved(obj) ||
                  _sp->block_is_obj((HeapWord*)obj),
                  "Should be an object");
        guarantee(obj->is_oop(), "Should be an oop");
        obj->verify();
        if (_past_remark) {
          // Remark has been completed, the object should be marked
          _bit_map->isMarked((HeapWord*)obj);
        }
      } else { // reference within CMS heap
        if (_past_remark) {
          // Remark has been completed -- so the referent should have
          // been marked, if referring object is.
          if (_bit_map->isMarked(_collector->block_start(p))) {
            guarantee(_bit_map->isMarked((HeapWord*)obj), "Marking error?");
          }
        }
      }
    } else if (_sp->is_in_reserved(p)) {
      // the reference is from FLS, and points out of FLS
      guarantee(obj->is_oop(), "Should be an oop");
      obj->verify();
    }
  }

  // Decode (possibly narrow) heap oop at p and verify it if non-null.
  template <class T> void do_oop_work(T* p) {
    T heap_oop = oopDesc::load_heap_oop(p);
    if (!oopDesc::is_null(heap_oop)) {
      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
      do_oop(p, obj);
    }
  }

 public:
  VerifyAllOopsClosure(const CMSCollector* collector,
    const CompactibleFreeListSpace* sp, MemRegion span,
    bool past_remark, CMSBitMap* bit_map) :
    OopClosure(), _collector(collector), _sp(sp), _span(span),
    _past_remark(past_remark), _bit_map(bit_map) { }

  virtual void do_oop(oop* p)       { VerifyAllOopsClosure::do_oop_work(p); }
  virtual void do_oop(narrowOop* p) { VerifyAllOopsClosure::do_oop_work(p); }
};
// Full verification of the space: internal free list / promotion-info
// consistency, a walk of all blocks, optionally all heap references
// into/out of this space, and the block offset table.
void CompactibleFreeListSpace::verify(bool ignored) const {
  assert_lock_strong(&_freelistLock);
  verify_objects_initialized();
  MemRegion span = _collector->_span;
  bool past_remark = (_collector->abstract_state() ==
                      CMSCollector::Sweeping);

  ResourceMark rm;
  HandleMark  hm;

  // Check integrity of CFL data structures
  _promoInfo.verify();
  _dictionary->verify();
  if (FLSVerifyIndexTable) {
    verifyIndexedFreeLists();
  }
  // Check integrity of all objects and free blocks in space
  {
    VerifyAllBlksClosure cl(this, span);
    ((CompactibleFreeListSpace*)this)->blk_iterate(&cl);  // cast off const
  }
  // Check that all references in the heap to FLS
  // are to valid objects in FLS or that references in
  // FLS are to valid objects elsewhere in the heap
  if (FLSVerifyAllHeapReferences)
  {
    VerifyAllOopsClosure cl(_collector, this, span, past_remark,
      _collector->markBitMap());
    CollectedHeap* ch = Universe::heap();
    ch->oop_iterate(&cl);              // all oops in generations
    ch->permanent_oop_iterate(&cl);    // all oops in perm gen
  }

  if (VerifyObjectStartArray) {
    // Verify the block offset table
    _bt.verify();
  }
}
#ifndef PRODUCT
// Debug-only verification of the free data structures, scoped by the
// FLSVerify* flags: FLSVerifyLists checks both, otherwise the
// dictionary and index table are checked individually per their flags.
void CompactibleFreeListSpace::verifyFreeLists() const {
  if (FLSVerifyLists) {
    _dictionary->verify();
    verifyIndexedFreeLists();
  } else {
    if (FLSVerifyDictionary) {
      _dictionary->verify();
    }
    if (FLSVerifyIndexTable) {
      verifyIndexedFreeLists();
    }
  }
}
#endif
// Verify every indexed free list: sizes below MinChunkSize must be
// empty; the rest are checked individually.
void CompactibleFreeListSpace::verifyIndexedFreeLists() const {
  size_t i = 0;
  for (; i < MinChunkSize; i++) {
    guarantee(_indexedFreeList[i].head() == NULL, "should be NULL");
  }
  for (; i < IndexSetSize; i++) {
    verifyIndexedFreeList(i);
  }
}
// Verify one indexed free list: every chunk has the list's size, is
// marked free, the doubly-linked structure is intact, the tail is the
// last element, and the cached count matches the actual length.
void CompactibleFreeListSpace::verifyIndexedFreeList(size_t size) const {
  FreeChunk* fc   =  _indexedFreeList[size].head();
  FreeChunk* tail =  _indexedFreeList[size].tail();
  size_t    num = _indexedFreeList[size].count();
  size_t      n = 0;
  guarantee((size % 2 == 0) || fc == NULL, "Odd slots should be empty");
  for (; fc != NULL; fc = fc->next(), n++) {
    guarantee(fc->size() == size, "Size inconsistency");
    guarantee(fc->isFree(), "!free?");
    guarantee(fc->next() == NULL || fc->next()->prev() == fc, "Broken list");
    guarantee((fc->next() == NULL) == (fc == tail), "Incorrect tail");
  }
  guarantee(n == num, "Incorrect count");
}
#ifndef PRODUCT
// Static sanity checks on the relationships among the free-list sizing
// constants (IndexSet*, MinChunkSize, dictionary minimum, TreeChunk).
void CompactibleFreeListSpace::checkFreeListConsistency() const {
  assert(_dictionary->minSize() <= IndexSetSize,
    "Some sizes can't be allocated without recourse to"
    " linear allocation buffers");
  assert(MIN_TREE_CHUNK_SIZE*HeapWordSize == sizeof(TreeChunk),
    "else MIN_TREE_CHUNK_SIZE is wrong");
  assert((IndexSetStride == 2 && IndexSetStart == 2) ||
         (IndexSetStride == 1 && IndexSetStart == 1), "just checking");
  assert((IndexSetStride != 2) || (MinChunkSize % 2 == 0),
      "Some for-loops may be incorrectly initialized");
  assert((IndexSetStride != 2) || (IndexSetSize % 2 == 1),
      "For-loops that iterate over IndexSet with stride 2 may be wrong");
}
#endif
// Print the end-of-sweep free list census: one row per indexed list,
// a TOTAL row, total free words in the indexed lists, derived growth
// and deficit ratios, and finally the dictionary's census.
void CompactibleFreeListSpace::printFLCensus(size_t sweep_count) const {
  assert_lock_strong(&_freelistLock);
  FreeList total;
  gclog_or_tty->print("end sweep# " SIZE_FORMAT "\n", sweep_count);
  FreeList::print_labels_on(gclog_or_tty, "size");
  size_t totalFree = 0;
  for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
    const FreeList *fl = &_indexedFreeList[i];
    totalFree += fl->count() * fl->size();
    // Re-print the column labels periodically for readability.
    if (i % (40*IndexSetStride) == 0) {
      FreeList::print_labels_on(gclog_or_tty, "size");
    }
    fl->print_on(gclog_or_tty);
    // Accumulate every census statistic into the running total.
    total.set_bfrSurp(    total.bfrSurp()     + fl->bfrSurp()    );
    total.set_surplus(    total.surplus()     + fl->surplus()    );
    total.set_desired(    total.desired()     + fl->desired()    );
    total.set_prevSweep(  total.prevSweep()   + fl->prevSweep()  );
    total.set_beforeSweep(total.beforeSweep() + fl->beforeSweep());
    total.set_count(      total.count()       + fl->count()      );
    total.set_coalBirths( total.coalBirths()  + fl->coalBirths() );
    total.set_coalDeaths( total.coalDeaths()  + fl->coalDeaths() );
    total.set_splitBirths(total.splitBirths() + fl->splitBirths());
    total.set_splitDeaths(total.splitDeaths() + fl->splitDeaths());
  }
  total.print_on(gclog_or_tty, "TOTAL");
  gclog_or_tty->print_cr("Total free in indexed lists "
                         SIZE_FORMAT " words", totalFree);
  gclog_or_tty->print("growth: %8.5f  deficit: %8.5f\n",
    (double)(total.splitBirths()+total.coalBirths()-total.splitDeaths()-total.coalDeaths())/
            (total.prevSweep() != 0 ? (double)total.prevSweep() : 1.0),
    (double)(total.desired() - total.count())/(total.desired() != 0 ? (double)total.desired() : 1.0));
  _dictionary->printDictCensus();
}
2456 ///////////////////////////////////////////////////////////////////////////
2457 // CFLS_LAB
2458 ///////////////////////////////////////////////////////////////////////////
// Expands to a 257-element brace-enclosed initializer, every element
// being "x". Matches CompactibleFreeListSpace::IndexSetSize (asserted
// to be 257 in the CFLS_LAB constructor).
#define VECTOR_257(x)                                                                                  \
  /* 1  2  3  4  5  6  7  8  9 1x 11 12 13 14 15 16 17 18 19 2x 21 22 23 24 25 26 27 28 29 3x 31 32 */ \
  {  x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
     x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
     x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
     x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
     x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
     x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
     x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
     x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
     x }
// Initialize with default setting of CMSParPromoteBlocksToClaim, _not_
// OldPLABSize, whose static default is different; if overridden at the
// command-line, this will get reinitialized via a call to
// modify_initialization() below.
// Per-size-class global state shared by all CFLS_LAB instances.
AdaptiveWeightedAverage CFLS_LAB::_blocks_to_claim[]    =
  VECTOR_257(AdaptiveWeightedAverage(OldPLABWeight, (float)CMSParPromoteBlocksToClaim));
size_t CFLS_LAB::_global_num_blocks[]  = VECTOR_257(0);
int    CFLS_LAB::_global_num_workers[] = VECTOR_257(0);
// Construct a per-thread local allocation buffer over the given space,
// initializing one (empty) local free list per indexed size class.
CFLS_LAB::CFLS_LAB(CompactibleFreeListSpace* cfls) :
  _cfls(cfls)
{
  assert(CompactibleFreeListSpace::IndexSetSize == 257, "Modify VECTOR_257() macro above");
  for (size_t i = CompactibleFreeListSpace::IndexSetStart;
       i < CompactibleFreeListSpace::IndexSetSize;
       i += CompactibleFreeListSpace::IndexSetStride) {
    _indexedFreeList[i].set_size(i);
    _num_blocks[i] = 0;
  }
}
// Guards against modify_initialization() being applied more than once.
static bool _CFLS_LAB_modified = false;

// One-time override of the blocks-to-claim averages (used when the
// relevant sizing flag is set on the command line; see the comment on
// _blocks_to_claim above).
void CFLS_LAB::modify_initialization(size_t n, unsigned wt) {
  assert(!_CFLS_LAB_modified, "Call only once");
  _CFLS_LAB_modified = true;
  for (size_t i = CompactibleFreeListSpace::IndexSetStart;
       i < CompactibleFreeListSpace::IndexSetSize;
       i += CompactibleFreeListSpace::IndexSetStride) {
    _blocks_to_claim[i].modify(n, wt, true /* force */);
  }
}
// Allocate word_sz (unadjusted) words for promotion. Large requests go
// straight to the shared dictionary under its lock; small requests are
// served from this LAB's local free list, refilled from the shared
// pool when empty. Returns NULL on failure.
HeapWord* CFLS_LAB::alloc(size_t word_sz) {
  FreeChunk* res;
  word_sz = _cfls->adjustObjectSize(word_sz);
  if (word_sz >=  CompactibleFreeListSpace::IndexSetSize) {
    // This locking manages sync with other large object allocations.
    MutexLockerEx x(_cfls->parDictionaryAllocLock(),
                    Mutex::_no_safepoint_check_flag);
    res = _cfls->getChunkFromDictionaryExact(word_sz);
    if (res == NULL) return NULL;
  } else {
    FreeList* fl = &_indexedFreeList[word_sz];
    if (fl->count() == 0) {
      // Attempt to refill this local free list.
      get_from_global_pool(word_sz, fl);
      // If it didn't work, give up.
      if (fl->count() == 0) return NULL;
    }
    res = fl->getChunkAtHead();
    assert(res != NULL, "Why was count non-zero?");
  }
  res->markNotFree();
  assert(!res->isFree(), "shouldn't be marked free");
  assert(oop(res)->klass_or_null() == NULL, "should look uninitialized");
  // mangle a just allocated object with a distinct pattern.
  debug_only(res->mangleAllocated(word_sz));
  return (HeapWord*)res;
}
2533 // Get a chunk of blocks of the right size and update related
2534 // book-keeping stats
2535 void CFLS_LAB::get_from_global_pool(size_t word_sz, FreeList* fl) {
2536 // Get the #blocks we want to claim
2537 size_t n_blks = (size_t)_blocks_to_claim[word_sz].average();
2538 assert(n_blks > 0, "Error");
2539 assert(ResizePLAB || n_blks == OldPLABSize, "Error");
2540 // In some cases, when the application has a phase change,
2541 // there may be a sudden and sharp shift in the object survival
2542 // profile, and updating the counts at the end of a scavenge
2543 // may not be quick enough, giving rise to large scavenge pauses
2544 // during these phase changes. It is beneficial to detect such
2545 // changes on-the-fly during a scavenge and avoid such a phase-change
2546 // pothole. The following code is a heuristic attempt to do that.
2547 // It is protected by a product flag until we have gained
2548 // enough experience with this heuristic and fine-tuned its behaviour.
2549 // WARNING: This might increase fragmentation if we overreact to
2550 // small spikes, so some kind of historical smoothing based on
2551 // previous experience with the greater reactivity might be useful.
2552 // Lacking sufficient experience, CMSOldPLABResizeQuicker is disabled by
2553 // default.
2554 if (ResizeOldPLAB && CMSOldPLABResizeQuicker) {
2555 size_t multiple = _num_blocks[word_sz]/(CMSOldPLABToleranceFactor*CMSOldPLABNumRefills*n_blks);
2556 n_blks += CMSOldPLABReactivityFactor*multiple*n_blks;
2557 n_blks = MIN2(n_blks, CMSOldPLABMax);
2558 }
2559 assert(n_blks > 0, "Error");
2560 _cfls->par_get_chunk_of_blocks(word_sz, n_blks, fl);
2561 // Update stats table entry for this block size
2562 _num_blocks[word_sz] += fl->count();
2563 }
2565 void CFLS_LAB::compute_desired_plab_size() {
2566 for (size_t i = CompactibleFreeListSpace::IndexSetStart;
2567 i < CompactibleFreeListSpace::IndexSetSize;
2568 i += CompactibleFreeListSpace::IndexSetStride) {
2569 assert((_global_num_workers[i] == 0) == (_global_num_blocks[i] == 0),
2570 "Counter inconsistency");
2571 if (_global_num_workers[i] > 0) {
2572 // Need to smooth wrt historical average
2573 if (ResizeOldPLAB) {
2574 _blocks_to_claim[i].sample(
2575 MAX2((size_t)CMSOldPLABMin,
2576 MIN2((size_t)CMSOldPLABMax,
2577 _global_num_blocks[i]/(_global_num_workers[i]*CMSOldPLABNumRefills))));
2578 }
2579 // Reset counters for next round
2580 _global_num_workers[i] = 0;
2581 _global_num_blocks[i] = 0;
2582 if (PrintOldPLAB) {
2583 gclog_or_tty->print_cr("[%d]: %d", i, (size_t)_blocks_to_claim[i].average());
2584 }
2585 }
2586 }
2587 }
2589 void CFLS_LAB::retire(int tid) {
2590 // We run this single threaded with the world stopped;
2591 // so no need for locks and such.
2592 #define CFLS_LAB_PARALLEL_ACCESS 0
2593 NOT_PRODUCT(Thread* t = Thread::current();)
2594 assert(Thread::current()->is_VM_thread(), "Error");
2595 assert(CompactibleFreeListSpace::IndexSetStart == CompactibleFreeListSpace::IndexSetStride,
2596 "Will access to uninitialized slot below");
2597 #if CFLS_LAB_PARALLEL_ACCESS
2598 for (size_t i = CompactibleFreeListSpace::IndexSetSize - 1;
2599 i > 0;
2600 i -= CompactibleFreeListSpace::IndexSetStride) {
2601 #else // CFLS_LAB_PARALLEL_ACCESS
2602 for (size_t i = CompactibleFreeListSpace::IndexSetStart;
2603 i < CompactibleFreeListSpace::IndexSetSize;
2604 i += CompactibleFreeListSpace::IndexSetStride) {
2605 #endif // !CFLS_LAB_PARALLEL_ACCESS
2606 assert(_num_blocks[i] >= (size_t)_indexedFreeList[i].count(),
2607 "Can't retire more than what we obtained");
2608 if (_num_blocks[i] > 0) {
2609 size_t num_retire = _indexedFreeList[i].count();
2610 assert(_num_blocks[i] > num_retire, "Should have used at least one");
2611 {
2612 #if CFLS_LAB_PARALLEL_ACCESS
2613 MutexLockerEx x(_cfls->_indexedFreeListParLocks[i],
2614 Mutex::_no_safepoint_check_flag);
2615 #endif // CFLS_LAB_PARALLEL_ACCESS
2616 // Update globals stats for num_blocks used
2617 _global_num_blocks[i] += (_num_blocks[i] - num_retire);
2618 _global_num_workers[i]++;
2619 assert(_global_num_workers[i] <= (ssize_t)ParallelGCThreads, "Too big");
2620 if (num_retire > 0) {
2621 _cfls->_indexedFreeList[i].prepend(&_indexedFreeList[i]);
2622 // Reset this list.
2623 _indexedFreeList[i] = FreeList();
2624 _indexedFreeList[i].set_size(i);
2625 }
2626 }
2627 if (PrintOldPLAB) {
2628 gclog_or_tty->print_cr("%d[%d]: %d/%d/%d",
2629 tid, i, num_retire, _num_blocks[i], (size_t)_blocks_to_claim[i].average());
2630 }
2631 // Reset stats for next round
2632 _num_blocks[i] = 0;
2633 }
2634 }
2635 }
// Satisfy a (parallel) worker's request for "n" blocks of size "word_sz"
// by transferring them onto "fl", which must arrive empty. Strategy:
// first try to take — and, for larger multiples, split — chunks from the
// indexed free lists; failing that, carve the blocks out of a single
// larger chunk obtained from the dictionary. On total failure "fl" is
// simply left empty; no error is signalled.
void CompactibleFreeListSpace:: par_get_chunk_of_blocks(size_t word_sz, size_t n, FreeList* fl) {
  assert(fl->count() == 0, "Precondition.");
  assert(word_sz < CompactibleFreeListSpace::IndexSetSize,
         "Precondition");

  // We'll try all multiples of word_sz in the indexed set, starting with
  // word_sz itself and, if CMSSplitIndexedFreeListBlocks, try larger multiples,
  // then try getting a big chunk and splitting it.
  {
    bool found;
    int k;
    size_t cur_sz;
    for (k = 1, cur_sz = k * word_sz, found = false;
         (cur_sz < CompactibleFreeListSpace::IndexSetSize) &&
         (CMSSplitIndexedFreeListBlocks || k <= 1);
         k++, cur_sz = k * word_sz) {
      FreeList* gfl = &_indexedFreeList[cur_sz];
      FreeList fl_for_cur_sz;  // Empty.
      fl_for_cur_sz.set_size(cur_sz);
      {
        // Per-size lock guards the shared indexed list for cur_sz.
        MutexLockerEx x(_indexedFreeListParLocks[cur_sz],
                        Mutex::_no_safepoint_check_flag);
        if (gfl->count() != 0) {
          // nn is the number of chunks of size cur_sz that
          // we'd need to split k-ways each, in order to create
          // "n" chunks of size word_sz each.
          const size_t nn = MAX2(n/k, (size_t)1);
          gfl->getFirstNChunksFromList(nn, &fl_for_cur_sz);
          found = true;
          if (k > 1) {
            // Update split death stats for the cur_sz-size blocks list:
            // we increment the split death count by the number of blocks
            // we just took from the cur_sz-size blocks list and which
            // we will be splitting below.
            ssize_t deaths = _indexedFreeList[cur_sz].splitDeaths() +
                             fl_for_cur_sz.count();
            _indexedFreeList[cur_sz].set_splitDeaths(deaths);
          }
        }
      }
      // Now transfer fl_for_cur_sz to fl.  Common case, we hope, is k = 1.
      if (found) {
        if (k == 1) {
          fl->prepend(&fl_for_cur_sz);
        } else {
          // Divide each block on fl_for_cur_sz up k ways.
          FreeChunk* fc;
          while ((fc = fl_for_cur_sz.getChunkAtHead()) != NULL) {
            // Must do this in reverse order, so that anybody attempting to
            // access the main chunk sees it as a single free block until we
            // change it.
            size_t fc_size = fc->size();
            for (int i = k-1; i >= 0; i--) {
              FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz);
              ffc->setSize(word_sz);
              ffc->linkNext(NULL);
              ffc->linkPrev(NULL); // Mark as a free block for other (parallel) GC threads.
              // Above must occur before BOT is updated below.
              // splitting from the right, fc_size == (k - i + 1) * wordsize
              _bt.mark_block((HeapWord*)ffc, word_sz);
              fc_size -= word_sz;
              _bt.verify_not_unallocated((HeapWord*)ffc, ffc->size());
              _bt.verify_single_block((HeapWord*)fc, fc_size);
              _bt.verify_single_block((HeapWord*)ffc, ffc->size());
              // Push this on "fl".
              fl->returnChunkAtHead(ffc);
            }
            // TRAP
            assert(fl->tail()->next() == NULL, "List invariant.");
          }
        }
        // Update birth stats for this block size.
        size_t num = fl->count();
        MutexLockerEx x(_indexedFreeListParLocks[word_sz],
                        Mutex::_no_safepoint_check_flag);
        ssize_t births = _indexedFreeList[word_sz].splitBirths() + num;
        _indexedFreeList[word_sz].set_splitBirths(births);
        return;
      }
    }
  }
  // Otherwise, we'll split a block from the dictionary.
  FreeChunk* fc = NULL;
  FreeChunk* rem_fc = NULL;
  size_t rem;
  {
    MutexLockerEx x(parDictionaryAllocLock(),
                    Mutex::_no_safepoint_check_flag);
    // Shrink the request until the dictionary can satisfy it; each
    // failed probe lowers n by one block's worth.
    while (n > 0) {
      fc = dictionary()->getChunk(MAX2(n * word_sz,
                                  _dictionary->minSize()),
                                  FreeBlockDictionary::atLeast);
      if (fc != NULL) {
        _bt.allocated((HeapWord*)fc, fc->size());  // update _unallocated_blk
        dictionary()->dictCensusUpdate(fc->size(),
                                       true /*split*/,
                                       false /*birth*/);
        break;
      } else {
        n--;
      }
    }
    if (fc == NULL) return;
    assert((ssize_t)n >= 1, "Control point invariant");
    // Otherwise, split up that block.
    const size_t nn = fc->size() / word_sz;
    n = MIN2(nn, n);
    assert((ssize_t)n >= 1, "Control point invariant");
    rem = fc->size() - n * word_sz;
    // If there is a remainder, and it's too small, allocate one fewer.
    if (rem > 0 && rem < MinChunkSize) {
      n--; rem += word_sz;
    }
    // Note that at this point we may have n == 0.
    assert((ssize_t)n >= 0, "Control point invariant");

    // If n is 0, the chunk fc that was found is not large
    // enough to leave a viable remainder.  We are unable to
    // allocate even one block.  Return fc to the
    // dictionary and return, leaving "fl" empty.
    if (n == 0) {
      returnChunkToDictionary(fc);
      return;
    }

    // First return the remainder, if any.
    // Note that we hold the lock until we decide if we're going to give
    // back the remainder to the dictionary, since a concurrent allocation
    // may otherwise see the heap as empty.  (We're willing to take that
    // hit if the block is a small block.)
    if (rem > 0) {
      size_t prefix_size = n * word_sz;
      rem_fc = (FreeChunk*)((HeapWord*)fc + prefix_size);
      rem_fc->setSize(rem);
      rem_fc->linkNext(NULL);
      rem_fc->linkPrev(NULL); // Mark as a free block for other (parallel) GC threads.
      // Above must occur before BOT is updated below.
      assert((ssize_t)n > 0 && prefix_size > 0 && rem_fc > fc, "Error");
      _bt.split_block((HeapWord*)fc, fc->size(), prefix_size);
      if (rem >= IndexSetSize) {
        // Remainder too big for the indexed lists: back to the dictionary.
        returnChunkToDictionary(rem_fc);
        dictionary()->dictCensusUpdate(rem, true /*split*/, true /*birth*/);
        rem_fc = NULL;
      }
      // Otherwise, return it to the small list below.
    }
  }
  if (rem_fc != NULL) {
    MutexLockerEx x(_indexedFreeListParLocks[rem],
                    Mutex::_no_safepoint_check_flag);
    _bt.verify_not_unallocated((HeapWord*)rem_fc, rem_fc->size());
    _indexedFreeList[rem].returnChunkAtHead(rem_fc);
    smallSplitBirth(rem);
  }
  assert((ssize_t)n > 0 && fc != NULL, "Consistency");
  // Now do the splitting up.
  // Must do this in reverse order, so that anybody attempting to
  // access the main chunk sees it as a single free block until we
  // change it.
  size_t fc_size = n * word_sz;
  // All but first chunk in this loop
  for (ssize_t i = n-1; i > 0; i--) {
    FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz);
    ffc->setSize(word_sz);
    ffc->linkNext(NULL);
    ffc->linkPrev(NULL); // Mark as a free block for other (parallel) GC threads.
    // Above must occur before BOT is updated below.
    // splitting from the right, fc_size == (n - i + 1) * wordsize
    _bt.mark_block((HeapWord*)ffc, word_sz);
    fc_size -= word_sz;
    _bt.verify_not_unallocated((HeapWord*)ffc, ffc->size());
    _bt.verify_single_block((HeapWord*)ffc, ffc->size());
    _bt.verify_single_block((HeapWord*)fc, fc_size);
    // Push this on "fl".
    fl->returnChunkAtHead(ffc);
  }
  // First chunk
  fc->setSize(word_sz);
  fc->linkNext(NULL);
  fc->linkPrev(NULL);
  _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
  _bt.verify_single_block((HeapWord*)fc, fc->size());
  fl->returnChunkAtHead(fc);

  assert((ssize_t)n > 0 && (ssize_t)n == fl->count(), "Incorrect number of blocks");
  {
    // Update the stats for this block size.
    MutexLockerEx x(_indexedFreeListParLocks[word_sz],
                    Mutex::_no_safepoint_check_flag);
    const ssize_t births = _indexedFreeList[word_sz].splitBirths() + n;
    _indexedFreeList[word_sz].set_splitBirths(births);
    // ssize_t new_surplus = _indexedFreeList[word_sz].surplus() + n;
    // _indexedFreeList[word_sz].set_surplus(new_surplus);
  }

  // TRAP
  assert(fl->tail()->next() == NULL, "List invariant.");
}
2836 // Set up the space's par_seq_tasks structure for work claiming
2837 // for parallel rescan. See CMSParRemarkTask where this is currently used.
2838 // XXX Need to suitably abstract and generalize this and the next
2839 // method into one.
2840 void
2841 CompactibleFreeListSpace::
2842 initialize_sequential_subtasks_for_rescan(int n_threads) {
2843 // The "size" of each task is fixed according to rescan_task_size.
2844 assert(n_threads > 0, "Unexpected n_threads argument");
2845 const size_t task_size = rescan_task_size();
2846 size_t n_tasks = (used_region().word_size() + task_size - 1)/task_size;
2847 assert((n_tasks == 0) == used_region().is_empty(), "n_tasks incorrect");
2848 assert(n_tasks == 0 ||
2849 ((used_region().start() + (n_tasks - 1)*task_size < used_region().end()) &&
2850 (used_region().start() + n_tasks*task_size >= used_region().end())),
2851 "n_tasks calculation incorrect");
2852 SequentialSubTasksDone* pst = conc_par_seq_tasks();
2853 assert(!pst->valid(), "Clobbering existing data?");
2854 pst->set_par_threads(n_threads);
2855 pst->set_n_tasks((int)n_tasks);
2856 }
2858 // Set up the space's par_seq_tasks structure for work claiming
2859 // for parallel concurrent marking. See CMSConcMarkTask where this is currently used.
2860 void
2861 CompactibleFreeListSpace::
2862 initialize_sequential_subtasks_for_marking(int n_threads,
2863 HeapWord* low) {
2864 // The "size" of each task is fixed according to rescan_task_size.
2865 assert(n_threads > 0, "Unexpected n_threads argument");
2866 const size_t task_size = marking_task_size();
2867 assert(task_size > CardTableModRefBS::card_size_in_words &&
2868 (task_size % CardTableModRefBS::card_size_in_words == 0),
2869 "Otherwise arithmetic below would be incorrect");
2870 MemRegion span = _gen->reserved();
2871 if (low != NULL) {
2872 if (span.contains(low)) {
2873 // Align low down to a card boundary so that
2874 // we can use block_offset_careful() on span boundaries.
2875 HeapWord* aligned_low = (HeapWord*)align_size_down((uintptr_t)low,
2876 CardTableModRefBS::card_size);
2877 // Clip span prefix at aligned_low
2878 span = span.intersection(MemRegion(aligned_low, span.end()));
2879 } else if (low > span.end()) {
2880 span = MemRegion(low, low); // Null region
2881 } // else use entire span
2882 }
2883 assert(span.is_empty() ||
2884 ((uintptr_t)span.start() % CardTableModRefBS::card_size == 0),
2885 "span should start at a card boundary");
2886 size_t n_tasks = (span.word_size() + task_size - 1)/task_size;
2887 assert((n_tasks == 0) == span.is_empty(), "Inconsistency");
2888 assert(n_tasks == 0 ||
2889 ((span.start() + (n_tasks - 1)*task_size < span.end()) &&
2890 (span.start() + n_tasks*task_size >= span.end())),
2891 "n_tasks calculation incorrect");
2892 SequentialSubTasksDone* pst = conc_par_seq_tasks();
2893 assert(!pst->valid(), "Clobbering existing data?");
2894 pst->set_par_threads(n_threads);
2895 pst->set_n_tasks((int)n_tasks);
2896 }