Thu, 20 Nov 2008 16:56:09 -0800
6684579: SoftReference processing can be made more efficient
Summary: For current soft-ref clearing policies, we can decide at marking time if a soft-reference will definitely not be cleared, postponing the decision of whether it will definitely be cleared to the final reference processing phase. This can be especially beneficial in the case of concurrent collectors where the marking is usually concurrent but reference processing is usually not.
Reviewed-by: jmasa
/*
 * Copyright 2001-2008 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

# include "incls/_precompiled.incl"
# include "incls/_compactibleFreeListSpace.cpp.incl"

/////////////////////////////////////////////////////////////////////////
//// CompactibleFreeListSpace
/////////////////////////////////////////////////////////////////////////

// highest ranked free list lock rank
int CompactibleFreeListSpace::_lockRank = Mutex::leaf + 3;

// Constructor
CompactibleFreeListSpace::CompactibleFreeListSpace(BlockOffsetSharedArray* bs,
  MemRegion mr, bool use_adaptive_freelists,
  FreeBlockDictionary::DictionaryChoice dictionaryChoice) :
  _dictionaryChoice(dictionaryChoice),
  _adaptive_freelists(use_adaptive_freelists),
  _bt(bs, mr),
  // free list locks are in the range of values taken by _lockRank
  // This range currently is [_leaf+2, _leaf+3]
  // Note: this requires that CFLspace c'tors
  // are called serially in the order in which the locks are
  // acquired in the program text. This is true today.
  _freelistLock(_lockRank--, "CompactibleFreeListSpace._lock", true),
  _parDictionaryAllocLock(Mutex::leaf - 1,  // == rank(ExpandHeap_lock) - 1
                          "CompactibleFreeListSpace._dict_par_lock", true),
  _rescan_task_size(CardTableModRefBS::card_size_in_words * BitsPerWord *
                    CMSRescanMultiple),
  _marking_task_size(CardTableModRefBS::card_size_in_words * BitsPerWord *
                     CMSConcMarkMultiple),
  _collector(NULL)
{
  _bt.set_space(this);
  initialize(mr, SpaceDecorator::Clear, SpaceDecorator::Mangle);
  // We have all of "mr", all of which we place in the dictionary
  // as one big chunk. We'll need to decide here which of several
  // possible alternative dictionary implementations to use. For
  // now the choice is easy, since we have only one working
  // implementation, namely, the simple binary tree (splaying
  // temporarily disabled).
  switch (dictionaryChoice) {
    case FreeBlockDictionary::dictionaryBinaryTree:
      _dictionary = new BinaryTreeDictionary(mr);
      break;
    case FreeBlockDictionary::dictionarySplayTree:
    case FreeBlockDictionary::dictionarySkipList:
    default:
      warning("dictionaryChoice: selected option not understood; using"
              " default BinaryTreeDictionary implementation instead.");
      _dictionary = new BinaryTreeDictionary(mr);
      break;
  }
  splitBirth(mr.word_size());
  assert(_dictionary != NULL, "CMS dictionary initialization");
  // The indexed free lists are initially all empty and are lazily
  // filled in on demand. Initialize the array elements to NULL.
  initializeIndexedFreeListArray();

  // Not using adaptive free lists assumes that allocation is first
  // from the linAB's. Also, a CMS perm gen that can be compacted
  // has to have the klass's klassKlass allocated at a lower
  // address in the heap than the klass so that the klassKlass is
  // moved to its new location before the klass is moved.
  // Set the _refillSize for the linear allocation blocks
  if (!use_adaptive_freelists) {
    FreeChunk* fc = _dictionary->getChunk(mr.word_size());
    // The small linAB initially has all the space and will allocate
    // a chunk of any size.
    HeapWord* addr = (HeapWord*) fc;
    _smallLinearAllocBlock.set(addr, fc->size(),
      1024*SmallForLinearAlloc, fc->size());
    // Note that _unallocated_block is not updated here.
    // Allocations from the linear allocation block should
    // update it.
  } else {
    _smallLinearAllocBlock.set(0, 0, 1024*SmallForLinearAlloc,
                               SmallForLinearAlloc);
  }
  // CMSIndexedFreeListReplenish should be at least 1
  CMSIndexedFreeListReplenish = MAX2((uintx)1, CMSIndexedFreeListReplenish);
  _promoInfo.setSpace(this);
  if (UseCMSBestFit) {
    _fitStrategy = FreeBlockBestFitFirst;
  } else {
    _fitStrategy = FreeBlockStrategyNone;
  }
  checkFreeListConsistency();

  // Initialize locks for parallel case.
  if (ParallelGCThreads > 0) {
    for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
      _indexedFreeListParLocks[i] = new Mutex(Mutex::leaf - 1, // == ExpandHeap_lock - 1
                                              "a freelist par lock",
                                              true);
      if (_indexedFreeListParLocks[i] == NULL)
        vm_exit_during_initialization("Could not allocate a par lock");
      DEBUG_ONLY(
        _indexedFreeList[i].set_protecting_lock(_indexedFreeListParLocks[i]);
      )
    }
    _dictionary->set_par_lock(&_parDictionaryAllocLock);
  }
}

// Like CompactibleSpace forward() but always calls cross_threshold() to
// update the block offset table.  Removed initialize_threshold call because
// CFLS does not use a block offset array for contiguous spaces.
HeapWord* CompactibleFreeListSpace::forward(oop q, size_t size,
                                    CompactPoint* cp, HeapWord* compact_top) {
  // q is alive
  // First check if we should switch compaction space
  assert(this == cp->space, "'this' should be current compaction space.");
  size_t compaction_max_size = pointer_delta(end(), compact_top);
  assert(adjustObjectSize(size) == cp->space->adjust_object_size_v(size),
    "virtual adjustObjectSize_v() method is not correct");
  size_t adjusted_size = adjustObjectSize(size);
  assert(compaction_max_size >= MinChunkSize || compaction_max_size == 0,
    "no small fragments allowed");
  assert(minimum_free_block_size() == MinChunkSize,
    "for de-virtualized reference below");
  // Can't leave a nonzero-size residual fragment smaller than MinChunkSize
  if (adjusted_size + MinChunkSize > compaction_max_size &&
      adjusted_size != compaction_max_size) {
    do {
      // switch to next compaction space
      cp->space->set_compaction_top(compact_top);
      cp->space = cp->space->next_compaction_space();
      if (cp->space == NULL) {
        cp->gen = GenCollectedHeap::heap()->prev_gen(cp->gen);
        assert(cp->gen != NULL, "compaction must succeed");
        cp->space = cp->gen->first_compaction_space();
        assert(cp->space != NULL, "generation must have a first compaction space");
      }
      compact_top = cp->space->bottom();
      cp->space->set_compaction_top(compact_top);
      // The correct adjusted_size may not be the same as that for this method
      // (i.e., cp->space may no longer be "this"), so adjust the size again.
      // Use the virtual method here; the non-virtual version was used above
      // to save the virtual dispatch.
      adjusted_size = cp->space->adjust_object_size_v(size);
      compaction_max_size = pointer_delta(cp->space->end(), compact_top);
      assert(cp->space->minimum_free_block_size() == 0, "just checking");
    } while (adjusted_size > compaction_max_size);
  }

  // store the forwarding pointer into the mark word
  if ((HeapWord*)q != compact_top) {
    q->forward_to(oop(compact_top));
    assert(q->is_gc_marked(), "encoding the pointer should preserve the mark");
  } else {
    // if the object isn't moving we can just set the mark to the default
    // mark and handle it specially later on.
    q->init_mark();
    assert(q->forwardee() == NULL, "should be forwarded to NULL");
  }

  VALIDATE_MARK_SWEEP_ONLY(MarkSweep::register_live_oop(q, adjusted_size));
  compact_top += adjusted_size;

  // we need to update the offset table so that the beginnings of objects can be
  // found during scavenge.  Note that we are updating the offset table based on
  // where the object will be once the compaction phase finishes.

  // Always call cross_threshold().  A contiguous space can only call it when
  // the compaction_top exceeds the current threshold but not for a
  // non-contiguous space.
  cp->threshold =
    cp->space->cross_threshold(compact_top - adjusted_size, compact_top);
  return compact_top;
}

// A modified copy of OffsetTableContigSpace::cross_threshold() with _offsets -> _bt
// and use of single_block instead of alloc_block.  The name here is not really
// appropriate - maybe a more general name could be invented for both the
// contiguous and noncontiguous spaces.

HeapWord* CompactibleFreeListSpace::cross_threshold(HeapWord* start, HeapWord* the_end) {
  _bt.single_block(start, the_end);
  return end();
}

// Initialize the indexed free lists so that each starts out empty
// (head and tail NULL).
void CompactibleFreeListSpace::initializeIndexedFreeListArray() {
  for (size_t i = 0; i < IndexSetSize; i++) {
    // Note that on platforms where objects are double word aligned,
    // the odd array elements are not used.  It is convenient, however,
    // to map directly from the object size to the array element.
    _indexedFreeList[i].reset(IndexSetSize);
    _indexedFreeList[i].set_size(i);
    assert(_indexedFreeList[i].count() == 0, "reset check failed");
    assert(_indexedFreeList[i].head() == NULL, "reset check failed");
    assert(_indexedFreeList[i].tail() == NULL, "reset check failed");
    assert(_indexedFreeList[i].hint() == IndexSetSize, "reset check failed");
  }
}

void CompactibleFreeListSpace::resetIndexedFreeListArray() {
  for (int i = 1; i < IndexSetSize; i++) {
    assert(_indexedFreeList[i].size() == (size_t) i,
      "Indexed free list sizes are incorrect");
    _indexedFreeList[i].reset(IndexSetSize);
    assert(_indexedFreeList[i].count() == 0, "reset check failed");
    assert(_indexedFreeList[i].head() == NULL, "reset check failed");
    assert(_indexedFreeList[i].tail() == NULL, "reset check failed");
    assert(_indexedFreeList[i].hint() == IndexSetSize, "reset check failed");
  }
}

void CompactibleFreeListSpace::reset(MemRegion mr) {
  resetIndexedFreeListArray();
  dictionary()->reset();
  if (BlockOffsetArrayUseUnallocatedBlock) {
    assert(end() == mr.end(), "We are compacting to the bottom of CMS gen");
    // Everything's allocated until proven otherwise.
    _bt.set_unallocated_block(end());
  }
  if (!mr.is_empty()) {
    assert(mr.word_size() >= MinChunkSize, "Chunk size is too small");
    _bt.single_block(mr.start(), mr.word_size());
    FreeChunk* fc = (FreeChunk*) mr.start();
    fc->setSize(mr.word_size());
    if (mr.word_size() >= IndexSetSize) {
      returnChunkToDictionary(fc);
    } else {
      _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
      _indexedFreeList[mr.word_size()].returnChunkAtHead(fc);
    }
  }
  _promoInfo.reset();
  _smallLinearAllocBlock._ptr = NULL;
  _smallLinearAllocBlock._word_size = 0;
}

void CompactibleFreeListSpace::reset_after_compaction() {
  // Reset the space to the new reality - one free chunk.
  MemRegion mr(compaction_top(), end());
  reset(mr);
  // Now refill the linear allocation block(s) if possible.
  if (_adaptive_freelists) {
    refillLinearAllocBlocksIfNeeded();
  } else {
    // Place as much of mr in the linAB as we can get,
    // provided it was big enough to go into the dictionary.
    FreeChunk* fc = dictionary()->findLargestDict();
    if (fc != NULL) {
      assert(fc->size() == mr.word_size(),
        "Why was the chunk broken up?");
      removeChunkFromDictionary(fc);
      HeapWord* addr = (HeapWord*) fc;
      _smallLinearAllocBlock.set(addr, fc->size(),
        1024*SmallForLinearAlloc, fc->size());
      // Note that _unallocated_block is not updated here.
    }
  }
}

// Walks the entire dictionary, returning a coterminal
// chunk, if it exists. Use with caution since it involves
// a potentially complete walk of a potentially large tree.
FreeChunk* CompactibleFreeListSpace::find_chunk_at_end() {

  assert_lock_strong(&_freelistLock);

  return dictionary()->find_chunk_ends_at(end());
}

#ifndef PRODUCT
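// The two helpers below zero and sum, respectively, the per-list
// returnedBytes statistics of the indexed free lists (non-product builds).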
void CompactibleFreeListSpace::initializeIndexedFreeListArrayReturnedBytes() {
  for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
    _indexedFreeList[i].allocation_stats()->set_returnedBytes(0);
  }
}

size_t CompactibleFreeListSpace::sumIndexedFreeListArrayReturnedBytes() {
  size_t sum = 0;
  for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
    sum += _indexedFreeList[i].allocation_stats()->returnedBytes();
  }
  return sum;
}
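
// Count the free chunks in the indexed free lists; in debug builds,
// cross-check each list's cached count against a walk of the list.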
size_t CompactibleFreeListSpace::totalCountInIndexedFreeLists() const {
  size_t count = 0;
  for (int i = MinChunkSize; i < IndexSetSize; i++) {
    debug_only(
      ssize_t total_list_count = 0;
      for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
         fc = fc->next()) {
        total_list_count++;
      }
      assert(total_list_count == _indexedFreeList[i].count(),
        "Count in list is incorrect");
    )
    count += _indexedFreeList[i].count();
  }
  return count;
}
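
// Total number of free chunks in the space: the indexed free lists,
// the dictionary, plus the small linAB remainder if it is nonempty.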
size_t CompactibleFreeListSpace::totalCount() {
  size_t num = totalCountInIndexedFreeLists();
  num += dictionary()->totalCount();
  if (_smallLinearAllocBlock._word_size != 0) {
    num++;
  }
  return num;
}
#endif
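
// A block is considered free if its header bits identify it as a FreeChunk.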
bool CompactibleFreeListSpace::is_free_block(const HeapWord* p) const {
  FreeChunk* fc = (FreeChunk*) p;
  return fc->isFree();
}

size_t CompactibleFreeListSpace::used() const {
  return capacity() - free();
}

size_t CompactibleFreeListSpace::free() const {
  // "MT-safe, but not MT-precise"(TM), if you will: i.e.
  // if you do this while the structures are in flux you
  // may get an approximate answer only; for instance
  // because there is concurrent allocation either
  // directly by mutators or for promotion during a GC.
  // It's "MT-safe", however, in the sense that you are guaranteed
  // not to crash and burn, for instance, because of walking
  // pointers that could disappear as you were walking them.
  // The approximation is because the various components
  // that are read below are not read atomically (and
  // further the computation of totalSizeInIndexedFreeLists()
  // is itself a non-atomic computation). The normal use of
  // this is during a resize operation at the end of GC
  // and at that time you are guaranteed to get the
  // correct actual value. However, for instance, this is
  // also read completely asynchronously by the "perf-sampler"
  // that supports jvmstat, and you are apt to see the values
  // flicker in such cases.
  assert(_dictionary != NULL, "No _dictionary?");
  return (_dictionary->totalChunkSize(DEBUG_ONLY(freelistLock())) +
          totalSizeInIndexedFreeLists() +
          _smallLinearAllocBlock._word_size) * HeapWordSize;
}
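
// Largest request (in words) that could currently be satisfied without
// expanding the space: the maximum over the dictionary's biggest chunk,
// the small linAB remainder, and the largest nonempty indexed free list.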
size_t CompactibleFreeListSpace::max_alloc_in_words() const {
  assert(_dictionary != NULL, "No _dictionary?");
  assert_locked();
  size_t res = _dictionary->maxChunkSize();
  res = MAX2(res, MIN2(_smallLinearAllocBlock._word_size,
                       (size_t) SmallForLinearAlloc - 1));
  // XXX the following could potentially be pretty slow;
  // should one, pessimally for the rare cases when res
  // calculated above is less than IndexSetSize,
  // just return res calculated above? My reasoning was that
  // those cases will be so rare that the extra time spent doesn't
  // really matter....
  // Note: do not change the loop test i >= res + IndexSetStride
  // to i > res below, because i is unsigned and res may be zero.
  for (size_t i = IndexSetSize - 1; i >= res + IndexSetStride;
       i -= IndexSetStride) {
    if (_indexedFreeList[i].head() != NULL) {
      assert(_indexedFreeList[i].count() != 0, "Inconsistent FreeList");
      return i;
    }
  }
  return res;
}

void CompactibleFreeListSpace::reportFreeListStatistics() const {
  assert_lock_strong(&_freelistLock);
  assert(PrintFLSStatistics != 0, "Reporting error");
  _dictionary->reportStatistics();
  if (PrintFLSStatistics > 1) {
    reportIndexedFreeListStatistics();
    size_t totalSize = totalSizeInIndexedFreeLists() +
                       _dictionary->totalChunkSize(DEBUG_ONLY(freelistLock()));
    gclog_or_tty->print(" free=%ld frag=%1.4f\n", totalSize, flsFrag());
  }
}

void CompactibleFreeListSpace::reportIndexedFreeListStatistics() const {
  assert_lock_strong(&_freelistLock);
  gclog_or_tty->print("Statistics for IndexedFreeLists:\n"
                      "--------------------------------\n");
  size_t totalSize = totalSizeInIndexedFreeLists();
  size_t freeBlocks = numFreeBlocksInIndexedFreeLists();
  gclog_or_tty->print("Total Free Space: %d\n", totalSize);
  gclog_or_tty->print("Max Chunk Size: %d\n", maxChunkSizeInIndexedFreeLists());
  gclog_or_tty->print("Number of Blocks: %d\n", freeBlocks);
  if (freeBlocks != 0) {
    gclog_or_tty->print("Av. Block Size: %d\n", totalSize/freeBlocks);
  }
}

size_t CompactibleFreeListSpace::numFreeBlocksInIndexedFreeLists() const {
  size_t res = 0;
  for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
    debug_only(
      ssize_t recount = 0;
      for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
         fc = fc->next()) {
        recount += 1;
      }
      assert(recount == _indexedFreeList[i].count(),
        "Incorrect count in list");
    )
    res += _indexedFreeList[i].count();
  }
  return res;
}

size_t CompactibleFreeListSpace::maxChunkSizeInIndexedFreeLists() const {
  for (size_t i = IndexSetSize - 1; i != 0; i -= IndexSetStride) {
    if (_indexedFreeList[i].head() != NULL) {
      assert(_indexedFreeList[i].count() != 0, "Inconsistent FreeList");
      return (size_t)i;
    }
  }
  return 0;
}
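
// Grow or shrink the space by moving its end.  When growing, the newly
// covered region is handed either to the small linAB (non-adaptive case
// with an empty linAB) or to the free lists, with the BOT updated to match.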
void CompactibleFreeListSpace::set_end(HeapWord* value) {
  HeapWord* prevEnd = end();
  assert(prevEnd != value, "unnecessary set_end call");
  assert(prevEnd == NULL || value >= unallocated_block(), "New end is below unallocated block");
  _end = value;
  if (prevEnd != NULL) {
    // Resize the underlying block offset table.
    _bt.resize(pointer_delta(value, bottom()));
    if (value <= prevEnd) {
      assert(value >= unallocated_block(), "New end is below unallocated block");
    } else {
      // Now, take this new chunk and add it to the free blocks.
      // Note that the BOT has not yet been updated for this block.
      size_t newFcSize = pointer_delta(value, prevEnd);
      // XXX This is REALLY UGLY and should be fixed up. XXX
      if (!_adaptive_freelists && _smallLinearAllocBlock._ptr == NULL) {
        // Mark the boundary of the new block in BOT
        _bt.mark_block(prevEnd, value);
        // put it all in the linAB
        if (ParallelGCThreads == 0) {
          _smallLinearAllocBlock._ptr = prevEnd;
          _smallLinearAllocBlock._word_size = newFcSize;
          repairLinearAllocBlock(&_smallLinearAllocBlock);
        } else { // ParallelGCThreads > 0
          MutexLockerEx x(parDictionaryAllocLock(),
                          Mutex::_no_safepoint_check_flag);
          _smallLinearAllocBlock._ptr = prevEnd;
          _smallLinearAllocBlock._word_size = newFcSize;
          repairLinearAllocBlock(&_smallLinearAllocBlock);
        }
        // Births of chunks put into a LinAB are not recorded.  Births
        // of chunks as they are allocated out of a LinAB are.
      } else {
        // Add the block to the free lists, if possible coalescing it
        // with the last free block, and update the BOT and census data.
        addChunkToFreeListsAtEndRecordingStats(prevEnd, newFcSize);
      }
    }
  }
}

class FreeListSpace_DCTOC : public Filtering_DCTOC {
  CompactibleFreeListSpace* _cfls;
  CMSCollector* _collector;
protected:
  // Override.
#define walk_mem_region_with_cl_DECL(ClosureType)                       \
  virtual void walk_mem_region_with_cl(MemRegion mr,                    \
                                       HeapWord* bottom, HeapWord* top, \
                                       ClosureType* cl);                \
      void walk_mem_region_with_cl_par(MemRegion mr,                    \
                                       HeapWord* bottom, HeapWord* top, \
                                       ClosureType* cl);                \
    void walk_mem_region_with_cl_nopar(MemRegion mr,                    \
                                       HeapWord* bottom, HeapWord* top, \
                                       ClosureType* cl)
  walk_mem_region_with_cl_DECL(OopClosure);
  walk_mem_region_with_cl_DECL(FilteringClosure);

public:
  FreeListSpace_DCTOC(CompactibleFreeListSpace* sp,
                      CMSCollector* collector,
                      OopClosure* cl,
                      CardTableModRefBS::PrecisionStyle precision,
                      HeapWord* boundary) :
    Filtering_DCTOC(sp, cl, precision, boundary),
    _cfls(sp), _collector(collector) {}
};

// We de-virtualize the block-related calls below, since we know that our
// space is a CompactibleFreeListSpace.
#define FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(ClosureType)          \
void FreeListSpace_DCTOC::walk_mem_region_with_cl(MemRegion mr,                 \
                                                  HeapWord* bottom,             \
                                                  HeapWord* top,                \
                                                  ClosureType* cl) {            \
  if (SharedHeap::heap()->n_par_threads() > 0) {                                \
    walk_mem_region_with_cl_par(mr, bottom, top, cl);                           \
  } else {                                                                      \
    walk_mem_region_with_cl_nopar(mr, bottom, top, cl);                         \
  }                                                                             \
}                                                                               \
void FreeListSpace_DCTOC::walk_mem_region_with_cl_par(MemRegion mr,             \
                                                      HeapWord* bottom,         \
                                                      HeapWord* top,            \
                                                      ClosureType* cl) {        \
  /* Skip parts that are before "mr", in case "block_start" sent us             \
     back too far. */                                                           \
  HeapWord* mr_start = mr.start();                                              \
  size_t bot_size = _cfls->CompactibleFreeListSpace::block_size(bottom);        \
  HeapWord* next = bottom + bot_size;                                           \
  while (next < mr_start) {                                                     \
    bottom = next;                                                              \
    bot_size = _cfls->CompactibleFreeListSpace::block_size(bottom);             \
    next = bottom + bot_size;                                                   \
  }                                                                             \
                                                                                \
  while (bottom < top) {                                                        \
    if (_cfls->CompactibleFreeListSpace::block_is_obj(bottom) &&                \
        !_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks(       \
                    oop(bottom)) &&                                             \
        !_collector->CMSCollector::is_dead_obj(oop(bottom))) {                  \
      size_t word_sz = oop(bottom)->oop_iterate(cl, mr);                        \
      bottom += _cfls->adjustObjectSize(word_sz);                               \
    } else {                                                                    \
      bottom += _cfls->CompactibleFreeListSpace::block_size(bottom);            \
    }                                                                           \
  }                                                                             \
}                                                                               \
void FreeListSpace_DCTOC::walk_mem_region_with_cl_nopar(MemRegion mr,           \
                                                        HeapWord* bottom,       \
                                                        HeapWord* top,          \
                                                        ClosureType* cl) {      \
  /* Skip parts that are before "mr", in case "block_start" sent us             \
     back too far. */                                                           \
  HeapWord* mr_start = mr.start();                                              \
  size_t bot_size = _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);  \
  HeapWord* next = bottom + bot_size;                                           \
  while (next < mr_start) {                                                     \
    bottom = next;                                                              \
    bot_size = _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);       \
    next = bottom + bot_size;                                                   \
  }                                                                             \
                                                                                \
  while (bottom < top) {                                                        \
    if (_cfls->CompactibleFreeListSpace::block_is_obj_nopar(bottom) &&          \
        !_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks(       \
                    oop(bottom)) &&                                             \
        !_collector->CMSCollector::is_dead_obj(oop(bottom))) {                  \
      size_t word_sz = oop(bottom)->oop_iterate(cl, mr);                        \
      bottom += _cfls->adjustObjectSize(word_sz);                               \
    } else {                                                                    \
      bottom += _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);      \
    }                                                                           \
  }                                                                             \
}

// (There are only two of these, rather than N, because the split is due
// only to the introduction of the FilteringClosure, a local part of the
// impl of this abstraction.)
FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(OopClosure)
FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(FilteringClosure)
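
// Factory for a dirty-card-to-oop closure specialized for this space.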
DirtyCardToOopClosure*
CompactibleFreeListSpace::new_dcto_cl(OopClosure* cl,
                                      CardTableModRefBS::PrecisionStyle precision,
                                      HeapWord* boundary) {
  return new FreeListSpace_DCTOC(this, _collector, cl, precision, boundary);
}

// Note on locking for the space iteration functions:
// since the collector's iteration activities are concurrent with
// allocation activities by mutators, absent a suitable mutual exclusion
// mechanism the iterators may go awry. For instance a block being iterated
// may suddenly be allocated or divided up and part of it allocated and
// so on.

// Apply the given closure to each block in the space.
void CompactibleFreeListSpace::blk_iterate_careful(BlkClosureCareful* cl) {
  assert_lock_strong(freelistLock());
  HeapWord *cur, *limit;
  for (cur = bottom(), limit = end(); cur < limit;
       cur += cl->do_blk_careful(cur));
}

// Apply the given closure to each block in the space.
void CompactibleFreeListSpace::blk_iterate(BlkClosure* cl) {
  assert_lock_strong(freelistLock());
  HeapWord *cur, *limit;
  for (cur = bottom(), limit = end(); cur < limit;
       cur += cl->do_blk(cur));
}

// Apply the given closure to each oop in the space.
void CompactibleFreeListSpace::oop_iterate(OopClosure* cl) {
  assert_lock_strong(freelistLock());
  HeapWord *cur, *limit;
  size_t curSize;
  for (cur = bottom(), limit = end(); cur < limit;
       cur += curSize) {
    curSize = block_size(cur);
    if (block_is_obj(cur)) {
      oop(cur)->oop_iterate(cl);
    }
  }
}

// Apply the given closure to each oop in the space \intersect memory region.
void CompactibleFreeListSpace::oop_iterate(MemRegion mr, OopClosure* cl) {
  assert_lock_strong(freelistLock());
  if (is_empty()) {
    return;
  }
  MemRegion cur = MemRegion(bottom(), end());
  mr = mr.intersection(cur);
  if (mr.is_empty()) {
    return;
  }
  if (mr.equals(cur)) {
    oop_iterate(cl);
    return;
  }
  assert(mr.end() <= end(), "just took an intersection above");
  HeapWord* obj_addr = block_start(mr.start());
  HeapWord* t = mr.end();

  SpaceMemRegionOopsIterClosure smr_blk(cl, mr);
  if (block_is_obj(obj_addr)) {
    // Handle first object specially.
    oop obj = oop(obj_addr);
    obj_addr += adjustObjectSize(obj->oop_iterate(&smr_blk));
  } else {
    FreeChunk* fc = (FreeChunk*)obj_addr;
    obj_addr += fc->size();
  }
  while (obj_addr < t) {
    HeapWord* obj = obj_addr;
    obj_addr += block_size(obj_addr);
    // If "obj_addr" is not greater than t (the region end), then the
    // entire object "obj" is within the region.
    if (obj_addr <= t) {
      if (block_is_obj(obj)) {
        oop(obj)->oop_iterate(cl);
      }
    } else {
      // "obj" extends beyond end of region
      if (block_is_obj(obj)) {
        oop(obj)->oop_iterate(&smr_blk);
      }
      break;
    }
  }
}

// NOTE: In the following methods, in order to safely be able to
// apply the closure to an object, we need to be sure that the
// object has been initialized. We are guaranteed that an object
// is initialized if we are holding the Heap_lock with the
// world stopped.
void CompactibleFreeListSpace::verify_objects_initialized() const {
  if (is_init_completed()) {
    assert_locked_or_safepoint(Heap_lock);
    if (Universe::is_fully_initialized()) {
      guarantee(SafepointSynchronize::is_at_safepoint(),
                "Required for objects to be initialized");
    }
  } // else make a concession at vm start-up
}

// Apply the given closure to each object in the space
void CompactibleFreeListSpace::object_iterate(ObjectClosure* blk) {
  assert_lock_strong(freelistLock());
  NOT_PRODUCT(verify_objects_initialized());
  HeapWord *cur, *limit;
  size_t curSize;
  for (cur = bottom(), limit = end(); cur < limit;
       cur += curSize) {
    curSize = block_size(cur);
    if (block_is_obj(cur)) {
      blk->do_object(oop(cur));
    }
  }
}

void CompactibleFreeListSpace::object_iterate_mem(MemRegion mr,
                                                  UpwardsObjectClosure* cl) {
  assert_locked();
  NOT_PRODUCT(verify_objects_initialized());
  Space::object_iterate_mem(mr, cl);
}

// Callers of this iterator beware: The closure application should
// be robust in the face of uninitialized objects and should (always)
// return a correct size so that the next addr + size below gives us a
// valid block boundary. [See for instance,
// ScanMarkedObjectsAgainCarefullyClosure::do_object_careful()
// in ConcurrentMarkSweepGeneration.cpp.]
HeapWord*
CompactibleFreeListSpace::object_iterate_careful(ObjectClosureCareful* cl) {
  assert_lock_strong(freelistLock());
  HeapWord *addr, *last;
  size_t size;
  for (addr = bottom(), last = end();
       addr < last; addr += size) {
    FreeChunk* fc = (FreeChunk*)addr;
    if (fc->isFree()) {
      // Since we hold the free list lock, which protects direct
      // allocation in this generation by mutators, a free object
      // will remain free throughout this iteration code.
      size = fc->size();
    } else {
      // Note that the object need not necessarily be initialized,
      // because (for instance) the free list lock does NOT protect
      // object initialization. The closure application below must
      // therefore be correct in the face of uninitialized objects.
      size = cl->do_object_careful(oop(addr));
      if (size == 0) {
        // An unparsable object found. Signal early termination.
        return addr;
      }
    }
  }
  return NULL;
}

// Callers of this iterator beware: The closure application should
// be robust in the face of uninitialized objects and should (always)
// return a correct size so that the next addr + size below gives us a
// valid block boundary. [See for instance,
// ScanMarkedObjectsAgainCarefullyClosure::do_object_careful()
// in ConcurrentMarkSweepGeneration.cpp.]
HeapWord*
CompactibleFreeListSpace::object_iterate_careful_m(MemRegion mr,
                                                   ObjectClosureCareful* cl) {
  assert_lock_strong(freelistLock());
  // Can't use used_region() below because it may not necessarily
  // be the same as [bottom(),end()); although we could
  // use [used_region().start(),round_to(used_region().end(),CardSize)),
  // that appears too cumbersome, so we just do the simpler check
  // in the assertion below.
  assert(!mr.is_empty() && MemRegion(bottom(),end()).contains(mr),
         "mr should be non-empty and within used space");
  HeapWord *addr, *end;
  size_t size;
  for (addr = block_start_careful(mr.start()), end = mr.end();
       addr < end; addr += size) {
    FreeChunk* fc = (FreeChunk*)addr;
    if (fc->isFree()) {
      // Since we hold the free list lock, which protects direct
      // allocation in this generation by mutators, a free object
      // will remain free throughout this iteration code.
      size = fc->size();
    } else {
      // Note that the object need not necessarily be initialized,
      // because (for instance) the free list lock does NOT protect
      // object initialization. The closure application below must
      // therefore be correct in the face of uninitialized objects.
      size = cl->do_object_careful_m(oop(addr), mr);
      if (size == 0) {
        // An unparsable object found. Signal early termination.
        return addr;
      }
    }
  }
  return NULL;
}
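
// Block-start queries, delegated to the block offset table.  The
// "careful" variant tolerates an imprecise table and may return a
// conservative (lower) starting point, as used by the iterator above.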
HeapWord* CompactibleFreeListSpace::block_start_const(const void* p) const {
  NOT_PRODUCT(verify_objects_initialized());
  return _bt.block_start(p);
}

HeapWord* CompactibleFreeListSpace::block_start_careful(const void* p) const {
  return _bt.block_start_careful(p);
}

size_t CompactibleFreeListSpace::block_size(const HeapWord* p) const {
  NOT_PRODUCT(verify_objects_initialized());
  assert(MemRegion(bottom(), end()).contains(p), "p not in space");
  // This must be volatile, or else there is a danger that the compiler
  // will compile the code below into a sometimes-infinite loop, by keeping
  // the value read the first time in a register.
  while (true) {
    // We must do this until we get a consistent view of the object.
    if (FreeChunk::indicatesFreeChunk(p)) {
      volatile FreeChunk* fc = (volatile FreeChunk*)p;
      size_t res = fc->size();
      // If the object is still a free chunk, return the size, else it
      // has been allocated so try again.
      if (FreeChunk::indicatesFreeChunk(p)) {
        assert(res != 0, "Block size should not be 0");
        return res;
      }
    } else {
      // must read from what 'p' points to in each loop.
      klassOop k = ((volatile oopDesc*)p)->klass_or_null();
      if (k != NULL) {
        assert(k->is_oop(true /* ignore mark word */), "Should really be klass oop.");
        oop o = (oop)p;
        assert(o->is_parsable(), "Should be parsable");
        assert(o->is_oop(true /* ignore mark word */), "Should be an oop.");
        size_t res = o->size_given_klass(k->klass_part());
        res = adjustObjectSize(res);
        assert(res != 0, "Block size should not be 0");
        return res;
      }
    }
  }
}

// A variant of the above that uses the Printezis bits for
// unparsable but allocated objects. This avoids any possible
// stalls waiting for mutators to initialize objects, and is
// thus potentially faster than the variant above. However,
// this variant may return a zero size for a block that is
// under mutation and for which a consistent size cannot be
// inferred without stalling; see CMSCollector::block_size_if_printezis_bits().
size_t CompactibleFreeListSpace::block_size_no_stall(HeapWord* p,
                                                     const CMSCollector* c)
const {
  assert(MemRegion(bottom(), end()).contains(p), "p not in space");
  // This must be volatile, or else there is a danger that the compiler
  // will compile the code below into a sometimes-infinite loop, by keeping
  // the value read the first time in a register.
  DEBUG_ONLY(uint loops = 0;)
  while (true) {
    // We must do this until we get a consistent view of the object.
    if (FreeChunk::indicatesFreeChunk(p)) {
      volatile FreeChunk* fc = (volatile FreeChunk*)p;
      size_t res = fc->size();
      if (FreeChunk::indicatesFreeChunk(p)) {
        assert(res != 0, "Block size should not be 0");
        assert(loops == 0, "Should be 0");
        return res;
      }
    } else {
      // must read from what 'p' points to in each loop.
      klassOop k = ((volatile oopDesc*)p)->klass_or_null();
      if (k != NULL && ((oopDesc*)p)->is_parsable()) {
        assert(k->is_oop(), "Should really be klass oop.");
        oop o = (oop)p;
        assert(o->is_oop(), "Should be an oop");
        size_t res = o->size_given_klass(k->klass_part());
        res = adjustObjectSize(res);
        assert(res != 0, "Block size should not be 0");
        return res;
      } else {
        return c->block_size_if_printezis_bits(p);
      }
    }
    assert(loops == 0, "Can loop at most once");
    DEBUG_ONLY(loops++;)
  }
}

size_t CompactibleFreeListSpace::block_size_nopar(const HeapWord* p) const {
  NOT_PRODUCT(verify_objects_initialized());
  assert(MemRegion(bottom(), end()).contains(p), "p not in space");
  FreeChunk* fc = (FreeChunk*)p;
  if (fc->isFree()) {
    return fc->size();
  } else {
    // Ignore mark word because this may be a recently promoted
    // object whose mark word is used to chain together grey
    // objects (the last one would have a null value).
    assert(oop(p)->is_oop(true), "Should be an oop");
    return adjustObjectSize(oop(p)->size());
  }
}

// This implementation assumes that the property of "being an object" is
// stable.  But being a free chunk may not be (because of parallel
// promotion.)
bool CompactibleFreeListSpace::block_is_obj(const HeapWord* p) const {
  FreeChunk* fc = (FreeChunk*)p;
  assert(is_in_reserved(p), "Should be in space");
  // When doing a mark-sweep-compact of the CMS generation, this
  // assertion may fail because prepare_for_compaction() uses
  // space that is garbage to maintain information on ranges of
  // live objects so that these live ranges can be moved as a whole.
  // Comment out this assertion until that problem can be solved
  // (i.e., that the block start calculation may look at objects
  // at address below "p" in finding the object that contains "p"
  // and those objects (if garbage) may have been modified to hold
  // live range information).
  // assert(ParallelGCThreads > 0 || _bt.block_start(p) == p, "Should be a block boundary");
  if (FreeChunk::indicatesFreeChunk(p)) return false;
  klassOop k = oop(p)->klass_or_null();
  if (k != NULL) {
    // Ignore mark word because it may have been used to
    // chain together promoted objects (the last one
    // would have a null value).
    assert(oop(p)->is_oop(true), "Should be an oop");
    return true;
  } else {
    return false;  // Was not an object at the start of collection.
  }
}

// Check if the object is alive. This fact is checked either by consulting
// the main marking bitmap in the sweeping phase or, if it's a permanent
// generation and we're not in the sweeping phase, by checking the
// perm_gen_verify_bit_map where we store the "deadness" information if
// we did not sweep the perm gen in the most recent previous GC cycle.
bool CompactibleFreeListSpace::obj_is_alive(const HeapWord* p) const {
  assert (block_is_obj(p), "The address should point to an object");

  // If we're sweeping, we use object liveness information from the main bit map
  // for both perm gen and old gen.
  // We don't need to lock the bitmap (live_map or dead_map below), because
  // EITHER we are in the middle of the sweeping phase, and the
  // main marking bit map (live_map below) is locked,
  // OR we're in other phases and perm_gen_verify_bit_map (dead_map below)
  // is stable, because it's mutated only in the sweeping phase.
  if (_collector->abstract_state() == CMSCollector::Sweeping) {
    CMSBitMap* live_map = _collector->markBitMap();
    return live_map->isMarked((HeapWord*) p);
  } else {
    // If we're not currently sweeping and we haven't swept the perm gen in
    // the previous concurrent cycle then we may have dead but unswept objects
    // in the perm gen. In this case, we use the "deadness" information
    // that we had saved in perm_gen_verify_bit_map at the last sweep.
    if (!CMSClassUnloadingEnabled && _collector->_permGen->reserved().contains(p)) {
      if (_collector->verifying()) {
        CMSBitMap* dead_map = _collector->perm_gen_verify_bit_map();
        // Object is marked in the dead_map bitmap at the previous sweep
        // when we know that it's dead; if the bitmap is not allocated then
        // the object is alive.
        return (dead_map->sizeInBits() == 0) // bit_map has not been allocated
               || !dead_map->par_isMarked((HeapWord*) p);
      } else {
        return false; // We can't say for sure if it's live, so we say that it's dead.
      }
    }
  }
  return true;
}

bool CompactibleFreeListSpace::block_is_obj_nopar(const HeapWord* p) const {
  FreeChunk* fc = (FreeChunk*)p;
  assert(is_in_reserved(p), "Should be in space");
  assert(_bt.block_start(p) == p, "Should be a block boundary");
  if (!fc->isFree()) {
    // Ignore mark word because it may have been used to
    // chain together promoted objects (the last one
    // would have a null value).
    assert(oop(p)->is_oop(true), "Should be an oop");
    return true;
  }
  return false;
}

// "MT-safe but not guaranteed MT-precise" (TM); you may get an
// approximate answer if you don't hold the freelistlock when you call this.
size_t CompactibleFreeListSpace::totalSizeInIndexedFreeLists() const {
  size_t size = 0;
  for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
    debug_only(
      // We may be calling here without the lock in which case we
      // won't do this modest sanity check.
      if (freelistLock()->owned_by_self()) {
        size_t total_list_size = 0;
        for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
           fc = fc->next()) {
          total_list_size += i;
        }
        assert(total_list_size == i * _indexedFreeList[i].count(),
          "Count in list is incorrect");
      }
    )
    size += i * _indexedFreeList[i].count();
  }
  return size;
}
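
// MT-safe allocation entry point: take the free list lock (without a
// safepoint check) and delegate to the locked allocate() below.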
HeapWord* CompactibleFreeListSpace::par_allocate(size_t size) {
  MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
  return allocate(size);
}

HeapWord*
CompactibleFreeListSpace::getChunkFromSmallLinearAllocBlockRemainder(size_t size) {
  return getChunkFromLinearAllocBlockRemainder(&_smallLinearAllocBlock, size);
}

HeapWord* CompactibleFreeListSpace::allocate(size_t size) {
  assert_lock_strong(freelistLock());
  HeapWord* res = NULL;
  assert(size == adjustObjectSize(size),
         "use adjustObjectSize() before calling into allocate()");

  if (_adaptive_freelists) {
    res = allocate_adaptive_freelists(size);
  } else {  // non-adaptive free lists
    res = allocate_non_adaptive_freelists(size);
  }

  if (res != NULL) {
    // check that res does lie in this space!
    assert(is_in_reserved(res), "Not in this space!");
    assert(is_aligned((void*)res), "alignment check");

    FreeChunk* fc = (FreeChunk*)res;
    fc->markNotFree();
    assert(!fc->isFree(), "shouldn't be marked free");
    assert(oop(fc)->klass_or_null() == NULL, "should look uninitialized");
    // Verify that the block offset table shows this to
    // be a single block, but not one which is unallocated.
    _bt.verify_single_block(res, size);
    _bt.verify_not_unallocated(res, size);
    // mangle a just allocated object with a distinct pattern.
    debug_only(fc->mangleAllocated(size));
  }

  return res;
}

HeapWord* CompactibleFreeListSpace::allocate_non_adaptive_freelists(size_t size) {
  HeapWord* res = NULL;
  // try and use linear allocation for smaller blocks
  if (size < _smallLinearAllocBlock._allocation_size_limit) {
    // if successful, the following also adjusts block offset table
    res = getChunkFromSmallLinearAllocBlock(size);
  }
  // Else triage to indexed lists for smaller sizes
  if (res == NULL) {
    if (size < SmallForDictionary) {
      res = (HeapWord*) getChunkFromIndexedFreeList(size);
    } else {
      // else get it from the big dictionary; if even this doesn't
      // work we are out of luck.
      res = (HeapWord*)getChunkFromDictionaryExact(size);
    }
  }

  return res;
}

HeapWord* CompactibleFreeListSpace::allocate_adaptive_freelists(size_t size) {
  assert_lock_strong(freelistLock());
  HeapWord* res = NULL;
  assert(size == adjustObjectSize(size),
         "use adjustObjectSize() before calling into allocate()");

  // Strategy
  //   if small
  //     exact size from small object indexed list if small
  //     small or large linear allocation block (linAB) as appropriate
  //     take from lists of greater sized chunks
  //   else
  //     dictionary
  //     small or large linear allocation block if it has the space
  // Try allocating exact size from indexTable first
  if (size < IndexSetSize) {
    res = (HeapWord*) getChunkFromIndexedFreeList(size);
    if (res != NULL) {
      assert(res != (HeapWord*)_indexedFreeList[size].head(),
        "Not removed from free list");
      // no block offset table adjustment is necessary on blocks in
      // the indexed lists.

    // Try allocating from the small LinAB
    } else if (size < _smallLinearAllocBlock._allocation_size_limit &&
        (res = getChunkFromSmallLinearAllocBlock(size)) != NULL) {
        // if successful, the above also adjusts block offset table
        // Note that this call will refill the LinAB to
        // satisfy the request.  This is different than
        // evm.
        // Don't record chunk off a LinAB?  smallSplitBirth(size);

    } else {
      // Raid the exact free lists larger than size, even if they are not
      // overpopulated.
      res = (HeapWord*) getChunkFromGreater(size);
    }
  } else {
    // Big objects get allocated directly from the dictionary.
    res = (HeapWord*) getChunkFromDictionaryExact(size);
    if (res == NULL) {
      // Try hard not to fail since an allocation failure will likely
      // trigger a synchronous GC.  Try to get the space from the
      // allocation blocks.
      res = getChunkFromSmallLinearAllocBlockRemainder(size);
    }
  }

  return res;
}

// A worst-case estimate of the space required (in HeapWords) to expand the heap
// when promoting obj.
size_t CompactibleFreeListSpace::expansionSpaceRequired(size_t obj_size) const {
  // Depending on the object size, expansion may require refilling either a
  // bigLAB or a smallLAB plus refilling a PromotionInfo object.  MinChunkSize
  // is added because the dictionary may over-allocate to avoid fragmentation.
  size_t space = obj_size;
  if (!_adaptive_freelists) {
    space = MAX2(space, _smallLinearAllocBlock._refillSize);
  }
  space += _promoInfo.refillSize() + 2 * MinChunkSize;
  return space;
}

FreeChunk* CompactibleFreeListSpace::getChunkFromGreater(size_t numWords) {
  FreeChunk* ret;

  assert(numWords >= MinChunkSize, "Size is less than minimum");
  assert(linearAllocationWouldFail() || bestFitFirst(),
    "Should not be here");

  size_t i;
  size_t currSize = numWords + MinChunkSize;
  assert(currSize % MinObjAlignment == 0, "currSize should be aligned");
  for (i = currSize; i < IndexSetSize; i += IndexSetStride) {
    FreeList* fl = &_indexedFreeList[i];
    if (fl->head()) {
      ret = getFromListGreater(fl, numWords);
      assert(ret == NULL || ret->isFree(), "Should be returning a free chunk");
      return ret;
    }
  }

  currSize = MAX2((size_t)SmallForDictionary,
                  (size_t)(numWords + MinChunkSize));

  /* Try to get a chunk that satisfies request, while avoiding
     fragmentation that can't be handled. */
  {
    ret = dictionary()->getChunk(currSize);
    if (ret != NULL) {
      assert(ret->size() - numWords >= MinChunkSize,
        "Chunk is too small");
      _bt.allocated((HeapWord*)ret, ret->size());
      /* Carve returned chunk. */
      (void) splitChunkAndReturnRemainder(ret, numWords);
      /* Label this as no longer a free chunk. */
      assert(ret->isFree(), "This chunk should be free");
      ret->linkPrev(NULL);
    }
    assert(ret == NULL || ret->isFree(), "Should be returning a free chunk");
    return ret;
  }
  ShouldNotReachHere();
}
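
// Debug support: check that a small chunk is present on the indexed
// free list of exactly its size.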
bool CompactibleFreeListSpace::verifyChunkInIndexedFreeLists(FreeChunk* fc)
  const {
  assert(fc->size() < IndexSetSize, "Size of chunk is too large");
  return _indexedFreeList[fc->size()].verifyChunkInFreeLists(fc);
}

bool CompactibleFreeListSpace::verifyChunkInFreeLists(FreeChunk* fc) const {
  if (fc->size() >= IndexSetSize) {
    return dictionary()->verifyChunkInFreeLists(fc);
  } else {
    return verifyChunkInIndexedFreeLists(fc);
  }
}

#ifndef PRODUCT
void CompactibleFreeListSpace::assert_locked() const {
  CMSLockVerifier::assert_locked(freelistLock(), parDictionaryAllocLock());
}
#endif

FreeChunk* CompactibleFreeListSpace::allocateScratch(size_t size) {
  // In the parallel case, the main thread holds the free list lock
  // on behalf of the parallel threads.
  assert_locked();
  FreeChunk* fc;
  {
    // If GC is parallel, this might be called by several threads.
    // This should be rare enough that the locking overhead won't affect
    // the sequential code.
    MutexLockerEx x(parDictionaryAllocLock(),
                    Mutex::_no_safepoint_check_flag);
    fc = getChunkFromDictionary(size);
  }
  if (fc != NULL) {
    fc->dontCoalesce();
    assert(fc->isFree(), "Should be free, but not coalescable");
    // Verify that the block offset table shows this to
    // be a single block, but not one which is unallocated.
    _bt.verify_single_block((HeapWord*)fc, fc->size());
    _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
  }
  return fc;
}

oop CompactibleFreeListSpace::promote(oop obj, size_t obj_size) {
  assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
  assert_locked();

  // if we are tracking promotions, then first ensure space for
  // promotion (including spooling space for saving header if necessary).
  // then allocate and copy, then track promoted info if needed.
  // When tracking (see PromotionInfo::track()), the mark word may
  // be displaced and in this case restoration of the mark word
  // occurs in the (oop_since_save_marks_)iterate phase.
  if (_promoInfo.tracking() && !_promoInfo.ensure_spooling_space()) {
    return NULL;
  }
  // Call the allocate(size_t, bool) form directly to avoid the
  // additional call through the allocate(size_t) form.  Having
  // the compiler inline the call is problematic because allocate(size_t)
  // is a virtual method.
  HeapWord* res = allocate(adjustObjectSize(obj_size));
  if (res != NULL) {
    Copy::aligned_disjoint_words((HeapWord*)obj, res, obj_size);
    // if we should be tracking promotions, do so.
    if (_promoInfo.tracking()) {
      _promoInfo.track((PromotedObject*)res);
    }
  }
  return oop(res);
}

HeapWord*
CompactibleFreeListSpace::getChunkFromSmallLinearAllocBlock(size_t size) {
  assert_locked();
  assert(size >= MinChunkSize, "minimum chunk size");
  assert(size < _smallLinearAllocBlock._allocation_size_limit,
    "maximum from smallLinearAllocBlock");
  return getChunkFromLinearAllocBlock(&_smallLinearAllocBlock, size);
}

HeapWord*
CompactibleFreeListSpace::getChunkFromLinearAllocBlock(LinearAllocBlock *blk,
                                                       size_t size) {
  assert_locked();
  assert(size >= MinChunkSize, "too small");
  HeapWord* res = NULL;
  // Try to do linear allocation from blk, making sure that any residual
  // fragment left behind remains a valid free chunk.
  if (blk->_word_size == 0) {
    // We have probably been unable to fill this either in the prologue or
    // when it was exhausted at the last linear allocation. Bail out until
    // next time.
    assert(blk->_ptr == NULL, "consistency check");
    return NULL;
  }
  assert(blk->_word_size != 0 && blk->_ptr != NULL, "consistency check");
  res = getChunkFromLinearAllocBlockRemainder(blk, size);
  if (res != NULL) return res;

  // about to exhaust this linear allocation block
  if (blk->_word_size == size) { // exactly satisfied
    res = blk->_ptr;
    _bt.allocated(res, blk->_word_size);
  } else if (size + MinChunkSize <= blk->_refillSize) {
    // Update _unallocated_block if the size is such that chunk would be
    // returned to the indexed free list.  All other chunks in the indexed
    // free lists are allocated from the dictionary so that _unallocated_block
    // has already been adjusted for them.  Do it here so that the cost
    // is incurred for all chunks added back to the indexed free lists.
    if (blk->_word_size < SmallForDictionary) {
      _bt.allocated(blk->_ptr, blk->_word_size);
    }
    // Return the chunk that isn't big enough, and then refill below.
    addChunkToFreeLists(blk->_ptr, blk->_word_size);
    _bt.verify_single_block(blk->_ptr, (blk->_ptr + blk->_word_size));
    // Don't keep statistics on adding back chunk from a LinAB.
  } else {
    // A refilled block would not satisfy the request.
    return NULL;
  }

  blk->_ptr = NULL; blk->_word_size = 0;
  refillLinearAllocBlock(blk);
  assert(blk->_ptr == NULL || blk->_word_size >= size + MinChunkSize,
         "block was replenished");
  if (res != NULL) {
    splitBirth(size);
    repairLinearAllocBlock(blk);
  } else if (blk->_ptr != NULL) {
    res = blk->_ptr;
    size_t blk_size = blk->_word_size;
    blk->_word_size -= size;
    blk->_ptr += size;
    splitBirth(size);
    repairLinearAllocBlock(blk);
    // Update BOT last so that other (parallel) GC threads see a consistent
    // view of the BOT and free blocks.
    // Above must occur before BOT is updated below.
    _bt.split_block(res, blk_size, size);  // adjust block offset table
  }
  return res;
}

HeapWord* CompactibleFreeListSpace::getChunkFromLinearAllocBlockRemainder(
                                        LinearAllocBlock* blk,
                                        size_t size) {
  assert_locked();
  assert(size >= MinChunkSize, "too small");

  HeapWord* res = NULL;
  // This is the common case.  Keep it simple.
  if (blk->_word_size >= size + MinChunkSize) {
    assert(blk->_ptr != NULL, "consistency check");
    res = blk->_ptr;
    // Note that the BOT is up-to-date for the linAB before allocation.  It
    // indicates the start of the linAB.  The split_block() updates the
    // BOT for the linAB after the allocation (indicates the start of the
    // next chunk to be allocated).
    size_t blk_size = blk->_word_size;
    blk->_word_size -= size;
    blk->_ptr += size;
    splitBirth(size);
    repairLinearAllocBlock(blk);
    // Update BOT last so that other (parallel) GC threads see a consistent
    // view of the BOT and free blocks.
    // Above must occur before BOT is updated below.
    _bt.split_block(res, blk_size, size);  // adjust block offset table
    _bt.allocated(res, size);
  }
  return res;
}
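
// Take the head of the exact-size indexed free list; if that list is
// empty, fall back to the helper, which replenishes from larger lists
// or the dictionary.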
FreeChunk*
CompactibleFreeListSpace::getChunkFromIndexedFreeList(size_t size) {
  assert_locked();
  assert(size < SmallForDictionary, "just checking");
  FreeChunk* res;
  res = _indexedFreeList[size].getChunkAtHead();
  if (res == NULL) {
    res = getChunkFromIndexedFreeListHelper(size);
  }
  _bt.verify_not_unallocated((HeapWord*) res, size);
  return res;
}

FreeChunk*
CompactibleFreeListSpace::getChunkFromIndexedFreeListHelper(size_t size) {
  assert_locked();
  FreeChunk* fc = NULL;
  if (size < SmallForDictionary) {
    assert(_indexedFreeList[size].head() == NULL ||
      _indexedFreeList[size].surplus() <= 0,
      "List for this size should be empty or under populated");
    // Try best fit in exact lists before replenishing the list
    if (!bestFitFirst() || (fc = bestFitSmall(size)) == NULL) {
      // Replenish list.
      //
      // Things tried that failed.
      //   Tried allocating out of the two LinAB's first before
      // replenishing lists.
      //   Tried small linAB of size 256 (size in indexed list)
      // and replenishing indexed lists from the small linAB.
      //
      FreeChunk* newFc = NULL;
      size_t replenish_size = CMSIndexedFreeListReplenish * size;
      if (replenish_size < SmallForDictionary) {
        // Do not replenish from an underpopulated size.
        if (_indexedFreeList[replenish_size].surplus() > 0 &&
            _indexedFreeList[replenish_size].head() != NULL) {
          newFc =
            _indexedFreeList[replenish_size].getChunkAtHead();
        } else {
          newFc = bestFitSmall(replenish_size);
        }
      }
      if (newFc != NULL) {
        splitDeath(replenish_size);
      } else if (replenish_size > size) {
        assert(CMSIndexedFreeListReplenish > 1, "ctl pt invariant");
        newFc =
          getChunkFromIndexedFreeListHelper(replenish_size);
      }
      if (newFc != NULL) {
        assert(newFc->size() == replenish_size, "Got wrong size");
        size_t i;
        FreeChunk *curFc, *nextFc;
        // carve up and link blocks 0, ..., CMSIndexedFreeListReplenish - 2
        // The last chunk is not added to the lists but is returned as the
        // free chunk.
        for (curFc = newFc, nextFc = (FreeChunk*)((HeapWord*)curFc + size),
             i = 0;
             i < (CMSIndexedFreeListReplenish - 1);
             curFc = nextFc, nextFc = (FreeChunk*)((HeapWord*)nextFc + size),
             i++) {
          curFc->setSize(size);
          // Don't record this as a return in order to try and
          // determine the "returns" from a GC.
          _bt.verify_not_unallocated((HeapWord*) curFc, size);
1409 _indexedFreeList[size].returnChunkAtTail(curFc, false);
1410 _bt.mark_block((HeapWord*)curFc, size);
1411 splitBirth(size);
1412 // Don't record the initial population of the indexed list
1413 // as a split birth.
1414 }
1416 // check that the arithmetic was OK above
1417 assert((HeapWord*)nextFc == (HeapWord*)newFc + replenish_size,
1418 "inconsistency in carving newFc");
1419 curFc->setSize(size);
1420 _bt.mark_block((HeapWord*)curFc, size);
1421 splitBirth(size);
1422 return curFc;
1423 }
1424 }
1425 } else {
1426 // Get a free chunk from the free chunk dictionary to be returned to
1427 // replenish the indexed free list.
1428 fc = getChunkFromDictionaryExact(size);
1429 }
1430 assert(fc == NULL || fc->isFree(), "Should be returning a free chunk");
1431 return fc;
1432 }
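// Worked example of the replenish arithmetic (illustrative only): with
// CMSIndexedFreeListReplenish == 4 and size == 8, replenish_size == 32. A
// 32-word chunk -- taken from the 32-word list, from bestFitSmall(32), or via
// the recursive call -- is carved into four 8-word chunks; the first three are
// appended to _indexedFreeList[8] and the fourth is returned to the caller.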
1434 FreeChunk*
1435 CompactibleFreeListSpace::getChunkFromDictionary(size_t size) {
1436 assert_locked();
1437 FreeChunk* fc = _dictionary->getChunk(size);
1438 if (fc == NULL) {
1439 return NULL;
1440 }
1441 _bt.allocated((HeapWord*)fc, fc->size());
1442 if (fc->size() >= size + MinChunkSize) {
1443 fc = splitChunkAndReturnRemainder(fc, size);
1444 }
1445 assert(fc->size() >= size, "chunk too small");
1446 assert(fc->size() < size + MinChunkSize, "chunk too big");
1447 _bt.verify_single_block((HeapWord*)fc, fc->size());
1448 return fc;
1449 }
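// Editorial note: on return the chunk satisfies
//   size <= fc->size() < size + MinChunkSize,
// i.e. any excess too small to stand alone as a free chunk is deliberately
// left attached to the returned chunk rather than split off.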
1451 FreeChunk*
1452 CompactibleFreeListSpace::getChunkFromDictionaryExact(size_t size) {
1453 assert_locked();
1454 FreeChunk* fc = _dictionary->getChunk(size);
1455 if (fc == NULL) {
1456 return fc;
1457 }
1458 _bt.allocated((HeapWord*)fc, fc->size());
1459 if (fc->size() == size) {
1460 _bt.verify_single_block((HeapWord*)fc, size);
1461 return fc;
1462 }
1463 assert(fc->size() > size, "getChunk() guarantee");
1464 if (fc->size() < size + MinChunkSize) {
1465 // Return the chunk to the dictionary and go get a bigger one.
1466 returnChunkToDictionary(fc);
1467 fc = _dictionary->getChunk(size + MinChunkSize);
1468 if (fc == NULL) {
1469 return NULL;
1470 }
1471 _bt.allocated((HeapWord*)fc, fc->size());
1472 }
1473 assert(fc->size() >= size + MinChunkSize, "tautology");
1474 fc = splitChunkAndReturnRemainder(fc, size);
1475 assert(fc->size() == size, "chunk is wrong size");
1476 _bt.verify_single_block((HeapWord*)fc, size);
1477 return fc;
1478 }
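// Example of the retry above (illustrative; MinChunkSize == 4 is a
// hypothetical value): for size == 50, a first getChunk(50) might return a
// 52-word chunk, which cannot be split because the 2-word remainder would be
// below MinChunkSize; it is returned to the dictionary and a chunk of at
// least 54 words is requested instead, guaranteeing a splittable remainder.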
1480 void
1481 CompactibleFreeListSpace::returnChunkToDictionary(FreeChunk* chunk) {
1482 assert_locked();
1484 size_t size = chunk->size();
1485 _bt.verify_single_block((HeapWord*)chunk, size);
1486 // adjust _unallocated_block downward, as necessary
1487 _bt.freed((HeapWord*)chunk, size);
1488 _dictionary->returnChunk(chunk);
1489 }
1491 void
1492 CompactibleFreeListSpace::returnChunkToFreeList(FreeChunk* fc) {
1493 assert_locked();
1494 size_t size = fc->size();
1495 _bt.verify_single_block((HeapWord*) fc, size);
1496 _bt.verify_not_unallocated((HeapWord*) fc, size);
1497 if (_adaptive_freelists) {
1498 _indexedFreeList[size].returnChunkAtTail(fc);
1499 } else {
1500 _indexedFreeList[size].returnChunkAtHead(fc);
1501 }
1502 }
1504 // Add chunk to end of last block -- if it's the largest
1505 // block -- and update BOT and census data. We would
1506 // of course have preferred to coalesce it with the
1507 // last block, but it's currently less expensive to find the
1508 // largest block than it is to find the last.
1509 void
1510 CompactibleFreeListSpace::addChunkToFreeListsAtEndRecordingStats(
1511 HeapWord* chunk, size_t size) {
1512 // check that the chunk does lie in this space!
1513 assert(chunk != NULL && is_in_reserved(chunk), "Not in this space!");
1514 assert_locked();
1515 // One of the parallel gc task threads may be here
1516 // whilst others are allocating.
1517 Mutex* lock = NULL;
1518 if (ParallelGCThreads != 0) {
1519 lock = &_parDictionaryAllocLock;
1520 }
1521 FreeChunk* ec;
1522 {
1523 MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
1524 ec = dictionary()->findLargestDict(); // get largest block
1525 if (ec != NULL && ec->end() == chunk) {
1526 // It's a coterminal block - we can coalesce.
1527 size_t old_size = ec->size();
1528 coalDeath(old_size);
1529 removeChunkFromDictionary(ec);
1530 size += old_size;
1531 } else {
1532 ec = (FreeChunk*)chunk;
1533 }
1534 }
1535 ec->setSize(size);
1536 debug_only(ec->mangleFreed(size));
1537 if (size < SmallForDictionary) {
1538 lock = _indexedFreeListParLocks[size];
1539 }
1540 MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
1541 addChunkAndRepairOffsetTable((HeapWord*)ec, size, true);
1542 // Record the birth under the lock, since the recording involves
1543 // manipulation of the list on which the chunk lives and,
1544 // if the chunk is allocated and is the last on the list,
1545 // the list can go away.
1546 coalBirth(size);
1547 }
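// Illustrative scenario (not in the original source): if the largest
// dictionary block is [A, A+200) and the incoming chunk is [A+200, A+230),
// the two are coterminal; the 200-word block is removed (a coalesce death at
// size 200) and a single 230-word chunk is re-added under the appropriate
// lock (a coalesce birth at size 230).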
1549 void
1550 CompactibleFreeListSpace::addChunkToFreeLists(HeapWord* chunk,
1551 size_t size) {
1552 // check that the chunk does lie in this space!
1553 assert(chunk != NULL && is_in_reserved(chunk), "Not in this space!");
1554 assert_locked();
1555 _bt.verify_single_block(chunk, size);
1557 FreeChunk* fc = (FreeChunk*) chunk;
1558 fc->setSize(size);
1559 debug_only(fc->mangleFreed(size));
1560 if (size < SmallForDictionary) {
1561 returnChunkToFreeList(fc);
1562 } else {
1563 returnChunkToDictionary(fc);
1564 }
1565 }
1567 void
1568 CompactibleFreeListSpace::addChunkAndRepairOffsetTable(HeapWord* chunk,
1569 size_t size, bool coalesced) {
1570 assert_locked();
1571 assert(chunk != NULL, "null chunk");
1572 if (coalesced) {
1573 // repair BOT
1574 _bt.single_block(chunk, size);
1575 }
1576 addChunkToFreeLists(chunk, size);
1577 }
1579 // We _must_ find the purported chunk on our free lists;
1580 // we assert if we don't.
1581 void
1582 CompactibleFreeListSpace::removeFreeChunkFromFreeLists(FreeChunk* fc) {
1583 size_t size = fc->size();
1584 assert_locked();
1585 debug_only(verifyFreeLists());
1586 if (size < SmallForDictionary) {
1587 removeChunkFromIndexedFreeList(fc);
1588 } else {
1589 removeChunkFromDictionary(fc);
1590 }
1591 _bt.verify_single_block((HeapWord*)fc, size);
1592 debug_only(verifyFreeLists());
1593 }
1595 void
1596 CompactibleFreeListSpace::removeChunkFromDictionary(FreeChunk* fc) {
1597 size_t size = fc->size();
1598 assert_locked();
1599 assert(fc != NULL, "null chunk");
1600 _bt.verify_single_block((HeapWord*)fc, size);
1601 _dictionary->removeChunk(fc);
1602 // adjust _unallocated_block upward, as necessary
1603 _bt.allocated((HeapWord*)fc, size);
1604 }
1606 void
1607 CompactibleFreeListSpace::removeChunkFromIndexedFreeList(FreeChunk* fc) {
1608 assert_locked();
1609 size_t size = fc->size();
1610 _bt.verify_single_block((HeapWord*)fc, size);
1611 NOT_PRODUCT(
1612 if (FLSVerifyIndexTable) {
1613 verifyIndexedFreeList(size);
1614 }
1615 )
1616 _indexedFreeList[size].removeChunk(fc);
1617 debug_only(fc->clearNext());
1618 debug_only(fc->clearPrev());
1619 NOT_PRODUCT(
1620 if (FLSVerifyIndexTable) {
1621 verifyIndexedFreeList(size);
1622 }
1623 )
1624 }
1626 FreeChunk* CompactibleFreeListSpace::bestFitSmall(size_t numWords) {
1627 /* A hint is the next larger size that has a surplus.
1628 Start search at a size large enough to guarantee that
1629 the excess is >= MinChunkSize. */
1630 size_t start = align_object_size(numWords + MinChunkSize);
1631 if (start < IndexSetSize) {
1632 FreeList* it = _indexedFreeList;
1633 size_t hint = _indexedFreeList[start].hint();
1634 while (hint < IndexSetSize) {
1635 assert(hint % MinObjAlignment == 0, "hint should be aligned");
1636 FreeList *fl = &_indexedFreeList[hint];
1637 if (fl->surplus() > 0 && fl->head() != NULL) {
1638 // Found a list with surplus, reset original hint
1639 // and split out a free chunk which is returned.
1640 _indexedFreeList[start].set_hint(hint);
1641 FreeChunk* res = getFromListGreater(fl, numWords);
1642 assert(res == NULL || res->isFree(),
1643 "Should be returning a free chunk");
1644 return res;
1645 }
1646 hint = fl->hint(); /* keep looking */
1647 }
1648 /* None found. */
1649 it[start].set_hint(IndexSetSize);
1650 }
1651 return NULL;
1652 }
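// Worked example of the hint chain (illustrative; MinChunkSize == 4 is a
// hypothetical value): for numWords == 20, the search starts at the 24-word
// list. If its hint is 30 but the 30-word list has no surplus, and that
// list's hint is 40 where surplus exists, then the 24-word list's hint is
// reset to 40 and a 20-word chunk is split out of a 40-word chunk, leaving a
// 20-word remainder on the 20-word list.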
1654 /* Requires fl->size >= numWords + MinChunkSize */
1655 FreeChunk* CompactibleFreeListSpace::getFromListGreater(FreeList* fl,
1656 size_t numWords) {
1657 FreeChunk *curr = fl->head();
1658 assert(curr != NULL, "List is empty");
1659 size_t oldNumWords = curr->size();
1660 assert(numWords >= MinChunkSize, "Word size is too small");
1661 assert(oldNumWords >= numWords + MinChunkSize,
1662 "Size of chunks in the list is too small");
1664 fl->removeChunk(curr);
1665 // recorded indirectly by splitChunkAndReturnRemainder -
1666 // smallSplit(oldNumWords, numWords);
1667 FreeChunk* new_chunk = splitChunkAndReturnRemainder(curr, numWords);
1668 // Does anything have to be done for the remainder in terms of
1669 // fixing the card table?
1670 assert(new_chunk == NULL || new_chunk->isFree(),
1671 "Should be returning a free chunk");
1672 return new_chunk;
1673 }
1675 FreeChunk*
1676 CompactibleFreeListSpace::splitChunkAndReturnRemainder(FreeChunk* chunk,
1677 size_t new_size) {
1678 assert_locked();
1679 size_t size = chunk->size();
1680 assert(size > new_size, "Split from a smaller block?");
1681 assert(is_aligned(chunk), "alignment problem");
1682 assert(size == adjustObjectSize(size), "alignment problem");
1683 size_t rem_size = size - new_size;
1684 assert(rem_size == adjustObjectSize(rem_size), "alignment problem");
1685 assert(rem_size >= MinChunkSize, "Free chunk smaller than minimum");
1686 FreeChunk* ffc = (FreeChunk*)((HeapWord*)chunk + new_size);
1687 assert(is_aligned(ffc), "alignment problem");
1688 ffc->setSize(rem_size);
1689 ffc->linkNext(NULL);
1690 ffc->linkPrev(NULL); // Mark as a free block for other (parallel) GC threads.
1691 // Above must occur before BOT is updated below.
1692 // adjust block offset table
1693 _bt.split_block((HeapWord*)chunk, chunk->size(), new_size);
1694 if (rem_size < SmallForDictionary) {
1695 bool is_par = (SharedHeap::heap()->n_par_threads() > 0);
1696 if (is_par) _indexedFreeListParLocks[rem_size]->lock();
1697 returnChunkToFreeList(ffc);
1698 split(size, rem_size);
1699 if (is_par) _indexedFreeListParLocks[rem_size]->unlock();
1700 } else {
1701 returnChunkToDictionary(ffc);
1702 split(size, rem_size);
1703 }
1704 chunk->setSize(new_size);
1705 return chunk;
1706 }
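// Editorial sketch (illustrative): splitting a 100-word chunk at
// new_size == 60 places the remainder ffc at chunk + 60 with 40 words. The
// remainder is fully initialized as a free block *before* the BOT update, so
// a parallel thread walking the space never sees an untyped tail; only then
// is the original chunk shrunk to 60 words and returned.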
1708 void
1709 CompactibleFreeListSpace::sweep_completed() {
1710 // Now that space is probably plentiful, refill linear
1711 // allocation blocks as needed.
1712 refillLinearAllocBlocksIfNeeded();
1713 }
1715 void
1716 CompactibleFreeListSpace::gc_prologue() {
1717 assert_locked();
1718 if (PrintFLSStatistics != 0) {
1719 gclog_or_tty->print("Before GC:\n");
1720 reportFreeListStatistics();
1721 }
1722 refillLinearAllocBlocksIfNeeded();
1723 }
1725 void
1726 CompactibleFreeListSpace::gc_epilogue() {
1727 assert_locked();
1728 if (PrintGCDetails && Verbose && !_adaptive_freelists) {
1729 if (_smallLinearAllocBlock._word_size == 0)
1730 warning("CompactibleFreeListSpace(epilogue):: Linear allocation failure");
1731 }
1732 assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");
1733 _promoInfo.stopTrackingPromotions();
1734 repairLinearAllocationBlocks();
1735 // Print Space's stats
1736 if (PrintFLSStatistics != 0) {
1737 gclog_or_tty->print("After GC:\n");
1738 reportFreeListStatistics();
1739 }
1740 }
1742 // Iteration support, mostly delegated from a CMS generation
1744 void CompactibleFreeListSpace::save_marks() {
1745 // mark the "end" of the used space at the time of this call;
1746 // note, however, that promoted objects from this point
1747 // on are tracked in the _promoInfo below.
1748 set_saved_mark_word(BlockOffsetArrayUseUnallocatedBlock ?
1749 unallocated_block() : end());
1750 // inform allocator that promotions should be tracked.
1751 assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");
1752 _promoInfo.startTrackingPromotions();
1753 }
1755 bool CompactibleFreeListSpace::no_allocs_since_save_marks() {
1756 assert(_promoInfo.tracking(), "No preceding save_marks?");
1757 guarantee(SharedHeap::heap()->n_par_threads() == 0,
1758 "Shouldn't be called (yet) during parallel part of gc.");
1759 return _promoInfo.noPromotions();
1760 }
1762 #define CFLS_OOP_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix) \
1763 \
1764 void CompactibleFreeListSpace:: \
1765 oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk) { \
1766 assert(SharedHeap::heap()->n_par_threads() == 0, \
1767 "Shouldn't be called (yet) during parallel part of gc."); \
1768 _promoInfo.promoted_oops_iterate##nv_suffix(blk); \
1769 /* \
1770 * This also restores any displaced headers and removes the elements from \
1771 * the iteration set as they are processed, so that we have a clean slate \
1772 * at the end of the iteration. Note, thus, that if new objects are \
1773 * promoted as a result of the iteration they are iterated over as well. \
1774 */ \
1775 assert(_promoInfo.noPromotions(), "_promoInfo inconsistency"); \
1776 }
1778 ALL_SINCE_SAVE_MARKS_CLOSURES(CFLS_OOP_SINCE_SAVE_MARKS_DEFN)
1780 //////////////////////////////////////////////////////////////////////////////
1781 // We go over the list of promoted objects, removing each from the list,
1782 // and applying the closure (this may, in turn, add more elements to
1783 // the tail of the promoted list, and these newly added objects will
1784 // also be processed) until the list is empty.
1785 // To aid verification and debugging, in the non-product builds
1786 // we actually forward _promoHead each time we process a promoted oop.
1787 // Note that this is not necessary in general (i.e. when we don't need to
1788 // call PromotionInfo::verify()) because oop_iterate can only add to the
1789 // end of _promoTail, and never needs to look at _promoHead.
1791 #define PROMOTED_OOPS_ITERATE_DEFN(OopClosureType, nv_suffix) \
1792 \
1793 void PromotionInfo::promoted_oops_iterate##nv_suffix(OopClosureType* cl) { \
1794 NOT_PRODUCT(verify()); \
1795 PromotedObject *curObj, *nextObj; \
1796 for (curObj = _promoHead; curObj != NULL; curObj = nextObj) { \
1797 if ((nextObj = curObj->next()) == NULL) { \
1798 /* protect ourselves against additions due to closure application \
1799 below by resetting the list. */ \
1800 assert(_promoTail == curObj, "Should have been the tail"); \
1801 _promoHead = _promoTail = NULL; \
1802 } \
1803 if (curObj->hasDisplacedMark()) { \
1804 /* restore displaced header */ \
1805 oop(curObj)->set_mark(nextDisplacedHeader()); \
1806 } else { \
1807 /* restore prototypical header */ \
1808 oop(curObj)->init_mark(); \
1809 } \
1810 /* The "promoted_mark" should now not be set */ \
1811 assert(!curObj->hasPromotedMark(), \
1812 "Should have been cleared by restoring displaced mark-word"); \
1813 NOT_PRODUCT(_promoHead = nextObj); \
1814 if (cl != NULL) oop(curObj)->oop_iterate(cl); \
1815 if (nextObj == NULL) { /* start at head of list reset above */ \
1816 nextObj = _promoHead; \
1817 } \
1818 } \
1819 assert(noPromotions(), "post-condition violation"); \
1820 assert(_promoHead == NULL && _promoTail == NULL, "emptied promoted list");\
1821 assert(_spoolHead == _spoolTail, "emptied spooling buffers"); \
1822 assert(_firstIndex == _nextIndex, "empty buffer"); \
1823 }
1825 // This should have been ALL_SINCE_...() just like the others,
1826 // but, because the body of the method above is somewhat longer,
1827 // the MSVC compiler cannot cope; as a workaround, we split the
1828 // macro into its 3 constituent parts below (see original macro
1829 // definition in specializedOopClosures.hpp).
1830 SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES_YOUNG(PROMOTED_OOPS_ITERATE_DEFN)
1831 PROMOTED_OOPS_ITERATE_DEFN(OopsInGenClosure,_v)
1834 void CompactibleFreeListSpace::object_iterate_since_last_GC(ObjectClosure* cl) {
1835 // ugghh... how would one do this efficiently for a non-contiguous space?
1836 guarantee(false, "NYI");
1837 }
1839 bool CompactibleFreeListSpace::linearAllocationWouldFail() const {
1840 return _smallLinearAllocBlock._word_size == 0;
1841 }
1843 void CompactibleFreeListSpace::repairLinearAllocationBlocks() {
1844 // Fix up linear allocation blocks to look like free blocks
1845 repairLinearAllocBlock(&_smallLinearAllocBlock);
1846 }
1848 void CompactibleFreeListSpace::repairLinearAllocBlock(LinearAllocBlock* blk) {
1849 assert_locked();
1850 if (blk->_ptr != NULL) {
1851 assert(blk->_word_size != 0 && blk->_word_size >= MinChunkSize,
1852 "Minimum block size requirement");
1853 FreeChunk* fc = (FreeChunk*)(blk->_ptr);
1854 fc->setSize(blk->_word_size);
1855 fc->linkPrev(NULL); // mark as free
1856 fc->dontCoalesce();
1857 assert(fc->isFree(), "just marked it free");
1858 assert(fc->cantCoalesce(), "just marked it uncoalescable");
1859 }
1860 }
1862 void CompactibleFreeListSpace::refillLinearAllocBlocksIfNeeded() {
1863 assert_locked();
1864 if (_smallLinearAllocBlock._ptr == NULL) {
1865 assert(_smallLinearAllocBlock._word_size == 0,
1866 "Size of linAB should be zero if the ptr is NULL");
1867 // Reset the linAB refill and allocation size limit.
1868 _smallLinearAllocBlock.set(0, 0, 1024*SmallForLinearAlloc, SmallForLinearAlloc);
1869 }
1870 refillLinearAllocBlockIfNeeded(&_smallLinearAllocBlock);
1871 }
1873 void
1874 CompactibleFreeListSpace::refillLinearAllocBlockIfNeeded(LinearAllocBlock* blk) {
1875 assert_locked();
1876 assert((blk->_ptr == NULL && blk->_word_size == 0) ||
1877 (blk->_ptr != NULL && blk->_word_size >= MinChunkSize),
1878 "blk invariant");
1879 if (blk->_ptr == NULL) {
1880 refillLinearAllocBlock(blk);
1881 }
1882 if (PrintMiscellaneous && Verbose) {
1883 if (blk->_word_size == 0) {
1884 warning("CompactibleFreeListSpace(prologue):: Linear allocation failure");
1885 }
1886 }
1887 }
1889 void
1890 CompactibleFreeListSpace::refillLinearAllocBlock(LinearAllocBlock* blk) {
1891 assert_locked();
1892 assert(blk->_word_size == 0 && blk->_ptr == NULL,
1893 "linear allocation block should be empty");
1894 FreeChunk* fc;
1895 if (blk->_refillSize < SmallForDictionary &&
1896 (fc = getChunkFromIndexedFreeList(blk->_refillSize)) != NULL) {
1897 // A linAB's strategy might be to use small sizes to reduce
1898 // fragmentation but still get the benefits of allocation from a
1899 // linAB.
1900 } else {
1901 fc = getChunkFromDictionary(blk->_refillSize);
1902 }
1903 if (fc != NULL) {
1904 blk->_ptr = (HeapWord*)fc;
1905 blk->_word_size = fc->size();
1906 fc->dontCoalesce(); // to prevent sweeper from sweeping us up
1907 }
1908 }
1910 // Support for concurrent collection policy decisions.
1911 bool CompactibleFreeListSpace::should_concurrent_collect() const {
1912 // In the future we might want to factor fragmentation stats --
1913 // including erosion of the "mountain" -- into this decision as well.
1914 return !adaptive_freelists() && linearAllocationWouldFail();
1915 }
1917 // Support for compaction
1919 void CompactibleFreeListSpace::prepare_for_compaction(CompactPoint* cp) {
1920 SCAN_AND_FORWARD(cp,end,block_is_obj,block_size);
1921 // prepare_for_compaction() uses the space between live objects
1922 // so that later phase can skip dead space quickly. So verification
1923 // of the free lists doesn't work after.
1924 }
1926 #define obj_size(q) adjustObjectSize(oop(q)->size())
1927 #define adjust_obj_size(s) adjustObjectSize(s)
1929 void CompactibleFreeListSpace::adjust_pointers() {
1930 // In other versions of adjust_pointers(), a bail out
1931 // based on the amount of live data in the generation
1932 // (i.e., if 0, bail out) may be used.
1933 // Cannot test used() == 0 here because the free lists have already
1934 // been mangled by the compaction.
1936 SCAN_AND_ADJUST_POINTERS(adjust_obj_size);
1937 // See note about verification in prepare_for_compaction().
1938 }
1940 void CompactibleFreeListSpace::compact() {
1941 SCAN_AND_COMPACT(obj_size);
1942 }
1944 // fragmentation_metric = 1 - [sum of (fbs**2) / (sum of fbs)**2]
1945 // where fbs are the free block sizes
1946 double CompactibleFreeListSpace::flsFrag() const {
1947 size_t itabFree = totalSizeInIndexedFreeLists();
1948 double frag = 0.0;
1949 size_t i;
1951 for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
1952 double sz = i;
1953 frag += _indexedFreeList[i].count() * (sz * sz);
1954 }
1956 double totFree = itabFree +
1957 _dictionary->totalChunkSize(DEBUG_ONLY(freelistLock()));
1958 if (totFree > 0) {
1959 frag = ((frag + _dictionary->sum_of_squared_block_sizes()) /
1960 (totFree * totFree));
1961 frag = (double)1.0 - frag;
1962 } else {
1963 assert(frag == 0.0, "Follows from totFree == 0");
1964 }
1965 return frag;
1966 }
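// Worked examples of the metric (illustrative): a single 500-word free block
// gives 1 - 500^2/500^2 == 0.0 (unfragmented), while one hundred 5-word
// blocks give 1 - (100 * 5^2)/500^2 == 0.99 (highly fragmented).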
1968 #define CoalSurplusPercent 1.05
1969 #define SplitSurplusPercent 1.10
1971 void CompactibleFreeListSpace::beginSweepFLCensus(
1972 float inter_sweep_current,
1973 float inter_sweep_estimate) {
1974 assert_locked();
1975 size_t i;
1976 for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
1977 FreeList* fl = &_indexedFreeList[i];
1978 fl->compute_desired(inter_sweep_current, inter_sweep_estimate);
1979 fl->set_coalDesired((ssize_t)((double)fl->desired() * CoalSurplusPercent));
1980 fl->set_beforeSweep(fl->count());
1981 fl->set_bfrSurp(fl->surplus());
1982 }
1983 _dictionary->beginSweepDictCensus(CoalSurplusPercent,
1984 inter_sweep_current,
1985 inter_sweep_estimate);
1986 }
1988 void CompactibleFreeListSpace::setFLSurplus() {
1989 assert_locked();
1990 size_t i;
1991 for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
1992 FreeList *fl = &_indexedFreeList[i];
1993 fl->set_surplus(fl->count() -
1994 (ssize_t)((double)fl->desired() * SplitSurplusPercent));
1995 }
1996 }
1998 void CompactibleFreeListSpace::setFLHints() {
1999 assert_locked();
2000 size_t i;
2001 size_t h = IndexSetSize;
2002 for (i = IndexSetSize - 1; i != 0; i -= IndexSetStride) {
2003 FreeList *fl = &_indexedFreeList[i];
2004 fl->set_hint(h);
2005 if (fl->surplus() > 0) {
2006 h = i;
2007 }
2008 }
2009 }
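// Example of the right-to-left pass (illustrative): if only the 40- and
// 100-word lists have a surplus, lists of size 100 and above end up hinting
// at IndexSetSize, lists 40 through 99 hint at 100, and lists below 40 hint
// at 40 -- i.e. each list's hint is the smallest size strictly larger than
// its own that had a surplus.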
2011 void CompactibleFreeListSpace::clearFLCensus() {
2012 assert_locked();
2013 size_t i;
2014 for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2015 FreeList *fl = &_indexedFreeList[i];
2016 fl->set_prevSweep(fl->count());
2017 fl->set_coalBirths(0);
2018 fl->set_coalDeaths(0);
2019 fl->set_splitBirths(0);
2020 fl->set_splitDeaths(0);
2021 }
2022 }
2024 void CompactibleFreeListSpace::endSweepFLCensus(size_t sweep_count) {
2025 setFLSurplus();
2026 setFLHints();
2027 if (PrintGC && PrintFLSCensus > 0) {
2028 printFLCensus(sweep_count);
2029 }
2030 clearFLCensus();
2031 assert_locked();
2032 _dictionary->endSweepDictCensus(SplitSurplusPercent);
2033 }
2035 bool CompactibleFreeListSpace::coalOverPopulated(size_t size) {
2036 if (size < SmallForDictionary) {
2037 FreeList *fl = &_indexedFreeList[size];
2038 return (fl->coalDesired() < 0) ||
2039 ((int)fl->count() > fl->coalDesired());
2040 } else {
2041 return dictionary()->coalDictOverPopulated(size);
2042 }
2043 }
2045 void CompactibleFreeListSpace::smallCoalBirth(size_t size) {
2046 assert(size < SmallForDictionary, "Size too large for indexed list");
2047 FreeList *fl = &_indexedFreeList[size];
2048 fl->increment_coalBirths();
2049 fl->increment_surplus();
2050 }
2052 void CompactibleFreeListSpace::smallCoalDeath(size_t size) {
2053 assert(size < SmallForDictionary, "Size too large for indexed list");
2054 FreeList *fl = &_indexedFreeList[size];
2055 fl->increment_coalDeaths();
2056 fl->decrement_surplus();
2057 }
2059 void CompactibleFreeListSpace::coalBirth(size_t size) {
2060 if (size < SmallForDictionary) {
2061 smallCoalBirth(size);
2062 } else {
2063 dictionary()->dictCensusUpdate(size,
2064 false /* split */,
2065 true /* birth */);
2066 }
2067 }
2069 void CompactibleFreeListSpace::coalDeath(size_t size) {
2070 if(size < SmallForDictionary) {
2071 smallCoalDeath(size);
2072 } else {
2073 dictionary()->dictCensusUpdate(size,
2074 false /* split */,
2075 false /* birth */);
2076 }
2077 }
2079 void CompactibleFreeListSpace::smallSplitBirth(size_t size) {
2080 assert(size < SmallForDictionary, "Size too large for indexed list");
2081 FreeList *fl = &_indexedFreeList[size];
2082 fl->increment_splitBirths();
2083 fl->increment_surplus();
2084 }
2086 void CompactibleFreeListSpace::smallSplitDeath(size_t size) {
2087 assert(size < SmallForDictionary, "Size too large for indexed list");
2088 FreeList *fl = &_indexedFreeList[size];
2089 fl->increment_splitDeaths();
2090 fl->decrement_surplus();
2091 }
2093 void CompactibleFreeListSpace::splitBirth(size_t size) {
2094 if (size < SmallForDictionary) {
2095 smallSplitBirth(size);
2096 } else {
2097 dictionary()->dictCensusUpdate(size,
2098 true /* split */,
2099 true /* birth */);
2100 }
2101 }
2103 void CompactibleFreeListSpace::splitDeath(size_t size) {
2104 if (size < SmallForDictionary) {
2105 smallSplitDeath(size);
2106 } else {
2107 dictionary()->dictCensusUpdate(size,
2108 true /* split */,
2109 false /* birth */);
2110 }
2111 }
2113 void CompactibleFreeListSpace::split(size_t from, size_t to1) {
2114 size_t to2 = from - to1;
2115 splitDeath(from);
2116 splitBirth(to1);
2117 splitBirth(to2);
2118 }
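// Example (illustrative): split(100, 60) records a split death for size 100
// and split births for sizes 60 and 40; the total of free words tracked by
// the census is unchanged by the split.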
2120 void CompactibleFreeListSpace::print() const {
2121 tty->print(" CompactibleFreeListSpace");
2122 Space::print();
2123 }
2125 void CompactibleFreeListSpace::prepare_for_verify() {
2126 assert_locked();
2127 repairLinearAllocationBlocks();
2128 // Verify that the SpoolBlocks look like free blocks of
2129 // appropriate sizes... To be done ...
2130 }
2132 class VerifyAllBlksClosure: public BlkClosure {
2133 private:
2134 const CompactibleFreeListSpace* _sp;
2135 const MemRegion _span;
2137 public:
2138 VerifyAllBlksClosure(const CompactibleFreeListSpace* sp,
2139 MemRegion span) : _sp(sp), _span(span) { }
2141 virtual size_t do_blk(HeapWord* addr) {
2142 size_t res;
2143 if (_sp->block_is_obj(addr)) {
2144 oop p = oop(addr);
2145 guarantee(p->is_oop(), "Should be an oop");
2146 res = _sp->adjustObjectSize(p->size());
2147 if (_sp->obj_is_alive(addr)) {
2148 p->verify();
2149 }
2150 } else {
2151 FreeChunk* fc = (FreeChunk*)addr;
2152 res = fc->size();
2153 if (FLSVerifyLists && !fc->cantCoalesce()) {
2154 guarantee(_sp->verifyChunkInFreeLists(fc),
2155 "Chunk should be on a free list");
2156 }
2157 }
2158 guarantee(res != 0, "Livelock: no rank reduction!");
2159 return res;
2160 }
2161 };
2163 class VerifyAllOopsClosure: public OopClosure {
2164 private:
2165 const CMSCollector* _collector;
2166 const CompactibleFreeListSpace* _sp;
2167 const MemRegion _span;
2168 const bool _past_remark;
2169 const CMSBitMap* _bit_map;
2171 protected:
2172 void do_oop(void* p, oop obj) {
2173 if (_span.contains(obj)) { // the interior oop points into CMS heap
2174 if (!_span.contains(p)) { // reference from outside CMS heap
2175 // Should be a valid object; the first disjunct below allows
2176 // us to sidestep an assertion in block_is_obj() that insists
2177 // that p be in _sp. Note that several generations (and spaces)
2178 // are spanned by _span (CMS heap) above.
2179 guarantee(!_sp->is_in_reserved(obj) ||
2180 _sp->block_is_obj((HeapWord*)obj),
2181 "Should be an object");
2182 guarantee(obj->is_oop(), "Should be an oop");
2183 obj->verify();
2184 if (_past_remark) {
2185 // Remark has been completed; the object should be marked.
2186 guarantee(_bit_map->isMarked((HeapWord*)obj), "Should be marked");
2187 }
2188 } else { // reference within CMS heap
2189 if (_past_remark) {
2190 // Remark has been completed -- so the referent should have
2191 // been marked, if referring object is.
2192 if (_bit_map->isMarked(_collector->block_start(p))) {
2193 guarantee(_bit_map->isMarked((HeapWord*)obj), "Marking error?");
2194 }
2195 }
2196 }
2197 } else if (_sp->is_in_reserved(p)) {
2198 // the reference is from FLS, and points out of FLS
2199 guarantee(obj->is_oop(), "Should be an oop");
2200 obj->verify();
2201 }
2202 }
2204 template <class T> void do_oop_work(T* p) {
2205 T heap_oop = oopDesc::load_heap_oop(p);
2206 if (!oopDesc::is_null(heap_oop)) {
2207 oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
2208 do_oop(p, obj);
2209 }
2210 }
2212 public:
2213 VerifyAllOopsClosure(const CMSCollector* collector,
2214 const CompactibleFreeListSpace* sp, MemRegion span,
2215 bool past_remark, CMSBitMap* bit_map) :
2216 OopClosure(), _collector(collector), _sp(sp), _span(span),
2217 _past_remark(past_remark), _bit_map(bit_map) { }
2219 virtual void do_oop(oop* p) { VerifyAllOopsClosure::do_oop_work(p); }
2220 virtual void do_oop(narrowOop* p) { VerifyAllOopsClosure::do_oop_work(p); }
2221 };
2223 void CompactibleFreeListSpace::verify(bool ignored) const {
2224 assert_lock_strong(&_freelistLock);
2225 verify_objects_initialized();
2226 MemRegion span = _collector->_span;
2227 bool past_remark = (_collector->abstract_state() ==
2228 CMSCollector::Sweeping);
2230 ResourceMark rm;
2231 HandleMark hm;
2233 // Check integrity of CFL data structures
2234 _promoInfo.verify();
2235 _dictionary->verify();
2236 if (FLSVerifyIndexTable) {
2237 verifyIndexedFreeLists();
2238 }
2239 // Check integrity of all objects and free blocks in space
2240 {
2241 VerifyAllBlksClosure cl(this, span);
2242 ((CompactibleFreeListSpace*)this)->blk_iterate(&cl); // cast off const
2243 }
2244 // Check that all references in the heap to FLS
2245 // are to valid objects in FLS or that references in
2246 // FLS are to valid objects elsewhere in the heap
2247 if (FLSVerifyAllHeapReferences)
2248 {
2249 VerifyAllOopsClosure cl(_collector, this, span, past_remark,
2250 _collector->markBitMap());
2251 CollectedHeap* ch = Universe::heap();
2252 ch->oop_iterate(&cl); // all oops in generations
2253 ch->permanent_oop_iterate(&cl); // all oops in perm gen
2254 }
2256 if (VerifyObjectStartArray) {
2257 // Verify the block offset table
2258 _bt.verify();
2259 }
2260 }
2262 #ifndef PRODUCT
2263 void CompactibleFreeListSpace::verifyFreeLists() const {
2264 if (FLSVerifyLists) {
2265 _dictionary->verify();
2266 verifyIndexedFreeLists();
2267 } else {
2268 if (FLSVerifyDictionary) {
2269 _dictionary->verify();
2270 }
2271 if (FLSVerifyIndexTable) {
2272 verifyIndexedFreeLists();
2273 }
2274 }
2275 }
2276 #endif
2278 void CompactibleFreeListSpace::verifyIndexedFreeLists() const {
2279 size_t i = 0;
2280 for (; i < MinChunkSize; i++) {
2281 guarantee(_indexedFreeList[i].head() == NULL, "should be NULL");
2282 }
2283 for (; i < IndexSetSize; i++) {
2284 verifyIndexedFreeList(i);
2285 }
2286 }
2288 void CompactibleFreeListSpace::verifyIndexedFreeList(size_t size) const {
2289 FreeChunk* fc = _indexedFreeList[size].head();
2290 guarantee((size % 2 == 0) || fc == NULL, "Odd slots should be empty");
2291 for (; fc != NULL; fc = fc->next()) {
2292 guarantee(fc->size() == size, "Size inconsistency");
2293 guarantee(fc->isFree(), "!free?");
2294 guarantee(fc->next() == NULL || fc->next()->prev() == fc, "Broken list");
2295 }
2296 }
2298 #ifndef PRODUCT
2299 void CompactibleFreeListSpace::checkFreeListConsistency() const {
2300 assert(_dictionary->minSize() <= IndexSetSize,
2301 "Some sizes can't be allocated without recourse to"
2302 " linear allocation buffers");
2303 assert(MIN_TREE_CHUNK_SIZE*HeapWordSize == sizeof(TreeChunk),
2304 "else MIN_TREE_CHUNK_SIZE is wrong");
2305 assert((IndexSetStride == 2 && IndexSetStart == 2) ||
2306 (IndexSetStride == 1 && IndexSetStart == 1), "just checking");
2307 assert((IndexSetStride != 2) || (MinChunkSize % 2 == 0),
2308 "Some for-loops may be incorrectly initialized");
2309 assert((IndexSetStride != 2) || (IndexSetSize % 2 == 1),
2310 "For-loops that iterate over IndexSet with stride 2 may be wrong");
2311 }
2312 #endif
2314 void CompactibleFreeListSpace::printFLCensus(size_t sweep_count) const {
2315 assert_lock_strong(&_freelistLock);
2316 FreeList total;
2317 gclog_or_tty->print("end sweep# " SIZE_FORMAT "\n", sweep_count);
2318 FreeList::print_labels_on(gclog_or_tty, "size");
2319 size_t totalFree = 0;
2320 for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2321 const FreeList *fl = &_indexedFreeList[i];
2322 totalFree += fl->count() * fl->size();
2323 if (i % (40*IndexSetStride) == 0) {
2324 FreeList::print_labels_on(gclog_or_tty, "size");
2325 }
2326 fl->print_on(gclog_or_tty);
2327 total.set_bfrSurp( total.bfrSurp() + fl->bfrSurp() );
2328 total.set_surplus( total.surplus() + fl->surplus() );
2329 total.set_desired( total.desired() + fl->desired() );
2330 total.set_prevSweep( total.prevSweep() + fl->prevSweep() );
2331 total.set_beforeSweep(total.beforeSweep() + fl->beforeSweep());
2332 total.set_count( total.count() + fl->count() );
2333 total.set_coalBirths( total.coalBirths() + fl->coalBirths() );
2334 total.set_coalDeaths( total.coalDeaths() + fl->coalDeaths() );
2335 total.set_splitBirths(total.splitBirths() + fl->splitBirths());
2336 total.set_splitDeaths(total.splitDeaths() + fl->splitDeaths());
2337 }
2338 total.print_on(gclog_or_tty, "TOTAL");
2339 gclog_or_tty->print_cr("Total free in indexed lists "
2340 SIZE_FORMAT " words", totalFree);
2341 gclog_or_tty->print("growth: %8.5f deficit: %8.5f\n",
2342 (double)(total.splitBirths()+total.coalBirths()-total.splitDeaths()-total.coalDeaths())/
2343 (total.prevSweep() != 0 ? (double)total.prevSweep() : 1.0),
2344 (double)(total.desired() - total.count())/(total.desired() != 0 ? (double)total.desired() : 1.0));
2345 _dictionary->printDictCensus();
2346 }
2348 // Return the next displaced header, incrementing the pointer and
2349 // recycling spool area as necessary.
2350 markOop PromotionInfo::nextDisplacedHeader() {
2351 assert(_spoolHead != NULL, "promotionInfo inconsistency");
2352 assert(_spoolHead != _spoolTail || _firstIndex < _nextIndex,
2353 "Empty spool space: no displaced header can be fetched");
2354 assert(_spoolHead->bufferSize > _firstIndex, "Off by one error at head?");
2355 markOop hdr = _spoolHead->displacedHdr[_firstIndex];
2356 // Spool forward
2357 if (++_firstIndex == _spoolHead->bufferSize) { // last location in this block
2358 // forward to next block, recycling this block into spare spool buffer
2359 SpoolBlock* tmp = _spoolHead->nextSpoolBlock;
2360 assert(_spoolHead != _spoolTail, "Spooling storage mix-up");
2361 _spoolHead->nextSpoolBlock = _spareSpool;
2362 _spareSpool = _spoolHead;
2363 _spoolHead = tmp;
2364 _firstIndex = 1;
2365 NOT_PRODUCT(
2366 if (_spoolHead == NULL) { // all buffers fully consumed
2367 assert(_spoolTail == NULL && _nextIndex == 1,
2368 "spool buffers processing inconsistency");
2369 }
2370 )
2371 }
2372 return hdr;
2373 }
2375 void PromotionInfo::track(PromotedObject* trackOop) {
2376 track(trackOop, oop(trackOop)->klass());
2377 }
2379 void PromotionInfo::track(PromotedObject* trackOop, klassOop klassOfOop) {
2380 // make a copy of header as it may need to be spooled
2381 markOop mark = oop(trackOop)->mark();
2382 trackOop->clearNext();
2383 if (mark->must_be_preserved_for_cms_scavenge(klassOfOop)) {
2384 // save non-prototypical header, and mark oop
2385 saveDisplacedHeader(mark);
2386 trackOop->setDisplacedMark();
2387 } else {
2388 // we'd like to assert something like the following:
2389 // assert(mark == markOopDesc::prototype(), "consistency check");
2390 // ... but the above won't work because the age bits have not (yet) been
2391 // cleared. The remainder of the check would be identical to the
2392 // condition checked in must_be_preserved() above, so we don't really
2393 // have anything useful to check here!
2394 }
2395 if (_promoTail != NULL) {
2396 assert(_promoHead != NULL, "List consistency");
2397 _promoTail->setNext(trackOop);
2398 _promoTail = trackOop;
2399 } else {
2400 assert(_promoHead == NULL, "List consistency");
2401 _promoHead = _promoTail = trackOop;
2402 }
2403 // Mark as newly promoted, so we can skip over such objects
2404 // when scanning dirty cards
2405 assert(!trackOop->hasPromotedMark(), "Should not have been marked");
2406 trackOop->setPromotedMark();
2407 }
2409 // Save the given displaced header, incrementing the pointer and
2410 // obtaining more spool area as necessary.
2411 void PromotionInfo::saveDisplacedHeader(markOop hdr) {
2412 assert(_spoolHead != NULL && _spoolTail != NULL,
2413 "promotionInfo inconsistency");
2414 assert(_spoolTail->bufferSize > _nextIndex, "Off by one error at tail?");
2415 _spoolTail->displacedHdr[_nextIndex] = hdr;
2416 // Spool forward
2417 if (++_nextIndex == _spoolTail->bufferSize) { // last location in this block
2418 // get a new spooling block
2419 assert(_spoolTail->nextSpoolBlock == NULL, "tail should terminate spool list");
2420 _splice_point = _spoolTail; // save for splicing
2421 _spoolTail->nextSpoolBlock = getSpoolBlock(); // might fail
2422 _spoolTail = _spoolTail->nextSpoolBlock; // might become NULL ...
2423 // ... but will attempt filling before next promotion attempt
2424 _nextIndex = 1;
2425 }
2426 }
2428 // Ensure that spooling space exists. Return false if spooling space
2429 // could not be obtained.
2430 bool PromotionInfo::ensure_spooling_space_work() {
2431 assert(!has_spooling_space(), "Only call when there is no spooling space");
2432 // Try and obtain more spooling space
2433 SpoolBlock* newSpool = getSpoolBlock();
2434 assert(newSpool == NULL ||
2435 (newSpool->bufferSize != 0 && newSpool->nextSpoolBlock == NULL),
2436 "getSpoolBlock() sanity check");
2437 if (newSpool == NULL) {
2438 return false;
2439 }
2440 _nextIndex = 1;
2441 if (_spoolTail == NULL) {
2442 _spoolTail = newSpool;
2443 if (_spoolHead == NULL) {
2444 _spoolHead = newSpool;
2445 _firstIndex = 1;
2446 } else {
2447 assert(_splice_point != NULL && _splice_point->nextSpoolBlock == NULL,
2448 "Splice point invariant");
2449 // Extra check that _splice_point is connected to list
2450 #ifdef ASSERT
2451 {
2452 SpoolBlock* blk = _spoolHead;
2453 for (; blk->nextSpoolBlock != NULL;
2454 blk = blk->nextSpoolBlock);
2455 assert(blk != NULL && blk == _splice_point,
2456 "Splice point incorrect");
2457 }
2458 #endif // ASSERT
2459 _splice_point->nextSpoolBlock = newSpool;
2460 }
2461 } else {
2462 assert(_spoolHead != NULL, "spool list consistency");
2463 _spoolTail->nextSpoolBlock = newSpool;
2464 _spoolTail = newSpool;
2465 }
2466 return true;
2467 }
2469 // Get a free spool buffer from the free pool, getting a new block
2470 // from the heap if necessary.
2471 SpoolBlock* PromotionInfo::getSpoolBlock() {
2472 SpoolBlock* res;
2473 if ((res = _spareSpool) != NULL) {
2474 _spareSpool = _spareSpool->nextSpoolBlock;
2475 res->nextSpoolBlock = NULL;
2476 } else { // spare spool exhausted, get some from heap
2477 res = (SpoolBlock*)(space()->allocateScratch(refillSize()));
2478 if (res != NULL) {
2479 res->init();
2480 }
2481 }
2482 assert(res == NULL || res->nextSpoolBlock == NULL, "postcondition");
2483 return res;
2484 }
2486 void PromotionInfo::startTrackingPromotions() {
2487 assert(_spoolHead == _spoolTail && _firstIndex == _nextIndex,
2488 "spooling inconsistency?");
2489 _firstIndex = _nextIndex = 1;
2490 _tracking = true;
2491 }
2493 void PromotionInfo::stopTrackingPromotions() {
2494 assert(_spoolHead == _spoolTail && _firstIndex == _nextIndex,
2495 "spooling inconsistency?");
2496 _firstIndex = _nextIndex = 1;
2497 _tracking = false;
2498 }
2500 // When _spoolTail is not NULL, the pair <_spoolTail, _nextIndex>
2501 // denotes the next slot available for filling.
2502 // The set of slots holding displaced headers is then all those in the
2503 // right-open interval denoted by:
2504 //
2505 // [ <_spoolHead, _firstIndex>, <_spoolTail, _nextIndex> )
2506 //
2507 // When _spoolTail is NULL, then the set of slots with displaced headers
2508 // consists of all those starting at the slot <_spoolHead, _firstIndex> and
2509 // going up to the last slot of the last block in the linked list.
2510 // In this latter case, _splice_point points to the tail block of
2511 // this linked list of blocks holding displaced headers.
2512 void PromotionInfo::verify() const {
2513 // Verify the following:
2514 // 1. the number of displaced headers matches the number of promoted
2515 // objects that have displaced headers
2516 // 2. each promoted object lies in this space
2517 debug_only(
2518 PromotedObject* junk = NULL;
2519 assert(junk->next_addr() == (void*)(oop(junk)->mark_addr()),
2520 "Offset of PromotedObject::_next is expected to align with "
2521 " the OopDesc::_mark within OopDesc");
2522 )
2523 // FIXME: guarantee????
2524 guarantee(_spoolHead == NULL || _spoolTail != NULL ||
2525 _splice_point != NULL, "list consistency");
2526 guarantee(_promoHead == NULL || _promoTail != NULL, "list consistency");
2527 // count the number of objects with displaced headers
2528 size_t numObjsWithDisplacedHdrs = 0;
2529 for (PromotedObject* curObj = _promoHead; curObj != NULL; curObj = curObj->next()) {
2530 guarantee(space()->is_in_reserved((HeapWord*)curObj), "Containment");
2531 // the last promoted object may fail the mark() != NULL test of is_oop().
2532 guarantee(curObj->next() == NULL || oop(curObj)->is_oop(), "must be an oop");
2533 if (curObj->hasDisplacedMark()) {
2534 numObjsWithDisplacedHdrs++;
2535 }
2536 }
2537 // Count the number of displaced headers
2538 size_t numDisplacedHdrs = 0;
2539 for (SpoolBlock* curSpool = _spoolHead;
2540 curSpool != _spoolTail && curSpool != NULL;
2541 curSpool = curSpool->nextSpoolBlock) {
2542 // the first entry is just a self-pointer; indices 1 through
2543 // bufferSize - 1 are occupied (thus, bufferSize - 1 slots).
2544 guarantee((void*)curSpool->displacedHdr == (void*)&curSpool->displacedHdr,
2545 "first entry of displacedHdr should be self-referential");
2546 numDisplacedHdrs += curSpool->bufferSize - 1;
2547 }
2548 guarantee((_spoolHead == _spoolTail) == (numDisplacedHdrs == 0),
2549 "internal consistency");
2550 guarantee(_spoolTail != NULL || _nextIndex == 1,
2551 "Inconsistency between _spoolTail and _nextIndex");
2552 // We overcounted (_firstIndex-1) worth of slots in block
2553 // _spoolHead and we undercounted (_nextIndex-1) worth of
2554 // slots in block _spoolTail. We make an appropriate
2555 // adjustment by subtracting the first and adding the
2556 // second: - (_firstIndex - 1) + (_nextIndex - 1)
2557 numDisplacedHdrs += (_nextIndex - _firstIndex);
2558 guarantee(numDisplacedHdrs == numObjsWithDisplacedHdrs, "Displaced hdr count");
2559 }
2562 CFLS_LAB::CFLS_LAB(CompactibleFreeListSpace* cfls) :
2563 _cfls(cfls)
2564 {
2565 _blocks_to_claim = CMSParPromoteBlocksToClaim;
2566 for (size_t i = CompactibleFreeListSpace::IndexSetStart;
2567 i < CompactibleFreeListSpace::IndexSetSize;
2568 i += CompactibleFreeListSpace::IndexSetStride) {
2569 _indexedFreeList[i].set_size(i);
2570 }
2571 }
2573 HeapWord* CFLS_LAB::alloc(size_t word_sz) {
2574 FreeChunk* res;
2575 word_sz = _cfls->adjustObjectSize(word_sz);
2576 if (word_sz >= CompactibleFreeListSpace::IndexSetSize) {
2577 // This locking manages sync with other large object allocations.
2578 MutexLockerEx x(_cfls->parDictionaryAllocLock(),
2579 Mutex::_no_safepoint_check_flag);
2580 res = _cfls->getChunkFromDictionaryExact(word_sz);
2581 if (res == NULL) return NULL;
2582 } else {
2583 FreeList* fl = &_indexedFreeList[word_sz];
2585 if (fl->count() == 0) {
2587 // Attempt to refill this local free list.
2588 _cfls->par_get_chunk_of_blocks(word_sz, _blocks_to_claim, fl);
2589 // If it didn't work, give up.
2590 if (fl->count() == 0) return NULL;
2591 }
2592 res = fl->getChunkAtHead();
2593 assert(res != NULL, "Why was count non-zero?");
2594 }
2595 res->markNotFree();
2596 assert(!res->isFree(), "shouldn't be marked free");
2597 assert(oop(res)->klass_or_null() == NULL, "should look uninitialized");
2598 // mangle a just allocated object with a distinct pattern.
2599 debug_only(res->mangleAllocated(word_sz));
2600 return (HeapWord*)res;
2601 }
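// Editorial usage sketch (hypothetical caller): each parallel promotion
// thread owns one CFLS_LAB and calls alloc(word_sz) without holding the
// space-wide free-list lock. The first small request of a given size pulls
// up to _blocks_to_claim chunks into the local _indexedFreeList[word_sz];
// subsequent requests of that size are then satisfied locally, without
// synchronization, until the local list drains.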
2603 void CFLS_LAB::retire() {
2604 for (size_t i = CompactibleFreeListSpace::IndexSetStart;
2605 i < CompactibleFreeListSpace::IndexSetSize;
2606 i += CompactibleFreeListSpace::IndexSetStride) {
2607 if (_indexedFreeList[i].count() > 0) {
2608 MutexLockerEx x(_cfls->_indexedFreeListParLocks[i],
2609 Mutex::_no_safepoint_check_flag);
2610 _cfls->_indexedFreeList[i].prepend(&_indexedFreeList[i]);
2611 // Reset this list.
2612 _indexedFreeList[i] = FreeList();
2613 _indexedFreeList[i].set_size(i);
2614 }
2615 }
2616 }
2618 void
2619 CompactibleFreeListSpace::
2620 par_get_chunk_of_blocks(size_t word_sz, size_t n, FreeList* fl) {
2621 assert(fl->count() == 0, "Precondition.");
2622 assert(word_sz < CompactibleFreeListSpace::IndexSetSize,
2623 "Precondition");
2625 // We'll try word_sz itself in the indexed set first (the loop below is
2626 // currently clamped to k == 1), then try getting a big chunk and splitting it.
2627 int k = 1;
2628 size_t cur_sz = k * word_sz;
2629 bool found = false;
2630 while (cur_sz < CompactibleFreeListSpace::IndexSetSize && k == 1) {
2631 FreeList* gfl = &_indexedFreeList[cur_sz];
2632 FreeList fl_for_cur_sz; // Empty.
2633 fl_for_cur_sz.set_size(cur_sz);
2634 {
2635 MutexLockerEx x(_indexedFreeListParLocks[cur_sz],
2636 Mutex::_no_safepoint_check_flag);
2637 if (gfl->count() != 0) {
2638 size_t nn = MAX2(n/k, (size_t)1);
2639 gfl->getFirstNChunksFromList(nn, &fl_for_cur_sz);
2640 found = true;
2641 }
2642 }
2643 // Now transfer fl_for_cur_sz to fl. Common case, we hope, is k = 1.
2644 if (found) {
2645 if (k == 1) {
2646 fl->prepend(&fl_for_cur_sz);
2647 } else {
2648 // Divide each block on fl_for_cur_sz up k ways.
2649 FreeChunk* fc;
2650 while ((fc = fl_for_cur_sz.getChunkAtHead()) != NULL) {
2651 // Must do this in reverse order, so that anybody attempting to
2652 // access the main chunk sees it as a single free block until we
2653 // change it.
2654 size_t fc_size = fc->size();
2655 for (int i = k-1; i >= 0; i--) {
2656 FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz);
2657 ffc->setSize(word_sz);
2658 ffc->linkNext(NULL);
2659 ffc->linkPrev(NULL); // Mark as a free block for other (parallel) GC threads.
2660 // Above must occur before BOT is updated below.
2661 // splitting from the right; here fc_size == (i + 1) * word_sz
2662 _bt.mark_block((HeapWord*)ffc, word_sz);
2663 fc_size -= word_sz;
2664 _bt.verify_not_unallocated((HeapWord*)ffc, ffc->size());
2665 _bt.verify_single_block((HeapWord*)fc, fc_size);
2666 _bt.verify_single_block((HeapWord*)ffc, ffc->size());
2667 // Push this on "fl".
2668 fl->returnChunkAtHead(ffc);
2669 }
2671 assert(fl->tail()->next() == NULL, "List invariant.");
2672 }
2673 }
2674 return;
2675 }
2676 k++; cur_sz = k * word_sz;
2677 }
2678 // Otherwise, we'll split a block from the dictionary.
2679 FreeChunk* fc = NULL;
2680 FreeChunk* rem_fc = NULL;
2681 size_t rem;
2682 {
2683 MutexLockerEx x(parDictionaryAllocLock(),
2684 Mutex::_no_safepoint_check_flag);
2685 while (n > 0) {
2686 fc = dictionary()->getChunk(MAX2(n * word_sz,
2687 _dictionary->minSize()),
2688 FreeBlockDictionary::atLeast);
2689 if (fc != NULL) {
2690 _bt.allocated((HeapWord*)fc, fc->size()); // update _unallocated_blk
2691 dictionary()->dictCensusUpdate(fc->size(),
2692 true /*split*/,
2693 false /*birth*/);
2694 break;
2695 } else {
2696 n--;
2697 }
2698 }
2699 if (fc == NULL) return;
2700 // Otherwise, split up that block.
2701 size_t nn = fc->size() / word_sz;
2702 n = MIN2(nn, n);
2703 rem = fc->size() - n * word_sz;
2704 // If there is a remainder, and it's too small, allocate one fewer.
2705 if (rem > 0 && rem < MinChunkSize) {
2706 n--; rem += word_sz;
2707 }
2708 // First return the remainder, if any.
2709 // Note that we hold the lock until we decide if we're going to give
2710 // back the remainder to the dictionary, since a contending allocator
2711 // may otherwise see the heap as empty. (We're willing to take that
2712 // hit if the block is a small block.)
2713 if (rem > 0) {
2714 size_t prefix_size = n * word_sz;
2715 rem_fc = (FreeChunk*)((HeapWord*)fc + prefix_size);
2716 rem_fc->setSize(rem);
2717 rem_fc->linkNext(NULL);
2718 rem_fc->linkPrev(NULL); // Mark as a free block for other (parallel) GC threads.
2719 // Above must occur before BOT is updated below.
2720 _bt.split_block((HeapWord*)fc, fc->size(), prefix_size);
2721 if (rem >= IndexSetSize) {
2722 returnChunkToDictionary(rem_fc);
2723 dictionary()->dictCensusUpdate(rem,
2724 true /*split*/,
2725 true /*birth*/);
2726 rem_fc = NULL;
2727 }
2728 // Otherwise, return it to the small list below.
2729 }
2730 }
2731 //
2732 if (rem_fc != NULL) {
2733 MutexLockerEx x(_indexedFreeListParLocks[rem],
2734 Mutex::_no_safepoint_check_flag);
2735 _bt.verify_not_unallocated((HeapWord*)rem_fc, rem_fc->size());
2736 _indexedFreeList[rem].returnChunkAtHead(rem_fc);
2737 smallSplitBirth(rem);
2738 }
2740 // Now do the splitting up.
2741 // Must do this in reverse order, so that anybody attempting to
2742 // access the main chunk sees it as a single free block until we
2743 // change it.
2744 size_t fc_size = n * word_sz;
2745 // All but first chunk in this loop
2746 for (ssize_t i = n-1; i > 0; i--) {
2747 FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz);
2748 ffc->setSize(word_sz);
2749 ffc->linkNext(NULL);
2750 ffc->linkPrev(NULL); // Mark as a free block for other (parallel) GC threads.
2751 // Above must occur before BOT is updated below.
2752 // splitting from the right; here fc_size == (i + 1) * word_sz
2753 _bt.mark_block((HeapWord*)ffc, word_sz);
2754 fc_size -= word_sz;
2755 _bt.verify_not_unallocated((HeapWord*)ffc, ffc->size());
2756 _bt.verify_single_block((HeapWord*)ffc, ffc->size());
2757 _bt.verify_single_block((HeapWord*)fc, fc_size);
2758 // Push this on "fl".
2759 fl->returnChunkAtHead(ffc);
2760 }
2761 // First chunk
2762 fc->setSize(word_sz);
2763 fc->linkNext(NULL);
2764 fc->linkPrev(NULL);
2765 _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
2766 _bt.verify_single_block((HeapWord*)fc, fc->size());
2767 fl->returnChunkAtHead(fc);
2769 {
2770 MutexLockerEx x(_indexedFreeListParLocks[word_sz],
2771 Mutex::_no_safepoint_check_flag);
2772 ssize_t new_births = _indexedFreeList[word_sz].splitBirths() + n;
2773 _indexedFreeList[word_sz].set_splitBirths(new_births);
2774 ssize_t new_surplus = _indexedFreeList[word_sz].surplus() + n;
2775 _indexedFreeList[word_sz].set_surplus(new_surplus);
2776 }
2779 assert(fl->tail()->next() == NULL, "List invariant.");
2780 }
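// Worked example of the dictionary path above (illustrative numbers,
// assuming MinChunkSize <= 20): for word_sz == 16 and n == 5, a 100-word
// dictionary chunk gives nn == 6, n stays 5, and rem == 100 - 80 == 20. The
// 20-word remainder is published first, while the lock is held, and goes to
// the 20-word indexed list; the remaining 80 words are then carved, right to
// left, into five 16-word chunks on "fl".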
2782 // Set up the space's par_seq_tasks structure for work claiming
2783 // for parallel rescan. See CMSParRemarkTask where this is currently used.
2784 // XXX Need to suitably abstract and generalize this and the next
2785 // method into one.
2786 void
2787 CompactibleFreeListSpace::
2788 initialize_sequential_subtasks_for_rescan(int n_threads) {
2789 // The "size" of each task is fixed according to rescan_task_size.
2790 assert(n_threads > 0, "Unexpected n_threads argument");
2791 const size_t task_size = rescan_task_size();
2792 size_t n_tasks = (used_region().word_size() + task_size - 1)/task_size;
2793 assert((n_tasks == 0) == used_region().is_empty(), "n_tasks incorrect");
2794 assert(n_tasks == 0 ||
2795 ((used_region().start() + (n_tasks - 1)*task_size < used_region().end()) &&
2796 (used_region().start() + n_tasks*task_size >= used_region().end())),
2797 "n_tasks calculation incorrect");
2798 SequentialSubTasksDone* pst = conc_par_seq_tasks();
2799 assert(!pst->valid(), "Clobbering existing data?");
2800 pst->set_par_threads(n_threads);
2801 pst->set_n_tasks((int)n_tasks);
2802 }
2804 // Set up the space's par_seq_tasks structure for work claiming
2805 // for parallel concurrent marking. See CMSConcMarkTask where this is currently used.
2806 void
2807 CompactibleFreeListSpace::
2808 initialize_sequential_subtasks_for_marking(int n_threads,
2809 HeapWord* low) {
2810 // The "size" of each task is fixed according to rescan_task_size.
2811 assert(n_threads > 0, "Unexpected n_threads argument");
2812 const size_t task_size = marking_task_size();
2813 assert(task_size > CardTableModRefBS::card_size_in_words &&
2814 (task_size % CardTableModRefBS::card_size_in_words == 0),
2815 "Otherwise arithmetic below would be incorrect");
2816 MemRegion span = _gen->reserved();
2817 if (low != NULL) {
2818 if (span.contains(low)) {
2819 // Align low down to a card boundary so that
2820 // we can use block_offset_careful() on span boundaries.
2821 HeapWord* aligned_low = (HeapWord*)align_size_down((uintptr_t)low,
2822 CardTableModRefBS::card_size);
2823 // Clip span prefix at aligned_low
2824 span = span.intersection(MemRegion(aligned_low, span.end()));
2825 } else if (low > span.end()) {
2826 span = MemRegion(low, low); // Null region
2827 } // else use entire span
2828 }
2829 assert(span.is_empty() ||
2830 ((uintptr_t)span.start() % CardTableModRefBS::card_size == 0),
2831 "span should start at a card boundary");
2832 size_t n_tasks = (span.word_size() + task_size - 1)/task_size;
2833 assert((n_tasks == 0) == span.is_empty(), "Inconsistency");
2834 assert(n_tasks == 0 ||
2835 ((span.start() + (n_tasks - 1)*task_size < span.end()) &&
2836 (span.start() + n_tasks*task_size >= span.end())),
2837 "n_tasks calculation incorrect");
2838 SequentialSubTasksDone* pst = conc_par_seq_tasks();
2839 assert(!pst->valid(), "Clobbering existing data?");
2840 pst->set_par_threads(n_threads);
2841 pst->set_n_tasks((int)n_tasks);
2842 }