duke@435: /* duke@435: * Copyright 2001-2006 Sun Microsystems, Inc. All Rights Reserved. duke@435: * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. duke@435: * duke@435: * This code is free software; you can redistribute it and/or modify it duke@435: * under the terms of the GNU General Public License version 2 only, as duke@435: * published by the Free Software Foundation. duke@435: * duke@435: * This code is distributed in the hope that it will be useful, but WITHOUT duke@435: * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or duke@435: * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License duke@435: * version 2 for more details (a copy is included in the LICENSE file that duke@435: * accompanied this code). duke@435: * duke@435: * You should have received a copy of the GNU General Public License version duke@435: * 2 along with this work; if not, write to the Free Software Foundation, duke@435: * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. duke@435: * duke@435: * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, duke@435: * CA 95054 USA or visit www.sun.com if you need additional information or duke@435: * have any questions. duke@435: * duke@435: */ duke@435: duke@435: # include "incls/_precompiled.incl" duke@435: # include "incls/_compactibleFreeListSpace.cpp.incl" duke@435: duke@435: ///////////////////////////////////////////////////////////////////////// duke@435: //// CompactibleFreeListSpace duke@435: ///////////////////////////////////////////////////////////////////////// duke@435: duke@435: // highest ranked free list lock rank duke@435: int CompactibleFreeListSpace::_lockRank = Mutex::leaf + 3; duke@435: duke@435: // Constructor duke@435: CompactibleFreeListSpace::CompactibleFreeListSpace(BlockOffsetSharedArray* bs, duke@435: MemRegion mr, bool use_adaptive_freelists, duke@435: FreeBlockDictionary::DictionaryChoice dictionaryChoice) : duke@435: _dictionaryChoice(dictionaryChoice), duke@435: _adaptive_freelists(use_adaptive_freelists), duke@435: _bt(bs, mr), duke@435: // free list locks are in the range of values taken by _lockRank duke@435: // This range currently is [_leaf+2, _leaf+3] duke@435: // Note: this requires that CFLspace c'tors duke@435: // are called serially in the order in which the locks are duke@435: // are acquired in the program text. This is true today. duke@435: _freelistLock(_lockRank--, "CompactibleFreeListSpace._lock", true), duke@435: _parDictionaryAllocLock(Mutex::leaf - 1, // == rank(ExpandHeap_lock) - 1 duke@435: "CompactibleFreeListSpace._dict_par_lock", true), duke@435: _rescan_task_size(CardTableModRefBS::card_size_in_words * BitsPerWord * duke@435: CMSRescanMultiple), duke@435: _marking_task_size(CardTableModRefBS::card_size_in_words * BitsPerWord * duke@435: CMSConcMarkMultiple), duke@435: _collector(NULL) duke@435: { duke@435: _bt.set_space(this); duke@435: initialize(mr, true); duke@435: // We have all of "mr", all of which we place in the dictionary duke@435: // as one big chunk. We'll need to decide here which of several duke@435: // possible alternative dictionary implementations to use. For duke@435: // now the choice is easy, since we have only one working duke@435: // implementation, namely, the simple binary tree (splaying duke@435: // temporarily disabled). 
duke@435: switch (dictionaryChoice) { duke@435: case FreeBlockDictionary::dictionaryBinaryTree: duke@435: _dictionary = new BinaryTreeDictionary(mr); duke@435: break; duke@435: case FreeBlockDictionary::dictionarySplayTree: duke@435: case FreeBlockDictionary::dictionarySkipList: duke@435: default: duke@435: warning("dictionaryChoice: selected option not understood; using" duke@435: " default BinaryTreeDictionary implementation instead."); duke@435: _dictionary = new BinaryTreeDictionary(mr); duke@435: break; duke@435: } duke@435: splitBirth(mr.word_size()); duke@435: assert(_dictionary != NULL, "CMS dictionary initialization"); duke@435: // The indexed free lists are initially all empty and are lazily duke@435: // filled in on demand. Initialize the array elements to NULL. duke@435: initializeIndexedFreeListArray(); duke@435: duke@435: // Not using adaptive free lists assumes that allocation is first duke@435: // from the linAB's. Also a cms perm gen which can be compacted duke@435: // has to have the klass's klassKlass allocated at a lower duke@435: // address in the heap than the klass so that the klassKlass is duke@435: // moved to its new location before the klass is moved. duke@435: // Set the _refillSize for the linear allocation blocks duke@435: if (!use_adaptive_freelists) { duke@435: FreeChunk* fc = _dictionary->getChunk(mr.word_size()); duke@435: // The small linAB initially has all the space and will allocate duke@435: // a chunk of any size. duke@435: HeapWord* addr = (HeapWord*) fc; duke@435: _smallLinearAllocBlock.set(addr, fc->size() , duke@435: 1024*SmallForLinearAlloc, fc->size()); duke@435: // Note that _unallocated_block is not updated here. duke@435: // Allocations from the linear allocation block should duke@435: // update it. duke@435: } else { duke@435: _smallLinearAllocBlock.set(0, 0, 1024*SmallForLinearAlloc, duke@435: SmallForLinearAlloc); duke@435: } duke@435: // CMSIndexedFreeListReplenish should be at least 1 duke@435: CMSIndexedFreeListReplenish = MAX2((uintx)1, CMSIndexedFreeListReplenish); duke@435: _promoInfo.setSpace(this); duke@435: if (UseCMSBestFit) { duke@435: _fitStrategy = FreeBlockBestFitFirst; duke@435: } else { duke@435: _fitStrategy = FreeBlockStrategyNone; duke@435: } duke@435: checkFreeListConsistency(); duke@435: duke@435: // Initialize locks for parallel case. duke@435: if (ParallelGCThreads > 0) { duke@435: for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) { duke@435: _indexedFreeListParLocks[i] = new Mutex(Mutex::leaf - 1, // == ExpandHeap_lock - 1 duke@435: "a freelist par lock", duke@435: true); duke@435: if (_indexedFreeListParLocks[i] == NULL) duke@435: vm_exit_during_initialization("Could not allocate a par lock"); duke@435: DEBUG_ONLY( duke@435: _indexedFreeList[i].set_protecting_lock(_indexedFreeListParLocks[i]); duke@435: ) duke@435: } duke@435: _dictionary->set_par_lock(&_parDictionaryAllocLock); duke@435: } duke@435: } duke@435: duke@435: // Like CompactibleSpace forward() but always calls cross_threshold() to duke@435: // update the block offset table. Removed initialize_threshold call because duke@435: // CFLS does not use a block offset array for contiguous spaces. 
duke@435: HeapWord* CompactibleFreeListSpace::forward(oop q, size_t size, duke@435: CompactPoint* cp, HeapWord* compact_top) { duke@435: // q is alive duke@435: // First check if we should switch compaction space duke@435: assert(this == cp->space, "'this' should be current compaction space."); duke@435: size_t compaction_max_size = pointer_delta(end(), compact_top); duke@435: assert(adjustObjectSize(size) == cp->space->adjust_object_size_v(size), duke@435: "virtual adjustObjectSize_v() method is not correct"); duke@435: size_t adjusted_size = adjustObjectSize(size); duke@435: assert(compaction_max_size >= MinChunkSize || compaction_max_size == 0, duke@435: "no small fragments allowed"); duke@435: assert(minimum_free_block_size() == MinChunkSize, duke@435: "for de-virtualized reference below"); duke@435: // Can't leave a nonzero size, residual fragment smaller than MinChunkSize duke@435: if (adjusted_size + MinChunkSize > compaction_max_size && duke@435: adjusted_size != compaction_max_size) { duke@435: do { duke@435: // switch to next compaction space duke@435: cp->space->set_compaction_top(compact_top); duke@435: cp->space = cp->space->next_compaction_space(); duke@435: if (cp->space == NULL) { duke@435: cp->gen = GenCollectedHeap::heap()->prev_gen(cp->gen); duke@435: assert(cp->gen != NULL, "compaction must succeed"); duke@435: cp->space = cp->gen->first_compaction_space(); duke@435: assert(cp->space != NULL, "generation must have a first compaction space"); duke@435: } duke@435: compact_top = cp->space->bottom(); duke@435: cp->space->set_compaction_top(compact_top); duke@435: // The correct adjusted_size may not be the same as that for this method duke@435: // (i.e., cp->space may no longer be "this" so adjust the size again. duke@435: // Use the virtual method which is not used above to save the virtual duke@435: // dispatch. duke@435: adjusted_size = cp->space->adjust_object_size_v(size); duke@435: compaction_max_size = pointer_delta(cp->space->end(), compact_top); duke@435: assert(cp->space->minimum_free_block_size() == 0, "just checking"); duke@435: } while (adjusted_size > compaction_max_size); duke@435: } duke@435: duke@435: // store the forwarding pointer into the mark word duke@435: if ((HeapWord*)q != compact_top) { duke@435: q->forward_to(oop(compact_top)); duke@435: assert(q->is_gc_marked(), "encoding the pointer should preserve the mark"); duke@435: } else { duke@435: // if the object isn't moving we can just set the mark to the default duke@435: // mark and handle it specially later on. duke@435: q->init_mark(); duke@435: assert(q->forwardee() == NULL, "should be forwarded to NULL"); duke@435: } duke@435: coleenp@548: VALIDATE_MARK_SWEEP_ONLY(MarkSweep::register_live_oop(q, adjusted_size)); duke@435: compact_top += adjusted_size; duke@435: duke@435: // we need to update the offset table so that the beginnings of objects can be duke@435: // found during scavenge. Note that we are updating the offset table based on duke@435: // where the object will be once the compaction phase finishes. duke@435: duke@435: // Always call cross_threshold(). A contiguous space can only call it when duke@435: // the compaction_top exceeds the current threshold but not for an duke@435: // non-contiguous space. 
duke@435: cp->threshold = duke@435: cp->space->cross_threshold(compact_top - adjusted_size, compact_top); duke@435: return compact_top; duke@435: } duke@435: duke@435: // A modified copy of OffsetTableContigSpace::cross_threshold() with _offsets -> _bt duke@435: // and use of single_block instead of alloc_block. The name here is not really duke@435: // appropriate - maybe a more general name could be invented for both the duke@435: // contiguous and noncontiguous spaces. duke@435: duke@435: HeapWord* CompactibleFreeListSpace::cross_threshold(HeapWord* start, HeapWord* the_end) { duke@435: _bt.single_block(start, the_end); duke@435: return end(); duke@435: } duke@435: duke@435: // Initialize them to NULL. duke@435: void CompactibleFreeListSpace::initializeIndexedFreeListArray() { duke@435: for (size_t i = 0; i < IndexSetSize; i++) { duke@435: // Note that on platforms where objects are double word aligned, duke@435: // the odd array elements are not used. It is convenient, however, duke@435: // to map directly from the object size to the array element. duke@435: _indexedFreeList[i].reset(IndexSetSize); duke@435: _indexedFreeList[i].set_size(i); duke@435: assert(_indexedFreeList[i].count() == 0, "reset check failed"); duke@435: assert(_indexedFreeList[i].head() == NULL, "reset check failed"); duke@435: assert(_indexedFreeList[i].tail() == NULL, "reset check failed"); duke@435: assert(_indexedFreeList[i].hint() == IndexSetSize, "reset check failed"); duke@435: } duke@435: } duke@435: duke@435: void CompactibleFreeListSpace::resetIndexedFreeListArray() { duke@435: for (int i = 1; i < IndexSetSize; i++) { duke@435: assert(_indexedFreeList[i].size() == (size_t) i, duke@435: "Indexed free list sizes are incorrect"); duke@435: _indexedFreeList[i].reset(IndexSetSize); duke@435: assert(_indexedFreeList[i].count() == 0, "reset check failed"); duke@435: assert(_indexedFreeList[i].head() == NULL, "reset check failed"); duke@435: assert(_indexedFreeList[i].tail() == NULL, "reset check failed"); duke@435: assert(_indexedFreeList[i].hint() == IndexSetSize, "reset check failed"); duke@435: } duke@435: } duke@435: duke@435: void CompactibleFreeListSpace::reset(MemRegion mr) { duke@435: resetIndexedFreeListArray(); duke@435: dictionary()->reset(); duke@435: if (BlockOffsetArrayUseUnallocatedBlock) { duke@435: assert(end() == mr.end(), "We are compacting to the bottom of CMS gen"); duke@435: // Everything's allocated until proven otherwise. duke@435: _bt.set_unallocated_block(end()); duke@435: } duke@435: if (!mr.is_empty()) { duke@435: assert(mr.word_size() >= MinChunkSize, "Chunk size is too small"); duke@435: _bt.single_block(mr.start(), mr.word_size()); duke@435: FreeChunk* fc = (FreeChunk*) mr.start(); duke@435: fc->setSize(mr.word_size()); duke@435: if (mr.word_size() >= IndexSetSize ) { duke@435: returnChunkToDictionary(fc); duke@435: } else { duke@435: _bt.verify_not_unallocated((HeapWord*)fc, fc->size()); duke@435: _indexedFreeList[mr.word_size()].returnChunkAtHead(fc); duke@435: } duke@435: } duke@435: _promoInfo.reset(); duke@435: _smallLinearAllocBlock._ptr = NULL; duke@435: _smallLinearAllocBlock._word_size = 0; duke@435: } duke@435: duke@435: void CompactibleFreeListSpace::reset_after_compaction() { duke@435: // Reset the space to the new reality - one free chunk. duke@435: MemRegion mr(compaction_top(), end()); duke@435: reset(mr); duke@435: // Now refill the linear allocation block(s) if possible. 
duke@435: if (_adaptive_freelists) { duke@435: refillLinearAllocBlocksIfNeeded(); duke@435: } else { duke@435: // Place as much of mr in the linAB as we can get, duke@435: // provided it was big enough to go into the dictionary. duke@435: FreeChunk* fc = dictionary()->findLargestDict(); duke@435: if (fc != NULL) { duke@435: assert(fc->size() == mr.word_size(), duke@435: "Why was the chunk broken up?"); duke@435: removeChunkFromDictionary(fc); duke@435: HeapWord* addr = (HeapWord*) fc; duke@435: _smallLinearAllocBlock.set(addr, fc->size() , duke@435: 1024*SmallForLinearAlloc, fc->size()); duke@435: // Note that _unallocated_block is not updated here. duke@435: } duke@435: } duke@435: } duke@435: duke@435: // Walks the entire dictionary, returning a coterminal duke@435: // chunk, if it exists. Use with caution since it involves duke@435: // a potentially complete walk of a potentially large tree. duke@435: FreeChunk* CompactibleFreeListSpace::find_chunk_at_end() { duke@435: duke@435: assert_lock_strong(&_freelistLock); duke@435: duke@435: return dictionary()->find_chunk_ends_at(end()); duke@435: } duke@435: duke@435: duke@435: #ifndef PRODUCT duke@435: void CompactibleFreeListSpace::initializeIndexedFreeListArrayReturnedBytes() { duke@435: for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) { duke@435: _indexedFreeList[i].allocation_stats()->set_returnedBytes(0); duke@435: } duke@435: } duke@435: duke@435: size_t CompactibleFreeListSpace::sumIndexedFreeListArrayReturnedBytes() { duke@435: size_t sum = 0; duke@435: for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) { duke@435: sum += _indexedFreeList[i].allocation_stats()->returnedBytes(); duke@435: } duke@435: return sum; duke@435: } duke@435: duke@435: size_t CompactibleFreeListSpace::totalCountInIndexedFreeLists() const { duke@435: size_t count = 0; duke@435: for (int i = MinChunkSize; i < IndexSetSize; i++) { duke@435: debug_only( duke@435: ssize_t total_list_count = 0; duke@435: for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL; duke@435: fc = fc->next()) { duke@435: total_list_count++; duke@435: } duke@435: assert(total_list_count == _indexedFreeList[i].count(), duke@435: "Count in list is incorrect"); duke@435: ) duke@435: count += _indexedFreeList[i].count(); duke@435: } duke@435: return count; duke@435: } duke@435: duke@435: size_t CompactibleFreeListSpace::totalCount() { duke@435: size_t num = totalCountInIndexedFreeLists(); duke@435: num += dictionary()->totalCount(); duke@435: if (_smallLinearAllocBlock._word_size != 0) { duke@435: num++; duke@435: } duke@435: return num; duke@435: } duke@435: #endif duke@435: duke@435: bool CompactibleFreeListSpace::is_free_block(const HeapWord* p) const { duke@435: FreeChunk* fc = (FreeChunk*) p; duke@435: return fc->isFree(); duke@435: } duke@435: duke@435: size_t CompactibleFreeListSpace::used() const { duke@435: return capacity() - free(); duke@435: } duke@435: duke@435: size_t CompactibleFreeListSpace::free() const { duke@435: // "MT-safe, but not MT-precise"(TM), if you will: i.e. duke@435: // if you do this while the structures are in flux you duke@435: // may get an approximate answer only; for instance duke@435: // because there is concurrent allocation either duke@435: // directly by mutators or for promotion during a GC. duke@435: // It's "MT-safe", however, in the sense that you are guaranteed duke@435: // not to crash and burn, for instance, because of walking duke@435: // pointers that could disappear as you were walking them. 
duke@435: // The approximation is because the various components duke@435: // that are read below are not read atomically (and duke@435: // further the computation of totalSizeInIndexedFreeLists() duke@435: // is itself a non-atomic computation. The normal use of duke@435: // this is during a resize operation at the end of GC duke@435: // and at that time you are guaranteed to get the duke@435: // correct actual value. However, for instance, this is duke@435: // also read completely asynchronously by the "perf-sampler" duke@435: // that supports jvmstat, and you are apt to see the values duke@435: // flicker in such cases. duke@435: assert(_dictionary != NULL, "No _dictionary?"); duke@435: return (_dictionary->totalChunkSize(DEBUG_ONLY(freelistLock())) + duke@435: totalSizeInIndexedFreeLists() + duke@435: _smallLinearAllocBlock._word_size) * HeapWordSize; duke@435: } duke@435: duke@435: size_t CompactibleFreeListSpace::max_alloc_in_words() const { duke@435: assert(_dictionary != NULL, "No _dictionary?"); duke@435: assert_locked(); duke@435: size_t res = _dictionary->maxChunkSize(); duke@435: res = MAX2(res, MIN2(_smallLinearAllocBlock._word_size, duke@435: (size_t) SmallForLinearAlloc - 1)); duke@435: // XXX the following could potentially be pretty slow; duke@435: // should one, pesimally for the rare cases when res duke@435: // caclulated above is less than IndexSetSize, duke@435: // just return res calculated above? My reasoning was that duke@435: // those cases will be so rare that the extra time spent doesn't duke@435: // really matter.... duke@435: // Note: do not change the loop test i >= res + IndexSetStride duke@435: // to i > res below, because i is unsigned and res may be zero. duke@435: for (size_t i = IndexSetSize - 1; i >= res + IndexSetStride; duke@435: i -= IndexSetStride) { duke@435: if (_indexedFreeList[i].head() != NULL) { duke@435: assert(_indexedFreeList[i].count() != 0, "Inconsistent FreeList"); duke@435: return i; duke@435: } duke@435: } duke@435: return res; duke@435: } duke@435: duke@435: void CompactibleFreeListSpace::reportFreeListStatistics() const { duke@435: assert_lock_strong(&_freelistLock); duke@435: assert(PrintFLSStatistics != 0, "Reporting error"); duke@435: _dictionary->reportStatistics(); duke@435: if (PrintFLSStatistics > 1) { duke@435: reportIndexedFreeListStatistics(); duke@435: size_t totalSize = totalSizeInIndexedFreeLists() + duke@435: _dictionary->totalChunkSize(DEBUG_ONLY(freelistLock())); duke@435: gclog_or_tty->print(" free=%ld frag=%1.4f\n", totalSize, flsFrag()); duke@435: } duke@435: } duke@435: duke@435: void CompactibleFreeListSpace::reportIndexedFreeListStatistics() const { duke@435: assert_lock_strong(&_freelistLock); duke@435: gclog_or_tty->print("Statistics for IndexedFreeLists:\n" duke@435: "--------------------------------\n"); duke@435: size_t totalSize = totalSizeInIndexedFreeLists(); duke@435: size_t freeBlocks = numFreeBlocksInIndexedFreeLists(); duke@435: gclog_or_tty->print("Total Free Space: %d\n", totalSize); duke@435: gclog_or_tty->print("Max Chunk Size: %d\n", maxChunkSizeInIndexedFreeLists()); duke@435: gclog_or_tty->print("Number of Blocks: %d\n", freeBlocks); duke@435: if (freeBlocks != 0) { duke@435: gclog_or_tty->print("Av. 
Block Size: %d\n", totalSize/freeBlocks); duke@435: } duke@435: } duke@435: duke@435: size_t CompactibleFreeListSpace::numFreeBlocksInIndexedFreeLists() const { duke@435: size_t res = 0; duke@435: for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) { duke@435: debug_only( duke@435: ssize_t recount = 0; duke@435: for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL; duke@435: fc = fc->next()) { duke@435: recount += 1; duke@435: } duke@435: assert(recount == _indexedFreeList[i].count(), duke@435: "Incorrect count in list"); duke@435: ) duke@435: res += _indexedFreeList[i].count(); duke@435: } duke@435: return res; duke@435: } duke@435: duke@435: size_t CompactibleFreeListSpace::maxChunkSizeInIndexedFreeLists() const { duke@435: for (size_t i = IndexSetSize - 1; i != 0; i -= IndexSetStride) { duke@435: if (_indexedFreeList[i].head() != NULL) { duke@435: assert(_indexedFreeList[i].count() != 0, "Inconsistent FreeList"); duke@435: return (size_t)i; duke@435: } duke@435: } duke@435: return 0; duke@435: } duke@435: duke@435: void CompactibleFreeListSpace::set_end(HeapWord* value) { duke@435: HeapWord* prevEnd = end(); duke@435: assert(prevEnd != value, "unnecessary set_end call"); duke@435: assert(prevEnd == NULL || value >= unallocated_block(), "New end is below unallocated block"); duke@435: _end = value; duke@435: if (prevEnd != NULL) { duke@435: // Resize the underlying block offset table. duke@435: _bt.resize(pointer_delta(value, bottom())); duke@435: if (value <= prevEnd) { duke@435: assert(value >= unallocated_block(), "New end is below unallocated block"); duke@435: } else { duke@435: // Now, take this new chunk and add it to the free blocks. duke@435: // Note that the BOT has not yet been updated for this block. duke@435: size_t newFcSize = pointer_delta(value, prevEnd); duke@435: // XXX This is REALLY UGLY and should be fixed up. XXX duke@435: if (!_adaptive_freelists && _smallLinearAllocBlock._ptr == NULL) { duke@435: // Mark the boundary of the new block in BOT duke@435: _bt.mark_block(prevEnd, value); duke@435: // put it all in the linAB duke@435: if (ParallelGCThreads == 0) { duke@435: _smallLinearAllocBlock._ptr = prevEnd; duke@435: _smallLinearAllocBlock._word_size = newFcSize; duke@435: repairLinearAllocBlock(&_smallLinearAllocBlock); duke@435: } else { // ParallelGCThreads > 0 duke@435: MutexLockerEx x(parDictionaryAllocLock(), duke@435: Mutex::_no_safepoint_check_flag); duke@435: _smallLinearAllocBlock._ptr = prevEnd; duke@435: _smallLinearAllocBlock._word_size = newFcSize; duke@435: repairLinearAllocBlock(&_smallLinearAllocBlock); duke@435: } duke@435: // Births of chunks put into a LinAB are not recorded. Births duke@435: // of chunks as they are allocated out of a LinAB are. duke@435: } else { duke@435: // Add the block to the free lists, if possible coalescing it duke@435: // with the last free block, and update the BOT and census data. duke@435: addChunkToFreeListsAtEndRecordingStats(prevEnd, newFcSize); duke@435: } duke@435: } duke@435: } duke@435: } duke@435: duke@435: class FreeListSpace_DCTOC : public Filtering_DCTOC { duke@435: CompactibleFreeListSpace* _cfls; duke@435: CMSCollector* _collector; duke@435: protected: duke@435: // Override. 
duke@435: #define walk_mem_region_with_cl_DECL(ClosureType) \ duke@435: virtual void walk_mem_region_with_cl(MemRegion mr, \ duke@435: HeapWord* bottom, HeapWord* top, \ duke@435: ClosureType* cl); \ duke@435: void walk_mem_region_with_cl_par(MemRegion mr, \ duke@435: HeapWord* bottom, HeapWord* top, \ duke@435: ClosureType* cl); \ duke@435: void walk_mem_region_with_cl_nopar(MemRegion mr, \ duke@435: HeapWord* bottom, HeapWord* top, \ duke@435: ClosureType* cl) duke@435: walk_mem_region_with_cl_DECL(OopClosure); duke@435: walk_mem_region_with_cl_DECL(FilteringClosure); duke@435: duke@435: public: duke@435: FreeListSpace_DCTOC(CompactibleFreeListSpace* sp, duke@435: CMSCollector* collector, duke@435: OopClosure* cl, duke@435: CardTableModRefBS::PrecisionStyle precision, duke@435: HeapWord* boundary) : duke@435: Filtering_DCTOC(sp, cl, precision, boundary), duke@435: _cfls(sp), _collector(collector) {} duke@435: }; duke@435: duke@435: // We de-virtualize the block-related calls below, since we know that our duke@435: // space is a CompactibleFreeListSpace. duke@435: #define FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(ClosureType) \ duke@435: void FreeListSpace_DCTOC::walk_mem_region_with_cl(MemRegion mr, \ duke@435: HeapWord* bottom, \ duke@435: HeapWord* top, \ duke@435: ClosureType* cl) { \ duke@435: if (SharedHeap::heap()->n_par_threads() > 0) { \ duke@435: walk_mem_region_with_cl_par(mr, bottom, top, cl); \ duke@435: } else { \ duke@435: walk_mem_region_with_cl_nopar(mr, bottom, top, cl); \ duke@435: } \ duke@435: } \ duke@435: void FreeListSpace_DCTOC::walk_mem_region_with_cl_par(MemRegion mr, \ duke@435: HeapWord* bottom, \ duke@435: HeapWord* top, \ duke@435: ClosureType* cl) { \ duke@435: /* Skip parts that are before "mr", in case "block_start" sent us \ duke@435: back too far. */ \ duke@435: HeapWord* mr_start = mr.start(); \ duke@435: size_t bot_size = _cfls->CompactibleFreeListSpace::block_size(bottom); \ duke@435: HeapWord* next = bottom + bot_size; \ duke@435: while (next < mr_start) { \ duke@435: bottom = next; \ duke@435: bot_size = _cfls->CompactibleFreeListSpace::block_size(bottom); \ duke@435: next = bottom + bot_size; \ duke@435: } \ duke@435: \ duke@435: while (bottom < top) { \ duke@435: if (_cfls->CompactibleFreeListSpace::block_is_obj(bottom) && \ duke@435: !_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks( \ duke@435: oop(bottom)) && \ duke@435: !_collector->CMSCollector::is_dead_obj(oop(bottom))) { \ duke@435: size_t word_sz = oop(bottom)->oop_iterate(cl, mr); \ duke@435: bottom += _cfls->adjustObjectSize(word_sz); \ duke@435: } else { \ duke@435: bottom += _cfls->CompactibleFreeListSpace::block_size(bottom); \ duke@435: } \ duke@435: } \ duke@435: } \ duke@435: void FreeListSpace_DCTOC::walk_mem_region_with_cl_nopar(MemRegion mr, \ duke@435: HeapWord* bottom, \ duke@435: HeapWord* top, \ duke@435: ClosureType* cl) { \ duke@435: /* Skip parts that are before "mr", in case "block_start" sent us \ duke@435: back too far. 
*/ \ duke@435: HeapWord* mr_start = mr.start(); \ duke@435: size_t bot_size = _cfls->CompactibleFreeListSpace::block_size_nopar(bottom); \ duke@435: HeapWord* next = bottom + bot_size; \ duke@435: while (next < mr_start) { \ duke@435: bottom = next; \ duke@435: bot_size = _cfls->CompactibleFreeListSpace::block_size_nopar(bottom); \ duke@435: next = bottom + bot_size; \ duke@435: } \ duke@435: \ duke@435: while (bottom < top) { \ duke@435: if (_cfls->CompactibleFreeListSpace::block_is_obj_nopar(bottom) && \ duke@435: !_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks( \ duke@435: oop(bottom)) && \ duke@435: !_collector->CMSCollector::is_dead_obj(oop(bottom))) { \ duke@435: size_t word_sz = oop(bottom)->oop_iterate(cl, mr); \ duke@435: bottom += _cfls->adjustObjectSize(word_sz); \ duke@435: } else { \ duke@435: bottom += _cfls->CompactibleFreeListSpace::block_size_nopar(bottom); \ duke@435: } \ duke@435: } \ duke@435: } duke@435: duke@435: // (There are only two of these, rather than N, because the split is due duke@435: // only to the introduction of the FilteringClosure, a local part of the duke@435: // impl of this abstraction.) duke@435: FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(OopClosure) duke@435: FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(FilteringClosure) duke@435: duke@435: DirtyCardToOopClosure* duke@435: CompactibleFreeListSpace::new_dcto_cl(OopClosure* cl, duke@435: CardTableModRefBS::PrecisionStyle precision, duke@435: HeapWord* boundary) { duke@435: return new FreeListSpace_DCTOC(this, _collector, cl, precision, boundary); duke@435: } duke@435: duke@435: duke@435: // Note on locking for the space iteration functions: duke@435: // since the collector's iteration activities are concurrent with duke@435: // allocation activities by mutators, absent a suitable mutual exclusion duke@435: // mechanism the iterators may go awry. For instace a block being iterated duke@435: // may suddenly be allocated or divided up and part of it allocated and duke@435: // so on. duke@435: duke@435: // Apply the given closure to each block in the space. duke@435: void CompactibleFreeListSpace::blk_iterate_careful(BlkClosureCareful* cl) { duke@435: assert_lock_strong(freelistLock()); duke@435: HeapWord *cur, *limit; duke@435: for (cur = bottom(), limit = end(); cur < limit; duke@435: cur += cl->do_blk_careful(cur)); duke@435: } duke@435: duke@435: // Apply the given closure to each block in the space. duke@435: void CompactibleFreeListSpace::blk_iterate(BlkClosure* cl) { duke@435: assert_lock_strong(freelistLock()); duke@435: HeapWord *cur, *limit; duke@435: for (cur = bottom(), limit = end(); cur < limit; duke@435: cur += cl->do_blk(cur)); duke@435: } duke@435: duke@435: // Apply the given closure to each oop in the space. duke@435: void CompactibleFreeListSpace::oop_iterate(OopClosure* cl) { duke@435: assert_lock_strong(freelistLock()); duke@435: HeapWord *cur, *limit; duke@435: size_t curSize; duke@435: for (cur = bottom(), limit = end(); cur < limit; duke@435: cur += curSize) { duke@435: curSize = block_size(cur); duke@435: if (block_is_obj(cur)) { duke@435: oop(cur)->oop_iterate(cl); duke@435: } duke@435: } duke@435: } duke@435: duke@435: // Apply the given closure to each oop in the space \intersect memory region. 
duke@435: void CompactibleFreeListSpace::oop_iterate(MemRegion mr, OopClosure* cl) { duke@435: assert_lock_strong(freelistLock()); duke@435: if (is_empty()) { duke@435: return; duke@435: } duke@435: MemRegion cur = MemRegion(bottom(), end()); duke@435: mr = mr.intersection(cur); duke@435: if (mr.is_empty()) { duke@435: return; duke@435: } duke@435: if (mr.equals(cur)) { duke@435: oop_iterate(cl); duke@435: return; duke@435: } duke@435: assert(mr.end() <= end(), "just took an intersection above"); duke@435: HeapWord* obj_addr = block_start(mr.start()); duke@435: HeapWord* t = mr.end(); duke@435: duke@435: SpaceMemRegionOopsIterClosure smr_blk(cl, mr); duke@435: if (block_is_obj(obj_addr)) { duke@435: // Handle first object specially. duke@435: oop obj = oop(obj_addr); duke@435: obj_addr += adjustObjectSize(obj->oop_iterate(&smr_blk)); duke@435: } else { duke@435: FreeChunk* fc = (FreeChunk*)obj_addr; duke@435: obj_addr += fc->size(); duke@435: } duke@435: while (obj_addr < t) { duke@435: HeapWord* obj = obj_addr; duke@435: obj_addr += block_size(obj_addr); duke@435: // If "obj_addr" is not greater than top, then the duke@435: // entire object "obj" is within the region. duke@435: if (obj_addr <= t) { duke@435: if (block_is_obj(obj)) { duke@435: oop(obj)->oop_iterate(cl); duke@435: } duke@435: } else { duke@435: // "obj" extends beyond end of region duke@435: if (block_is_obj(obj)) { duke@435: oop(obj)->oop_iterate(&smr_blk); duke@435: } duke@435: break; duke@435: } duke@435: } duke@435: } duke@435: duke@435: // NOTE: In the following methods, in order to safely be able to duke@435: // apply the closure to an object, we need to be sure that the duke@435: // object has been initialized. We are guaranteed that an object duke@435: // is initialized if we are holding the Heap_lock with the duke@435: // world stopped. duke@435: void CompactibleFreeListSpace::verify_objects_initialized() const { duke@435: if (is_init_completed()) { duke@435: assert_locked_or_safepoint(Heap_lock); duke@435: if (Universe::is_fully_initialized()) { duke@435: guarantee(SafepointSynchronize::is_at_safepoint(), duke@435: "Required for objects to be initialized"); duke@435: } duke@435: } // else make a concession at vm start-up duke@435: } duke@435: duke@435: // Apply the given closure to each object in the space duke@435: void CompactibleFreeListSpace::object_iterate(ObjectClosure* blk) { duke@435: assert_lock_strong(freelistLock()); duke@435: NOT_PRODUCT(verify_objects_initialized()); duke@435: HeapWord *cur, *limit; duke@435: size_t curSize; duke@435: for (cur = bottom(), limit = end(); cur < limit; duke@435: cur += curSize) { duke@435: curSize = block_size(cur); duke@435: if (block_is_obj(cur)) { duke@435: blk->do_object(oop(cur)); duke@435: } duke@435: } duke@435: } duke@435: duke@435: void CompactibleFreeListSpace::object_iterate_mem(MemRegion mr, duke@435: UpwardsObjectClosure* cl) { duke@435: assert_locked(); duke@435: NOT_PRODUCT(verify_objects_initialized()); duke@435: Space::object_iterate_mem(mr, cl); duke@435: } duke@435: duke@435: // Callers of this iterator beware: The closure application should duke@435: // be robust in the face of uninitialized objects and should (always) duke@435: // return a correct size so that the next addr + size below gives us a duke@435: // valid block boundary. [See for instance, duke@435: // ScanMarkedObjectsAgainCarefullyClosure::do_object_careful() duke@435: // in ConcurrentMarkSweepGeneration.cpp.] 
duke@435: HeapWord* duke@435: CompactibleFreeListSpace::object_iterate_careful(ObjectClosureCareful* cl) { duke@435: assert_lock_strong(freelistLock()); duke@435: HeapWord *addr, *last; duke@435: size_t size; duke@435: for (addr = bottom(), last = end(); duke@435: addr < last; addr += size) { duke@435: FreeChunk* fc = (FreeChunk*)addr; duke@435: if (fc->isFree()) { duke@435: // Since we hold the free list lock, which protects direct duke@435: // allocation in this generation by mutators, a free object duke@435: // will remain free throughout this iteration code. duke@435: size = fc->size(); duke@435: } else { duke@435: // Note that the object need not necessarily be initialized, duke@435: // because (for instance) the free list lock does NOT protect duke@435: // object initialization. The closure application below must duke@435: // therefore be correct in the face of uninitialized objects. duke@435: size = cl->do_object_careful(oop(addr)); duke@435: if (size == 0) { duke@435: // An unparsable object found. Signal early termination. duke@435: return addr; duke@435: } duke@435: } duke@435: } duke@435: return NULL; duke@435: } duke@435: duke@435: // Callers of this iterator beware: The closure application should duke@435: // be robust in the face of uninitialized objects and should (always) duke@435: // return a correct size so that the next addr + size below gives us a duke@435: // valid block boundary. [See for instance, duke@435: // ScanMarkedObjectsAgainCarefullyClosure::do_object_careful() duke@435: // in ConcurrentMarkSweepGeneration.cpp.] duke@435: HeapWord* duke@435: CompactibleFreeListSpace::object_iterate_careful_m(MemRegion mr, duke@435: ObjectClosureCareful* cl) { duke@435: assert_lock_strong(freelistLock()); duke@435: // Can't use used_region() below because it may not necessarily duke@435: // be the same as [bottom(),end()); although we could duke@435: // use [used_region().start(),round_to(used_region().end(),CardSize)), duke@435: // that appears too cumbersome, so we just do the simpler check duke@435: // in the assertion below. duke@435: assert(!mr.is_empty() && MemRegion(bottom(),end()).contains(mr), duke@435: "mr should be non-empty and within used space"); duke@435: HeapWord *addr, *end; duke@435: size_t size; duke@435: for (addr = block_start_careful(mr.start()), end = mr.end(); duke@435: addr < end; addr += size) { duke@435: FreeChunk* fc = (FreeChunk*)addr; duke@435: if (fc->isFree()) { duke@435: // Since we hold the free list lock, which protects direct duke@435: // allocation in this generation by mutators, a free object duke@435: // will remain free throughout this iteration code. duke@435: size = fc->size(); duke@435: } else { duke@435: // Note that the object need not necessarily be initialized, duke@435: // because (for instance) the free list lock does NOT protect duke@435: // object initialization. The closure application below must duke@435: // therefore be correct in the face of uninitialized objects. duke@435: size = cl->do_object_careful_m(oop(addr), mr); duke@435: if (size == 0) { duke@435: // An unparsable object found. Signal early termination. 
duke@435: return addr; duke@435: } duke@435: } duke@435: } duke@435: return NULL; duke@435: } duke@435: duke@435: duke@435: HeapWord* CompactibleFreeListSpace::block_start(const void* p) const { duke@435: NOT_PRODUCT(verify_objects_initialized()); duke@435: return _bt.block_start(p); duke@435: } duke@435: duke@435: HeapWord* CompactibleFreeListSpace::block_start_careful(const void* p) const { duke@435: return _bt.block_start_careful(p); duke@435: } duke@435: duke@435: size_t CompactibleFreeListSpace::block_size(const HeapWord* p) const { duke@435: NOT_PRODUCT(verify_objects_initialized()); duke@435: assert(MemRegion(bottom(), end()).contains(p), "p not in space"); duke@435: // This must be volatile, or else there is a danger that the compiler duke@435: // will compile the code below into a sometimes-infinite loop, by keeping duke@435: // the value read the first time in a register. duke@435: oop o = (oop)p; duke@435: volatile oop* second_word_addr = o->klass_addr(); duke@435: while (true) { duke@435: klassOop k = (klassOop)(*second_word_addr); duke@435: // We must do this until we get a consistent view of the object. duke@435: if (FreeChunk::secondWordIndicatesFreeChunk((intptr_t)k)) { duke@435: FreeChunk* fc = (FreeChunk*)p; duke@435: volatile size_t* sz_addr = (volatile size_t*)(fc->size_addr()); duke@435: size_t res = (*sz_addr); duke@435: klassOop k2 = (klassOop)(*second_word_addr); // Read to confirm. duke@435: if (k == k2) { duke@435: assert(res != 0, "Block size should not be 0"); duke@435: return res; duke@435: } duke@435: } else if (k != NULL) { duke@435: assert(k->is_oop(true /* ignore mark word */), "Should really be klass oop."); duke@435: assert(o->is_parsable(), "Should be parsable"); duke@435: assert(o->is_oop(true /* ignore mark word */), "Should be an oop."); duke@435: size_t res = o->size_given_klass(k->klass_part()); duke@435: res = adjustObjectSize(res); duke@435: assert(res != 0, "Block size should not be 0"); duke@435: return res; duke@435: } duke@435: } duke@435: } duke@435: duke@435: // A variant of the above that uses the Printezis bits for duke@435: // unparsable but allocated objects. This avoids any possible duke@435: // stalls waiting for mutators to initialize objects, and is duke@435: // thus potentially faster than the variant above. However, duke@435: // this variant may return a zero size for a block that is duke@435: // under mutation and for which a consistent size cannot be duke@435: // inferred without stalling; see CMSCollector::block_size_if_printezis_bits(). duke@435: size_t CompactibleFreeListSpace::block_size_no_stall(HeapWord* p, duke@435: const CMSCollector* c) duke@435: const { duke@435: assert(MemRegion(bottom(), end()).contains(p), "p not in space"); duke@435: // This must be volatile, or else there is a danger that the compiler duke@435: // will compile the code below into a sometimes-infinite loop, by keeping duke@435: // the value read the first time in a register. duke@435: oop o = (oop)p; duke@435: volatile oop* second_word_addr = o->klass_addr(); duke@435: DEBUG_ONLY(uint loops = 0;) duke@435: while (true) { duke@435: klassOop k = (klassOop)(*second_word_addr); duke@435: // We must do this until we get a consistent view of the object. duke@435: if (FreeChunk::secondWordIndicatesFreeChunk((intptr_t)k)) { duke@435: FreeChunk* fc = (FreeChunk*)p; duke@435: volatile size_t* sz_addr = (volatile size_t*)(fc->size_addr()); duke@435: size_t res = (*sz_addr); duke@435: klassOop k2 = (klassOop)(*second_word_addr); // Read to confirm. 
duke@435: if (k == k2) { duke@435: assert(res != 0, "Block size should not be 0"); duke@435: assert(loops == 0, "Should be 0"); duke@435: return res; duke@435: } duke@435: } else if (k != NULL && o->is_parsable()) { duke@435: assert(k->is_oop(), "Should really be klass oop."); duke@435: assert(o->is_oop(), "Should be an oop"); duke@435: size_t res = o->size_given_klass(k->klass_part()); duke@435: res = adjustObjectSize(res); duke@435: assert(res != 0, "Block size should not be 0"); duke@435: return res; duke@435: } else { duke@435: return c->block_size_if_printezis_bits(p); duke@435: } duke@435: assert(loops == 0, "Can loop at most once"); duke@435: DEBUG_ONLY(loops++;) duke@435: } duke@435: } duke@435: duke@435: size_t CompactibleFreeListSpace::block_size_nopar(const HeapWord* p) const { duke@435: NOT_PRODUCT(verify_objects_initialized()); duke@435: assert(MemRegion(bottom(), end()).contains(p), "p not in space"); duke@435: FreeChunk* fc = (FreeChunk*)p; duke@435: if (fc->isFree()) { duke@435: return fc->size(); duke@435: } else { duke@435: // Ignore mark word because this may be a recently promoted duke@435: // object whose mark word is used to chain together grey duke@435: // objects (the last one would have a null value). duke@435: assert(oop(p)->is_oop(true), "Should be an oop"); duke@435: return adjustObjectSize(oop(p)->size()); duke@435: } duke@435: } duke@435: duke@435: // This implementation assumes that the property of "being an object" is duke@435: // stable. But being a free chunk may not be (because of parallel duke@435: // promotion.) duke@435: bool CompactibleFreeListSpace::block_is_obj(const HeapWord* p) const { duke@435: FreeChunk* fc = (FreeChunk*)p; duke@435: assert(is_in_reserved(p), "Should be in space"); duke@435: // When doing a mark-sweep-compact of the CMS generation, this duke@435: // assertion may fail because prepare_for_compaction() uses duke@435: // space that is garbage to maintain information on ranges of duke@435: // live objects so that these live ranges can be moved as a whole. duke@435: // Comment out this assertion until that problem can be solved duke@435: // (i.e., that the block start calculation may look at objects duke@435: // at address below "p" in finding the object that contains "p" duke@435: // and those objects (if garbage) may have been modified to hold duke@435: // live range information. duke@435: // assert(ParallelGCThreads > 0 || _bt.block_start(p) == p, "Should be a block boundary"); duke@435: klassOop k = oop(p)->klass(); duke@435: intptr_t ki = (intptr_t)k; duke@435: if (FreeChunk::secondWordIndicatesFreeChunk(ki)) return false; duke@435: if (k != NULL) { duke@435: // Ignore mark word because it may have been used to duke@435: // chain together promoted objects (the last one duke@435: // would have a null value). duke@435: assert(oop(p)->is_oop(true), "Should be an oop"); duke@435: return true; duke@435: } else { duke@435: return false; // Was not an object at the start of collection. duke@435: } duke@435: } duke@435: duke@435: // Check if the object is alive. This fact is checked either by consulting duke@435: // the main marking bitmap in the sweeping phase or, if it's a permanent duke@435: // generation and we're not in the sweeping phase, by checking the duke@435: // perm_gen_verify_bit_map where we store the "deadness" information if duke@435: // we did not sweep the perm gen in the most recent previous GC cycle. 
duke@435: bool CompactibleFreeListSpace::obj_is_alive(const HeapWord* p) const { duke@435: assert (block_is_obj(p), "The address should point to an object"); duke@435: duke@435: // If we're sweeping, we use object liveness information from the main bit map duke@435: // for both perm gen and old gen. duke@435: // We don't need to lock the bitmap (live_map or dead_map below), because duke@435: // EITHER we are in the middle of the sweeping phase, and the duke@435: // main marking bit map (live_map below) is locked, duke@435: // OR we're in other phases and perm_gen_verify_bit_map (dead_map below) duke@435: // is stable, because it's mutated only in the sweeping phase. duke@435: if (_collector->abstract_state() == CMSCollector::Sweeping) { duke@435: CMSBitMap* live_map = _collector->markBitMap(); duke@435: return live_map->isMarked((HeapWord*) p); duke@435: } else { duke@435: // If we're not currently sweeping and we haven't swept the perm gen in duke@435: // the previous concurrent cycle then we may have dead but unswept objects duke@435: // in the perm gen. In this case, we use the "deadness" information duke@435: // that we had saved in perm_gen_verify_bit_map at the last sweep. duke@435: if (!CMSClassUnloadingEnabled && _collector->_permGen->reserved().contains(p)) { duke@435: if (_collector->verifying()) { duke@435: CMSBitMap* dead_map = _collector->perm_gen_verify_bit_map(); duke@435: // Object is marked in the dead_map bitmap at the previous sweep duke@435: // when we know that it's dead; if the bitmap is not allocated then duke@435: // the object is alive. duke@435: return (dead_map->sizeInBits() == 0) // bit_map has been allocated duke@435: || !dead_map->par_isMarked((HeapWord*) p); duke@435: } else { duke@435: return false; // We can't say for sure if it's live, so we say that it's dead. duke@435: } duke@435: } duke@435: } duke@435: return true; duke@435: } duke@435: duke@435: bool CompactibleFreeListSpace::block_is_obj_nopar(const HeapWord* p) const { duke@435: FreeChunk* fc = (FreeChunk*)p; duke@435: assert(is_in_reserved(p), "Should be in space"); duke@435: assert(_bt.block_start(p) == p, "Should be a block boundary"); duke@435: if (!fc->isFree()) { duke@435: // Ignore mark word because it may have been used to duke@435: // chain together promoted objects (the last one duke@435: // would have a null value). duke@435: assert(oop(p)->is_oop(true), "Should be an oop"); duke@435: return true; duke@435: } duke@435: return false; duke@435: } duke@435: duke@435: // "MT-safe but not guaranteed MT-precise" (TM); you may get an duke@435: // approximate answer if you don't hold the freelistlock when you call this. duke@435: size_t CompactibleFreeListSpace::totalSizeInIndexedFreeLists() const { duke@435: size_t size = 0; duke@435: for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) { duke@435: debug_only( duke@435: // We may be calling here without the lock in which case we duke@435: // won't do this modest sanity check. 
duke@435: if (freelistLock()->owned_by_self()) { duke@435: size_t total_list_size = 0; duke@435: for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL; duke@435: fc = fc->next()) { duke@435: total_list_size += i; duke@435: } duke@435: assert(total_list_size == i * _indexedFreeList[i].count(), duke@435: "Count in list is incorrect"); duke@435: } duke@435: ) duke@435: size += i * _indexedFreeList[i].count(); duke@435: } duke@435: return size; duke@435: } duke@435: duke@435: HeapWord* CompactibleFreeListSpace::par_allocate(size_t size) { duke@435: MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag); duke@435: return allocate(size); duke@435: } duke@435: duke@435: HeapWord* duke@435: CompactibleFreeListSpace::getChunkFromSmallLinearAllocBlockRemainder(size_t size) { duke@435: return getChunkFromLinearAllocBlockRemainder(&_smallLinearAllocBlock, size); duke@435: } duke@435: duke@435: HeapWord* CompactibleFreeListSpace::allocate(size_t size) { duke@435: assert_lock_strong(freelistLock()); duke@435: HeapWord* res = NULL; duke@435: assert(size == adjustObjectSize(size), duke@435: "use adjustObjectSize() before calling into allocate()"); duke@435: duke@435: if (_adaptive_freelists) { duke@435: res = allocate_adaptive_freelists(size); duke@435: } else { // non-adaptive free lists duke@435: res = allocate_non_adaptive_freelists(size); duke@435: } duke@435: duke@435: if (res != NULL) { duke@435: // check that res does lie in this space! duke@435: assert(is_in_reserved(res), "Not in this space!"); duke@435: assert(is_aligned((void*)res), "alignment check"); duke@435: duke@435: FreeChunk* fc = (FreeChunk*)res; duke@435: fc->markNotFree(); duke@435: assert(!fc->isFree(), "shouldn't be marked free"); duke@435: assert(oop(fc)->klass() == NULL, "should look uninitialized"); duke@435: // Verify that the block offset table shows this to duke@435: // be a single block, but not one which is unallocated. duke@435: _bt.verify_single_block(res, size); duke@435: _bt.verify_not_unallocated(res, size); duke@435: // mangle a just allocated object with a distinct pattern. duke@435: debug_only(fc->mangleAllocated(size)); duke@435: } duke@435: duke@435: return res; duke@435: } duke@435: duke@435: HeapWord* CompactibleFreeListSpace::allocate_non_adaptive_freelists(size_t size) { duke@435: HeapWord* res = NULL; duke@435: // try and use linear allocation for smaller blocks duke@435: if (size < _smallLinearAllocBlock._allocation_size_limit) { duke@435: // if successful, the following also adjusts block offset table duke@435: res = getChunkFromSmallLinearAllocBlock(size); duke@435: } duke@435: // Else triage to indexed lists for smaller sizes duke@435: if (res == NULL) { duke@435: if (size < SmallForDictionary) { duke@435: res = (HeapWord*) getChunkFromIndexedFreeList(size); duke@435: } else { duke@435: // else get it from the big dictionary; if even this doesn't duke@435: // work we are out of luck. 
duke@435: res = (HeapWord*)getChunkFromDictionaryExact(size); duke@435: } duke@435: } duke@435: duke@435: return res; duke@435: } duke@435: duke@435: HeapWord* CompactibleFreeListSpace::allocate_adaptive_freelists(size_t size) { duke@435: assert_lock_strong(freelistLock()); duke@435: HeapWord* res = NULL; duke@435: assert(size == adjustObjectSize(size), duke@435: "use adjustObjectSize() before calling into allocate()"); duke@435: duke@435: // Strategy duke@435: // if small duke@435: // exact size from small object indexed list if small duke@435: // small or large linear allocation block (linAB) as appropriate duke@435: // take from lists of greater sized chunks duke@435: // else duke@435: // dictionary duke@435: // small or large linear allocation block if it has the space duke@435: // Try allocating exact size from indexTable first duke@435: if (size < IndexSetSize) { duke@435: res = (HeapWord*) getChunkFromIndexedFreeList(size); duke@435: if(res != NULL) { duke@435: assert(res != (HeapWord*)_indexedFreeList[size].head(), duke@435: "Not removed from free list"); duke@435: // no block offset table adjustment is necessary on blocks in duke@435: // the indexed lists. duke@435: duke@435: // Try allocating from the small LinAB duke@435: } else if (size < _smallLinearAllocBlock._allocation_size_limit && duke@435: (res = getChunkFromSmallLinearAllocBlock(size)) != NULL) { duke@435: // if successful, the above also adjusts block offset table duke@435: // Note that this call will refill the LinAB to duke@435: // satisfy the request. This is different that duke@435: // evm. duke@435: // Don't record chunk off a LinAB? smallSplitBirth(size); duke@435: duke@435: } else { duke@435: // Raid the exact free lists larger than size, even if they are not duke@435: // overpopulated. duke@435: res = (HeapWord*) getChunkFromGreater(size); duke@435: } duke@435: } else { duke@435: // Big objects get allocated directly from the dictionary. duke@435: res = (HeapWord*) getChunkFromDictionaryExact(size); duke@435: if (res == NULL) { duke@435: // Try hard not to fail since an allocation failure will likely duke@435: // trigger a synchronous GC. Try to get the space from the duke@435: // allocation blocks. duke@435: res = getChunkFromSmallLinearAllocBlockRemainder(size); duke@435: } duke@435: } duke@435: duke@435: return res; duke@435: } duke@435: duke@435: // A worst-case estimate of the space required (in HeapWords) to expand the heap duke@435: // when promoting obj. duke@435: size_t CompactibleFreeListSpace::expansionSpaceRequired(size_t obj_size) const { duke@435: // Depending on the object size, expansion may require refilling either a duke@435: // bigLAB or a smallLAB plus refilling a PromotionInfo object. MinChunkSize duke@435: // is added because the dictionary may over-allocate to avoid fragmentation. 
duke@435: size_t space = obj_size; duke@435: if (!_adaptive_freelists) { duke@435: space = MAX2(space, _smallLinearAllocBlock._refillSize); duke@435: } duke@435: space += _promoInfo.refillSize() + 2 * MinChunkSize; duke@435: return space; duke@435: } duke@435: duke@435: FreeChunk* CompactibleFreeListSpace::getChunkFromGreater(size_t numWords) { duke@435: FreeChunk* ret; duke@435: duke@435: assert(numWords >= MinChunkSize, "Size is less than minimum"); duke@435: assert(linearAllocationWouldFail() || bestFitFirst(), duke@435: "Should not be here"); duke@435: duke@435: size_t i; duke@435: size_t currSize = numWords + MinChunkSize; duke@435: assert(currSize % MinObjAlignment == 0, "currSize should be aligned"); duke@435: for (i = currSize; i < IndexSetSize; i += IndexSetStride) { duke@435: FreeList* fl = &_indexedFreeList[i]; duke@435: if (fl->head()) { duke@435: ret = getFromListGreater(fl, numWords); duke@435: assert(ret == NULL || ret->isFree(), "Should be returning a free chunk"); duke@435: return ret; duke@435: } duke@435: } duke@435: duke@435: currSize = MAX2((size_t)SmallForDictionary, duke@435: (size_t)(numWords + MinChunkSize)); duke@435: duke@435: /* Try to get a chunk that satisfies request, while avoiding duke@435: fragmentation that can't be handled. */ duke@435: { duke@435: ret = dictionary()->getChunk(currSize); duke@435: if (ret != NULL) { duke@435: assert(ret->size() - numWords >= MinChunkSize, duke@435: "Chunk is too small"); duke@435: _bt.allocated((HeapWord*)ret, ret->size()); duke@435: /* Carve returned chunk. */ duke@435: (void) splitChunkAndReturnRemainder(ret, numWords); duke@435: /* Label this as no longer a free chunk. */ duke@435: assert(ret->isFree(), "This chunk should be free"); duke@435: ret->linkPrev(NULL); duke@435: } duke@435: assert(ret == NULL || ret->isFree(), "Should be returning a free chunk"); duke@435: return ret; duke@435: } duke@435: ShouldNotReachHere(); duke@435: } duke@435: duke@435: bool CompactibleFreeListSpace::verifyChunkInIndexedFreeLists(FreeChunk* fc) duke@435: const { duke@435: assert(fc->size() < IndexSetSize, "Size of chunk is too large"); duke@435: return _indexedFreeList[fc->size()].verifyChunkInFreeLists(fc); duke@435: } duke@435: duke@435: bool CompactibleFreeListSpace::verifyChunkInFreeLists(FreeChunk* fc) const { duke@435: if (fc->size() >= IndexSetSize) { duke@435: return dictionary()->verifyChunkInFreeLists(fc); duke@435: } else { duke@435: return verifyChunkInIndexedFreeLists(fc); duke@435: } duke@435: } duke@435: duke@435: #ifndef PRODUCT duke@435: void CompactibleFreeListSpace::assert_locked() const { duke@435: CMSLockVerifier::assert_locked(freelistLock(), parDictionaryAllocLock()); duke@435: } duke@435: #endif duke@435: duke@435: FreeChunk* CompactibleFreeListSpace::allocateScratch(size_t size) { duke@435: // In the parallel case, the main thread holds the free list lock duke@435: // on behalf the parallel threads. duke@435: assert_locked(); duke@435: FreeChunk* fc; duke@435: { duke@435: // If GC is parallel, this might be called by several threads. duke@435: // This should be rare enough that the locking overhead won't affect duke@435: // the sequential code. 
duke@435: MutexLockerEx x(parDictionaryAllocLock(), duke@435: Mutex::_no_safepoint_check_flag); duke@435: fc = getChunkFromDictionary(size); duke@435: } duke@435: if (fc != NULL) { duke@435: fc->dontCoalesce(); duke@435: assert(fc->isFree(), "Should be free, but not coalescable"); duke@435: // Verify that the block offset table shows this to duke@435: // be a single block, but not one which is unallocated. duke@435: _bt.verify_single_block((HeapWord*)fc, fc->size()); duke@435: _bt.verify_not_unallocated((HeapWord*)fc, fc->size()); duke@435: } duke@435: return fc; duke@435: } duke@435: coleenp@548: oop CompactibleFreeListSpace::promote(oop obj, size_t obj_size) { duke@435: assert(obj_size == (size_t)obj->size(), "bad obj_size passed in"); duke@435: assert_locked(); duke@435: duke@435: // if we are tracking promotions, then first ensure space for duke@435: // promotion (including spooling space for saving header if necessary). duke@435: // then allocate and copy, then track promoted info if needed. duke@435: // When tracking (see PromotionInfo::track()), the mark word may duke@435: // be displaced and in this case restoration of the mark word duke@435: // occurs in the (oop_since_save_marks_)iterate phase. duke@435: if (_promoInfo.tracking() && !_promoInfo.ensure_spooling_space()) { duke@435: return NULL; duke@435: } duke@435: // Call the allocate(size_t, bool) form directly to avoid the duke@435: // additional call through the allocate(size_t) form. Having duke@435: // the compile inline the call is problematic because allocate(size_t) duke@435: // is a virtual method. duke@435: HeapWord* res = allocate(adjustObjectSize(obj_size)); duke@435: if (res != NULL) { duke@435: Copy::aligned_disjoint_words((HeapWord*)obj, res, obj_size); duke@435: // if we should be tracking promotions, do so. duke@435: if (_promoInfo.tracking()) { duke@435: _promoInfo.track((PromotedObject*)res); duke@435: } duke@435: } duke@435: return oop(res); duke@435: } duke@435: duke@435: HeapWord* duke@435: CompactibleFreeListSpace::getChunkFromSmallLinearAllocBlock(size_t size) { duke@435: assert_locked(); duke@435: assert(size >= MinChunkSize, "minimum chunk size"); duke@435: assert(size < _smallLinearAllocBlock._allocation_size_limit, duke@435: "maximum from smallLinearAllocBlock"); duke@435: return getChunkFromLinearAllocBlock(&_smallLinearAllocBlock, size); duke@435: } duke@435: duke@435: HeapWord* duke@435: CompactibleFreeListSpace::getChunkFromLinearAllocBlock(LinearAllocBlock *blk, duke@435: size_t size) { duke@435: assert_locked(); duke@435: assert(size >= MinChunkSize, "too small"); duke@435: HeapWord* res = NULL; duke@435: // Try to do linear allocation from blk, making sure that duke@435: if (blk->_word_size == 0) { duke@435: // We have probably been unable to fill this either in the prologue or duke@435: // when it was exhausted at the last linear allocation. Bail out until duke@435: // next time. 
duke@435: assert(blk->_ptr == NULL, "consistency check"); duke@435: return NULL; duke@435: } duke@435: assert(blk->_word_size != 0 && blk->_ptr != NULL, "consistency check"); duke@435: res = getChunkFromLinearAllocBlockRemainder(blk, size); duke@435: if (res != NULL) return res; duke@435: duke@435: // about to exhaust this linear allocation block duke@435: if (blk->_word_size == size) { // exactly satisfied duke@435: res = blk->_ptr; duke@435: _bt.allocated(res, blk->_word_size); duke@435: } else if (size + MinChunkSize <= blk->_refillSize) { duke@435: // Update _unallocated_block if the size is such that chunk would be duke@435: // returned to the indexed free list. All other chunks in the indexed duke@435: // free lists are allocated from the dictionary so that _unallocated_block duke@435: // has already been adjusted for them. Do it here so that the cost is duke@435: // paid for all chunks added back to the indexed free lists. duke@435: if (blk->_word_size < SmallForDictionary) { duke@435: _bt.allocated(blk->_ptr, blk->_word_size); duke@435: } duke@435: // Return the chunk that isn't big enough, and then refill below. duke@435: addChunkToFreeLists(blk->_ptr, blk->_word_size); duke@435: _bt.verify_single_block(blk->_ptr, (blk->_ptr + blk->_word_size)); duke@435: // Don't keep statistics on adding back chunk from a LinAB. duke@435: } else { duke@435: // A refilled block would not satisfy the request. duke@435: return NULL; duke@435: } duke@435: duke@435: blk->_ptr = NULL; blk->_word_size = 0; duke@435: refillLinearAllocBlock(blk); duke@435: assert(blk->_ptr == NULL || blk->_word_size >= size + MinChunkSize, duke@435: "block was replenished"); duke@435: if (res != NULL) { duke@435: splitBirth(size); duke@435: repairLinearAllocBlock(blk); duke@435: } else if (blk->_ptr != NULL) { duke@435: res = blk->_ptr; duke@435: size_t blk_size = blk->_word_size; duke@435: blk->_word_size -= size; duke@435: blk->_ptr += size; duke@435: splitBirth(size); duke@435: repairLinearAllocBlock(blk); duke@435: // Update BOT last so that other (parallel) GC threads see a consistent duke@435: // view of the BOT and free blocks. duke@435: // Above must occur before BOT is updated below. duke@435: _bt.split_block(res, blk_size, size); // adjust block offset table duke@435: } duke@435: return res; duke@435: } duke@435: duke@435: HeapWord* CompactibleFreeListSpace::getChunkFromLinearAllocBlockRemainder( duke@435: LinearAllocBlock* blk, duke@435: size_t size) { duke@435: assert_locked(); duke@435: assert(size >= MinChunkSize, "too small"); duke@435: duke@435: HeapWord* res = NULL; duke@435: // This is the common case. Keep it simple. duke@435: if (blk->_word_size >= size + MinChunkSize) { duke@435: assert(blk->_ptr != NULL, "consistency check"); duke@435: res = blk->_ptr; duke@435: // Note that the BOT is up-to-date for the linAB before allocation. It duke@435: // indicates the start of the linAB. The split_block() updates the duke@435: // BOT for the linAB after the allocation (indicates the start of the duke@435: // next chunk to be allocated). duke@435: size_t blk_size = blk->_word_size; duke@435: blk->_word_size -= size; duke@435: blk->_ptr += size; duke@435: splitBirth(size); duke@435: repairLinearAllocBlock(blk); duke@435: // Update BOT last so that other (parallel) GC threads see a consistent duke@435: // view of the BOT and free blocks. duke@435: // Above must occur before BOT is updated below.
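    // Illustration of the carving above (values chosen arbitrarily): if
    // blk->_ptr was 0x1000 with blk->_word_size == 100 and size == 16, then
    // res == 0x1000, the linAB now starts 16 words further on with 84 words
    // left, and split_block() below records in the BOT that a new block
    // begins 16 words into what was previously a single 100-word block.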
duke@435: _bt.split_block(res, blk_size, size); // adjust block offset table duke@435: _bt.allocated(res, size); duke@435: } duke@435: return res; duke@435: } duke@435: duke@435: FreeChunk* duke@435: CompactibleFreeListSpace::getChunkFromIndexedFreeList(size_t size) { duke@435: assert_locked(); duke@435: assert(size < SmallForDictionary, "just checking"); duke@435: FreeChunk* res; duke@435: res = _indexedFreeList[size].getChunkAtHead(); duke@435: if (res == NULL) { duke@435: res = getChunkFromIndexedFreeListHelper(size); duke@435: } duke@435: _bt.verify_not_unallocated((HeapWord*) res, size); duke@435: return res; duke@435: } duke@435: duke@435: FreeChunk* duke@435: CompactibleFreeListSpace::getChunkFromIndexedFreeListHelper(size_t size) { duke@435: assert_locked(); duke@435: FreeChunk* fc = NULL; duke@435: if (size < SmallForDictionary) { duke@435: assert(_indexedFreeList[size].head() == NULL || duke@435: _indexedFreeList[size].surplus() <= 0, duke@435: "List for this size should be empty or under populated"); duke@435: // Try best fit in exact lists before replenishing the list duke@435: if (!bestFitFirst() || (fc = bestFitSmall(size)) == NULL) { duke@435: // Replenish list. duke@435: // duke@435: // Things tried that failed. duke@435: // Tried allocating out of the two LinAB's first before duke@435: // replenishing lists. duke@435: // Tried small linAB of size 256 (size in indexed list) duke@435: // and replenishing indexed lists from the small linAB. duke@435: // duke@435: FreeChunk* newFc = NULL; duke@435: size_t replenish_size = CMSIndexedFreeListReplenish * size; duke@435: if (replenish_size < SmallForDictionary) { duke@435: // Do not replenish from an underpopulated size. duke@435: if (_indexedFreeList[replenish_size].surplus() > 0 && duke@435: _indexedFreeList[replenish_size].head() != NULL) { duke@435: newFc = duke@435: _indexedFreeList[replenish_size].getChunkAtHead(); duke@435: } else { duke@435: newFc = bestFitSmall(replenish_size); duke@435: } duke@435: } duke@435: if (newFc != NULL) { duke@435: splitDeath(replenish_size); duke@435: } else if (replenish_size > size) { duke@435: assert(CMSIndexedFreeListReplenish > 1, "ctl pt invariant"); duke@435: newFc = duke@435: getChunkFromIndexedFreeListHelper(replenish_size); duke@435: } duke@435: if (newFc != NULL) { duke@435: assert(newFc->size() == replenish_size, "Got wrong size"); duke@435: size_t i; duke@435: FreeChunk *curFc, *nextFc; duke@435: // carve up and link blocks 0, ..., CMSIndexedFreeListReplenish - 2 duke@435: // The last chunk is not added to the lists but is returned as the duke@435: // free chunk. duke@435: for (curFc = newFc, nextFc = (FreeChunk*)((HeapWord*)curFc + size), duke@435: i = 0; duke@435: i < (CMSIndexedFreeListReplenish - 1); duke@435: curFc = nextFc, nextFc = (FreeChunk*)((HeapWord*)nextFc + size), duke@435: i++) { duke@435: curFc->setSize(size); duke@435: // Don't record this as a return in order to try and duke@435: // determine the "returns" from a GC. duke@435: _bt.verify_not_unallocated((HeapWord*) fc, size); duke@435: _indexedFreeList[size].returnChunkAtTail(curFc, false); duke@435: _bt.mark_block((HeapWord*)curFc, size); duke@435: splitBirth(size); duke@435: // Don't record the initial population of the indexed list duke@435: // as a split birth. 
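        // To illustrate the replenishment being performed here (sizes are
        // arbitrary): with size == 8 and CMSIndexedFreeListReplenish == 4,
        // a 32-word chunk is carved into four 8-word pieces; this loop adds
        // the first three to _indexedFreeList[8], and the last piece is
        // returned to the caller after the loop.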
duke@435: } duke@435: duke@435: // check that the arithmetic was OK above duke@435: assert((HeapWord*)nextFc == (HeapWord*)newFc + replenish_size, duke@435: "inconsistency in carving newFc"); duke@435: curFc->setSize(size); duke@435: _bt.mark_block((HeapWord*)curFc, size); duke@435: splitBirth(size); duke@435: return curFc; duke@435: } duke@435: } duke@435: } else { duke@435: // Get a free chunk from the free chunk dictionary to be returned to duke@435: // replenish the indexed free list. duke@435: fc = getChunkFromDictionaryExact(size); duke@435: } duke@435: assert(fc == NULL || fc->isFree(), "Should be returning a free chunk"); duke@435: return fc; duke@435: } duke@435: duke@435: FreeChunk* duke@435: CompactibleFreeListSpace::getChunkFromDictionary(size_t size) { duke@435: assert_locked(); duke@435: FreeChunk* fc = _dictionary->getChunk(size); duke@435: if (fc == NULL) { duke@435: return NULL; duke@435: } duke@435: _bt.allocated((HeapWord*)fc, fc->size()); duke@435: if (fc->size() >= size + MinChunkSize) { duke@435: fc = splitChunkAndReturnRemainder(fc, size); duke@435: } duke@435: assert(fc->size() >= size, "chunk too small"); duke@435: assert(fc->size() < size + MinChunkSize, "chunk too big"); duke@435: _bt.verify_single_block((HeapWord*)fc, fc->size()); duke@435: return fc; duke@435: } duke@435: duke@435: FreeChunk* duke@435: CompactibleFreeListSpace::getChunkFromDictionaryExact(size_t size) { duke@435: assert_locked(); duke@435: FreeChunk* fc = _dictionary->getChunk(size); duke@435: if (fc == NULL) { duke@435: return fc; duke@435: } duke@435: _bt.allocated((HeapWord*)fc, fc->size()); duke@435: if (fc->size() == size) { duke@435: _bt.verify_single_block((HeapWord*)fc, size); duke@435: return fc; duke@435: } duke@435: assert(fc->size() > size, "getChunk() guarantee"); duke@435: if (fc->size() < size + MinChunkSize) { duke@435: // Return the chunk to the dictionary and go get a bigger one. duke@435: returnChunkToDictionary(fc); duke@435: fc = _dictionary->getChunk(size + MinChunkSize); duke@435: if (fc == NULL) { duke@435: return NULL; duke@435: } duke@435: _bt.allocated((HeapWord*)fc, fc->size()); duke@435: } duke@435: assert(fc->size() >= size + MinChunkSize, "tautology"); duke@435: fc = splitChunkAndReturnRemainder(fc, size); duke@435: assert(fc->size() == size, "chunk is wrong size"); duke@435: _bt.verify_single_block((HeapWord*)fc, size); duke@435: return fc; duke@435: } duke@435: duke@435: void duke@435: CompactibleFreeListSpace::returnChunkToDictionary(FreeChunk* chunk) { duke@435: assert_locked(); duke@435: duke@435: size_t size = chunk->size(); duke@435: _bt.verify_single_block((HeapWord*)chunk, size); duke@435: // adjust _unallocated_block downward, as necessary duke@435: _bt.freed((HeapWord*)chunk, size); duke@435: _dictionary->returnChunk(chunk); duke@435: } duke@435: duke@435: void duke@435: CompactibleFreeListSpace::returnChunkToFreeList(FreeChunk* fc) { duke@435: assert_locked(); duke@435: size_t size = fc->size(); duke@435: _bt.verify_single_block((HeapWord*) fc, size); duke@435: _bt.verify_not_unallocated((HeapWord*) fc, size); duke@435: if (_adaptive_freelists) { duke@435: _indexedFreeList[size].returnChunkAtTail(fc); duke@435: } else { duke@435: _indexedFreeList[size].returnChunkAtHead(fc); duke@435: } duke@435: } duke@435: duke@435: // Add chunk to end of last block -- if it's the largest duke@435: // block -- and update BOT and census data. 
We would duke@435: // of course have preferred to coalesce it with the duke@435: // last block, but it's currently less expensive to find the duke@435: // largest block than it is to find the last. duke@435: void duke@435: CompactibleFreeListSpace::addChunkToFreeListsAtEndRecordingStats( duke@435: HeapWord* chunk, size_t size) { duke@435: // check that the chunk does lie in this space! duke@435: assert(chunk != NULL && is_in_reserved(chunk), "Not in this space!"); duke@435: assert_locked(); duke@435: // One of the parallel gc task threads may be here duke@435: // whilst others are allocating. duke@435: Mutex* lock = NULL; duke@435: if (ParallelGCThreads != 0) { duke@435: lock = &_parDictionaryAllocLock; duke@435: } duke@435: FreeChunk* ec; duke@435: { duke@435: MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag); duke@435: ec = dictionary()->findLargestDict(); // get largest block duke@435: if (ec != NULL && ec->end() == chunk) { duke@435: // It's a coterminal block - we can coalesce. duke@435: size_t old_size = ec->size(); duke@435: coalDeath(old_size); duke@435: removeChunkFromDictionary(ec); duke@435: size += old_size; duke@435: } else { duke@435: ec = (FreeChunk*)chunk; duke@435: } duke@435: } duke@435: ec->setSize(size); duke@435: debug_only(ec->mangleFreed(size)); duke@435: if (size < SmallForDictionary) { duke@435: lock = _indexedFreeListParLocks[size]; duke@435: } duke@435: MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag); duke@435: addChunkAndRepairOffsetTable((HeapWord*)ec, size, true); duke@435: // record the birth under the lock since the recording involves duke@435: // manipulation of the list on which the chunk lives and duke@435: // if the chunk is allocated and is the last on the list, duke@435: // the list can go away. duke@435: coalBirth(size); duke@435: } duke@435: duke@435: void duke@435: CompactibleFreeListSpace::addChunkToFreeLists(HeapWord* chunk, duke@435: size_t size) { duke@435: // check that the chunk does lie in this space! duke@435: assert(chunk != NULL && is_in_reserved(chunk), "Not in this space!"); duke@435: assert_locked(); duke@435: _bt.verify_single_block(chunk, size); duke@435: duke@435: FreeChunk* fc = (FreeChunk*) chunk; duke@435: fc->setSize(size); duke@435: debug_only(fc->mangleFreed(size)); duke@435: if (size < SmallForDictionary) { duke@435: returnChunkToFreeList(fc); duke@435: } else { duke@435: returnChunkToDictionary(fc); duke@435: } duke@435: } duke@435: duke@435: void duke@435: CompactibleFreeListSpace::addChunkAndRepairOffsetTable(HeapWord* chunk, duke@435: size_t size, bool coalesced) { duke@435: assert_locked(); duke@435: assert(chunk != NULL, "null chunk"); duke@435: if (coalesced) { duke@435: // repair BOT duke@435: _bt.single_block(chunk, size); duke@435: } duke@435: addChunkToFreeLists(chunk, size); duke@435: } duke@435: duke@435: // We _must_ find the purported chunk on our free lists; duke@435: // we assert if we don't. 
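// As with the add paths above, removal is routed by size: chunks smaller
// than SmallForDictionary are looked up in the per-size indexed free lists,
// anything larger in the dictionary. For illustration, if SmallForDictionary
// were 257 words, a 64-word chunk would be removed from _indexedFreeList[64]
// and a 1024-word chunk from the dictionary.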
duke@435: void duke@435: CompactibleFreeListSpace::removeFreeChunkFromFreeLists(FreeChunk* fc) { duke@435: size_t size = fc->size(); duke@435: assert_locked(); duke@435: debug_only(verifyFreeLists()); duke@435: if (size < SmallForDictionary) { duke@435: removeChunkFromIndexedFreeList(fc); duke@435: } else { duke@435: removeChunkFromDictionary(fc); duke@435: } duke@435: _bt.verify_single_block((HeapWord*)fc, size); duke@435: debug_only(verifyFreeLists()); duke@435: } duke@435: duke@435: void duke@435: CompactibleFreeListSpace::removeChunkFromDictionary(FreeChunk* fc) { duke@435: size_t size = fc->size(); duke@435: assert_locked(); duke@435: assert(fc != NULL, "null chunk"); duke@435: _bt.verify_single_block((HeapWord*)fc, size); duke@435: _dictionary->removeChunk(fc); duke@435: // adjust _unallocated_block upward, as necessary duke@435: _bt.allocated((HeapWord*)fc, size); duke@435: } duke@435: duke@435: void duke@435: CompactibleFreeListSpace::removeChunkFromIndexedFreeList(FreeChunk* fc) { duke@435: assert_locked(); duke@435: size_t size = fc->size(); duke@435: _bt.verify_single_block((HeapWord*)fc, size); duke@435: NOT_PRODUCT( duke@435: if (FLSVerifyIndexTable) { duke@435: verifyIndexedFreeList(size); duke@435: } duke@435: ) duke@435: _indexedFreeList[size].removeChunk(fc); duke@435: debug_only(fc->clearNext()); duke@435: debug_only(fc->clearPrev()); duke@435: NOT_PRODUCT( duke@435: if (FLSVerifyIndexTable) { duke@435: verifyIndexedFreeList(size); duke@435: } duke@435: ) duke@435: } duke@435: duke@435: FreeChunk* CompactibleFreeListSpace::bestFitSmall(size_t numWords) { duke@435: /* A hint is the next larger size that has a surplus. duke@435: Start search at a size large enough to guarantee that duke@435: the excess is >= MIN_CHUNK. */ duke@435: size_t start = align_object_size(numWords + MinChunkSize); duke@435: if (start < IndexSetSize) { duke@435: FreeList* it = _indexedFreeList; duke@435: size_t hint = _indexedFreeList[start].hint(); duke@435: while (hint < IndexSetSize) { duke@435: assert(hint % MinObjAlignment == 0, "hint should be aligned"); duke@435: FreeList *fl = &_indexedFreeList[hint]; duke@435: if (fl->surplus() > 0 && fl->head() != NULL) { duke@435: // Found a list with surplus, reset original hint duke@435: // and split out a free chunk which is returned. duke@435: _indexedFreeList[start].set_hint(hint); duke@435: FreeChunk* res = getFromListGreater(fl, numWords); duke@435: assert(res == NULL || res->isFree(), duke@435: "Should be returning a free chunk"); duke@435: return res; duke@435: } duke@435: hint = fl->hint(); /* keep looking */ duke@435: } duke@435: /* None found. 
*/ duke@435: it[start].set_hint(IndexSetSize); duke@435: } duke@435: return NULL; duke@435: } duke@435: duke@435: /* Requires fl->size >= numWords + MinChunkSize */ duke@435: FreeChunk* CompactibleFreeListSpace::getFromListGreater(FreeList* fl, duke@435: size_t numWords) { duke@435: FreeChunk *curr = fl->head(); duke@435: size_t oldNumWords = curr->size(); duke@435: assert(numWords >= MinChunkSize, "Word size is too small"); duke@435: assert(curr != NULL, "List is empty"); duke@435: assert(oldNumWords >= numWords + MinChunkSize, duke@435: "Size of chunks in the list is too small"); duke@435: duke@435: fl->removeChunk(curr); duke@435: // recorded indirectly by splitChunkAndReturnRemainder - duke@435: // smallSplit(oldNumWords, numWords); duke@435: FreeChunk* new_chunk = splitChunkAndReturnRemainder(curr, numWords); duke@435: // Does anything have to be done for the remainder in terms of duke@435: // fixing the card table? duke@435: assert(new_chunk == NULL || new_chunk->isFree(), duke@435: "Should be returning a free chunk"); duke@435: return new_chunk; duke@435: } duke@435: duke@435: FreeChunk* duke@435: CompactibleFreeListSpace::splitChunkAndReturnRemainder(FreeChunk* chunk, duke@435: size_t new_size) { duke@435: assert_locked(); duke@435: size_t size = chunk->size(); duke@435: assert(size > new_size, "Split from a smaller block?"); duke@435: assert(is_aligned(chunk), "alignment problem"); duke@435: assert(size == adjustObjectSize(size), "alignment problem"); duke@435: size_t rem_size = size - new_size; duke@435: assert(rem_size == adjustObjectSize(rem_size), "alignment problem"); duke@435: assert(rem_size >= MinChunkSize, "Free chunk smaller than minimum"); duke@435: FreeChunk* ffc = (FreeChunk*)((HeapWord*)chunk + new_size); duke@435: assert(is_aligned(ffc), "alignment problem"); duke@435: ffc->setSize(rem_size); duke@435: ffc->linkNext(NULL); duke@435: ffc->linkPrev(NULL); // Mark as a free block for other (parallel) GC threads. duke@435: // Above must occur before BOT is updated below. duke@435: // adjust block offset table duke@435: _bt.split_block((HeapWord*)chunk, chunk->size(), new_size); duke@435: if (rem_size < SmallForDictionary) { duke@435: bool is_par = (SharedHeap::heap()->n_par_threads() > 0); duke@435: if (is_par) _indexedFreeListParLocks[rem_size]->lock(); duke@435: returnChunkToFreeList(ffc); duke@435: split(size, rem_size); duke@435: if (is_par) _indexedFreeListParLocks[rem_size]->unlock(); duke@435: } else { duke@435: returnChunkToDictionary(ffc); duke@435: split(size ,rem_size); duke@435: } duke@435: chunk->setSize(new_size); duke@435: return chunk; duke@435: } duke@435: duke@435: void duke@435: CompactibleFreeListSpace::sweep_completed() { duke@435: // Now that space is probably plentiful, refill linear duke@435: // allocation blocks as needed. 
duke@435: refillLinearAllocBlocksIfNeeded(); duke@435: } duke@435: duke@435: void duke@435: CompactibleFreeListSpace::gc_prologue() { duke@435: assert_locked(); duke@435: if (PrintFLSStatistics != 0) { duke@435: gclog_or_tty->print("Before GC:\n"); duke@435: reportFreeListStatistics(); duke@435: } duke@435: refillLinearAllocBlocksIfNeeded(); duke@435: } duke@435: duke@435: void duke@435: CompactibleFreeListSpace::gc_epilogue() { duke@435: assert_locked(); duke@435: if (PrintGCDetails && Verbose && !_adaptive_freelists) { duke@435: if (_smallLinearAllocBlock._word_size == 0) duke@435: warning("CompactibleFreeListSpace(epilogue):: Linear allocation failure"); duke@435: } duke@435: assert(_promoInfo.noPromotions(), "_promoInfo inconsistency"); duke@435: _promoInfo.stopTrackingPromotions(); duke@435: repairLinearAllocationBlocks(); duke@435: // Print Space's stats duke@435: if (PrintFLSStatistics != 0) { duke@435: gclog_or_tty->print("After GC:\n"); duke@435: reportFreeListStatistics(); duke@435: } duke@435: } duke@435: duke@435: // Iteration support, mostly delegated from a CMS generation duke@435: duke@435: void CompactibleFreeListSpace::save_marks() { duke@435: // mark the "end" of the used space at the time of this call; duke@435: // note, however, that promoted objects from this point duke@435: // on are tracked in the _promoInfo below. duke@435: set_saved_mark_word(BlockOffsetArrayUseUnallocatedBlock ? duke@435: unallocated_block() : end()); duke@435: // inform allocator that promotions should be tracked. duke@435: assert(_promoInfo.noPromotions(), "_promoInfo inconsistency"); duke@435: _promoInfo.startTrackingPromotions(); duke@435: } duke@435: duke@435: bool CompactibleFreeListSpace::no_allocs_since_save_marks() { duke@435: assert(_promoInfo.tracking(), "No preceding save_marks?"); duke@435: guarantee(SharedHeap::heap()->n_par_threads() == 0, duke@435: "Shouldn't be called (yet) during parallel part of gc."); duke@435: return _promoInfo.noPromotions(); duke@435: } duke@435: duke@435: #define CFLS_OOP_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix) \ duke@435: \ duke@435: void CompactibleFreeListSpace:: \ duke@435: oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk) { \ duke@435: assert(SharedHeap::heap()->n_par_threads() == 0, \ duke@435: "Shouldn't be called (yet) during parallel part of gc."); \ duke@435: _promoInfo.promoted_oops_iterate##nv_suffix(blk); \ duke@435: /* \ duke@435: * This also restores any displaced headers and removes the elements from \ duke@435: * the iteration set as they are processed, so that we have a clean slate \ duke@435: * at the end of the iteration. Note, thus, that if new objects are \ duke@435: * promoted as a result of the iteration they are iterated over as well. \ duke@435: */ \ duke@435: assert(_promoInfo.noPromotions(), "_promoInfo inconsistency"); \ duke@435: } duke@435: duke@435: ALL_SINCE_SAVE_MARKS_CLOSURES(CFLS_OOP_SINCE_SAVE_MARKS_DEFN) duke@435: duke@435: ////////////////////////////////////////////////////////////////////////////// duke@435: // We go over the list of promoted objects, removing each from the list, duke@435: // and applying the closure (this may, in turn, add more elements to duke@435: // the tail of the promoted list, and these newly added objects will duke@435: // also be processed) until the list is empty. duke@435: // To aid verification and debugging, in the non-product builds duke@435: // we actually forward _promoHead each time we process a promoted oop. 
duke@435: // Note that this is not necessary in general (i.e. when we don't need to duke@435: // call PromotionInfo::verify()) because oop_iterate can only add to the duke@435: // end of _promoTail, and never needs to look at _promoHead. duke@435: duke@435: #define PROMOTED_OOPS_ITERATE_DEFN(OopClosureType, nv_suffix) \ duke@435: \ duke@435: void PromotionInfo::promoted_oops_iterate##nv_suffix(OopClosureType* cl) { \ duke@435: NOT_PRODUCT(verify()); \ duke@435: PromotedObject *curObj, *nextObj; \ duke@435: for (curObj = _promoHead; curObj != NULL; curObj = nextObj) { \ duke@435: if ((nextObj = curObj->next()) == NULL) { \ duke@435: /* protect ourselves against additions due to closure application \ duke@435: below by resetting the list. */ \ duke@435: assert(_promoTail == curObj, "Should have been the tail"); \ duke@435: _promoHead = _promoTail = NULL; \ duke@435: } \ duke@435: if (curObj->hasDisplacedMark()) { \ duke@435: /* restore displaced header */ \ duke@435: oop(curObj)->set_mark(nextDisplacedHeader()); \ duke@435: } else { \ duke@435: /* restore prototypical header */ \ duke@435: oop(curObj)->init_mark(); \ duke@435: } \ duke@435: /* The "promoted_mark" should now not be set */ \ duke@435: assert(!curObj->hasPromotedMark(), \ duke@435: "Should have been cleared by restoring displaced mark-word"); \ duke@435: NOT_PRODUCT(_promoHead = nextObj); \ duke@435: if (cl != NULL) oop(curObj)->oop_iterate(cl); \ duke@435: if (nextObj == NULL) { /* start at head of list reset above */ \ duke@435: nextObj = _promoHead; \ duke@435: } \ duke@435: } \ duke@435: assert(noPromotions(), "post-condition violation"); \ duke@435: assert(_promoHead == NULL && _promoTail == NULL, "emptied promoted list");\ duke@435: assert(_spoolHead == _spoolTail, "emptied spooling buffers"); \ duke@435: assert(_firstIndex == _nextIndex, "empty buffer"); \ duke@435: } duke@435: duke@435: // This should have been ALL_SINCE_...() just like the others, duke@435: // but, because the body of the method above is somehwat longer, duke@435: // the MSVC compiler cannot cope; as a workaround, we split the duke@435: // macro into its 3 constituent parts below (see original macro duke@435: // definition in specializedOopClosures.hpp). duke@435: SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES_YOUNG(PROMOTED_OOPS_ITERATE_DEFN) duke@435: PROMOTED_OOPS_ITERATE_DEFN(OopsInGenClosure,_v) duke@435: duke@435: duke@435: void CompactibleFreeListSpace::object_iterate_since_last_GC(ObjectClosure* cl) { duke@435: // ugghh... how would one do this efficiently for a non-contiguous space? 
duke@435: guarantee(false, "NYI"); duke@435: } duke@435: ysr@447: bool CompactibleFreeListSpace::linearAllocationWouldFail() const { duke@435: return _smallLinearAllocBlock._word_size == 0; duke@435: } duke@435: duke@435: void CompactibleFreeListSpace::repairLinearAllocationBlocks() { duke@435: // Fix up linear allocation blocks to look like free blocks duke@435: repairLinearAllocBlock(&_smallLinearAllocBlock); duke@435: } duke@435: duke@435: void CompactibleFreeListSpace::repairLinearAllocBlock(LinearAllocBlock* blk) { duke@435: assert_locked(); duke@435: if (blk->_ptr != NULL) { duke@435: assert(blk->_word_size != 0 && blk->_word_size >= MinChunkSize, duke@435: "Minimum block size requirement"); duke@435: FreeChunk* fc = (FreeChunk*)(blk->_ptr); duke@435: fc->setSize(blk->_word_size); duke@435: fc->linkPrev(NULL); // mark as free duke@435: fc->dontCoalesce(); duke@435: assert(fc->isFree(), "just marked it free"); duke@435: assert(fc->cantCoalesce(), "just marked it uncoalescable"); duke@435: } duke@435: } duke@435: duke@435: void CompactibleFreeListSpace::refillLinearAllocBlocksIfNeeded() { duke@435: assert_locked(); duke@435: if (_smallLinearAllocBlock._ptr == NULL) { duke@435: assert(_smallLinearAllocBlock._word_size == 0, duke@435: "Size of linAB should be zero if the ptr is NULL"); duke@435: // Reset the linAB refill and allocation size limit. duke@435: _smallLinearAllocBlock.set(0, 0, 1024*SmallForLinearAlloc, SmallForLinearAlloc); duke@435: } duke@435: refillLinearAllocBlockIfNeeded(&_smallLinearAllocBlock); duke@435: } duke@435: duke@435: void duke@435: CompactibleFreeListSpace::refillLinearAllocBlockIfNeeded(LinearAllocBlock* blk) { duke@435: assert_locked(); duke@435: assert((blk->_ptr == NULL && blk->_word_size == 0) || duke@435: (blk->_ptr != NULL && blk->_word_size >= MinChunkSize), duke@435: "blk invariant"); duke@435: if (blk->_ptr == NULL) { duke@435: refillLinearAllocBlock(blk); duke@435: } duke@435: if (PrintMiscellaneous && Verbose) { duke@435: if (blk->_word_size == 0) { duke@435: warning("CompactibleFreeListSpace(prologue):: Linear allocation failure"); duke@435: } duke@435: } duke@435: } duke@435: duke@435: void duke@435: CompactibleFreeListSpace::refillLinearAllocBlock(LinearAllocBlock* blk) { duke@435: assert_locked(); duke@435: assert(blk->_word_size == 0 && blk->_ptr == NULL, duke@435: "linear allocation block should be empty"); duke@435: FreeChunk* fc; duke@435: if (blk->_refillSize < SmallForDictionary && duke@435: (fc = getChunkFromIndexedFreeList(blk->_refillSize)) != NULL) { duke@435: // A linAB's strategy might be to use small sizes to reduce duke@435: // fragmentation but still get the benefits of allocation from a duke@435: // linAB. duke@435: } else { duke@435: fc = getChunkFromDictionary(blk->_refillSize); duke@435: } duke@435: if (fc != NULL) { duke@435: blk->_ptr = (HeapWord*)fc; duke@435: blk->_word_size = fc->size(); duke@435: fc->dontCoalesce(); // to prevent sweeper from sweeping us up duke@435: } duke@435: } duke@435: ysr@447: // Support for concurrent collection policy decisions. ysr@447: bool CompactibleFreeListSpace::should_concurrent_collect() const { ysr@447: // In the future we might want to add in frgamentation stats -- ysr@447: // including erosion of the "mountain" into this decision as well. 
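  // For reference, flsFrag() further below computes one such metric:
  //   frag = 1 - sum(s_i^2) / (sum(s_i))^2, taken over all free block sizes.
  // Illustrative values: a single 40-word free block gives 1 - 1600/1600 = 0,
  // while four 10-word free blocks give 1 - 400/1600 = 0.75; the metric
  // approaches 1 as the free space becomes more fragmented.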
ysr@447: return !adaptive_freelists() && linearAllocationWouldFail(); ysr@447: } ysr@447: duke@435: // Support for compaction duke@435: duke@435: void CompactibleFreeListSpace::prepare_for_compaction(CompactPoint* cp) { duke@435: SCAN_AND_FORWARD(cp,end,block_is_obj,block_size); duke@435: // prepare_for_compaction() uses the space between live objects duke@435: // so that later phase can skip dead space quickly. So verification duke@435: // of the free lists doesn't work after. duke@435: } duke@435: duke@435: #define obj_size(q) adjustObjectSize(oop(q)->size()) duke@435: #define adjust_obj_size(s) adjustObjectSize(s) duke@435: duke@435: void CompactibleFreeListSpace::adjust_pointers() { duke@435: // In other versions of adjust_pointers(), a bail out duke@435: // based on the amount of live data in the generation duke@435: // (i.e., if 0, bail out) may be used. duke@435: // Cannot test used() == 0 here because the free lists have already duke@435: // been mangled by the compaction. duke@435: duke@435: SCAN_AND_ADJUST_POINTERS(adjust_obj_size); duke@435: // See note about verification in prepare_for_compaction(). duke@435: } duke@435: duke@435: void CompactibleFreeListSpace::compact() { duke@435: SCAN_AND_COMPACT(obj_size); duke@435: } duke@435: duke@435: // fragmentation_metric = 1 - [sum of (fbs**2) / (sum of fbs)**2] duke@435: // where fbs is free block sizes duke@435: double CompactibleFreeListSpace::flsFrag() const { duke@435: size_t itabFree = totalSizeInIndexedFreeLists(); duke@435: double frag = 0.0; duke@435: size_t i; duke@435: duke@435: for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) { duke@435: double sz = i; duke@435: frag += _indexedFreeList[i].count() * (sz * sz); duke@435: } duke@435: duke@435: double totFree = itabFree + duke@435: _dictionary->totalChunkSize(DEBUG_ONLY(freelistLock())); duke@435: if (totFree > 0) { duke@435: frag = ((frag + _dictionary->sum_of_squared_block_sizes()) / duke@435: (totFree * totFree)); duke@435: frag = (double)1.0 - frag; duke@435: } else { duke@435: assert(frag == 0.0, "Follows from totFree == 0"); duke@435: } duke@435: return frag; duke@435: } duke@435: duke@435: #define CoalSurplusPercent 1.05 duke@435: #define SplitSurplusPercent 1.10 duke@435: duke@435: void CompactibleFreeListSpace::beginSweepFLCensus( duke@435: float inter_sweep_current, duke@435: float inter_sweep_estimate) { duke@435: assert_locked(); duke@435: size_t i; duke@435: for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) { duke@435: FreeList* fl = &_indexedFreeList[i]; duke@435: fl->compute_desired(inter_sweep_current, inter_sweep_estimate); duke@435: fl->set_coalDesired((ssize_t)((double)fl->desired() * CoalSurplusPercent)); duke@435: fl->set_beforeSweep(fl->count()); duke@435: fl->set_bfrSurp(fl->surplus()); duke@435: } duke@435: _dictionary->beginSweepDictCensus(CoalSurplusPercent, duke@435: inter_sweep_current, duke@435: inter_sweep_estimate); duke@435: } duke@435: duke@435: void CompactibleFreeListSpace::setFLSurplus() { duke@435: assert_locked(); duke@435: size_t i; duke@435: for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) { duke@435: FreeList *fl = &_indexedFreeList[i]; duke@435: fl->set_surplus(fl->count() - duke@435: (ssize_t)((double)fl->desired() * SplitSurplusPercent)); duke@435: } duke@435: } duke@435: duke@435: void CompactibleFreeListSpace::setFLHints() { duke@435: assert_locked(); duke@435: size_t i; duke@435: size_t h = IndexSetSize; duke@435: for (i = IndexSetSize - 1; i != 0; i -= IndexSetStride) { duke@435: 
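    // This loop walks the index set from large sizes to small, so each
    // list's hint ends up naming the smallest strictly larger size that
    // currently has a surplus (or IndexSetSize if there is none). For
    // illustration: if only the 40-word and 64-word lists have a surplus,
    // lists 40 through 63 get hint 64, lists below 40 get hint 40, and
    // list 64 itself ends up with the default hint IndexSetSize.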
FreeList *fl = &_indexedFreeList[i]; duke@435: fl->set_hint(h); duke@435: if (fl->surplus() > 0) { duke@435: h = i; duke@435: } duke@435: } duke@435: } duke@435: duke@435: void CompactibleFreeListSpace::clearFLCensus() { duke@435: assert_locked(); duke@435: int i; duke@435: for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) { duke@435: FreeList *fl = &_indexedFreeList[i]; duke@435: fl->set_prevSweep(fl->count()); duke@435: fl->set_coalBirths(0); duke@435: fl->set_coalDeaths(0); duke@435: fl->set_splitBirths(0); duke@435: fl->set_splitDeaths(0); duke@435: } duke@435: } duke@435: ysr@447: void CompactibleFreeListSpace::endSweepFLCensus(size_t sweep_count) { duke@435: setFLSurplus(); duke@435: setFLHints(); duke@435: if (PrintGC && PrintFLSCensus > 0) { ysr@447: printFLCensus(sweep_count); duke@435: } duke@435: clearFLCensus(); duke@435: assert_locked(); duke@435: _dictionary->endSweepDictCensus(SplitSurplusPercent); duke@435: } duke@435: duke@435: bool CompactibleFreeListSpace::coalOverPopulated(size_t size) { duke@435: if (size < SmallForDictionary) { duke@435: FreeList *fl = &_indexedFreeList[size]; duke@435: return (fl->coalDesired() < 0) || duke@435: ((int)fl->count() > fl->coalDesired()); duke@435: } else { duke@435: return dictionary()->coalDictOverPopulated(size); duke@435: } duke@435: } duke@435: duke@435: void CompactibleFreeListSpace::smallCoalBirth(size_t size) { duke@435: assert(size < SmallForDictionary, "Size too large for indexed list"); duke@435: FreeList *fl = &_indexedFreeList[size]; duke@435: fl->increment_coalBirths(); duke@435: fl->increment_surplus(); duke@435: } duke@435: duke@435: void CompactibleFreeListSpace::smallCoalDeath(size_t size) { duke@435: assert(size < SmallForDictionary, "Size too large for indexed list"); duke@435: FreeList *fl = &_indexedFreeList[size]; duke@435: fl->increment_coalDeaths(); duke@435: fl->decrement_surplus(); duke@435: } duke@435: duke@435: void CompactibleFreeListSpace::coalBirth(size_t size) { duke@435: if (size < SmallForDictionary) { duke@435: smallCoalBirth(size); duke@435: } else { duke@435: dictionary()->dictCensusUpdate(size, duke@435: false /* split */, duke@435: true /* birth */); duke@435: } duke@435: } duke@435: duke@435: void CompactibleFreeListSpace::coalDeath(size_t size) { duke@435: if(size < SmallForDictionary) { duke@435: smallCoalDeath(size); duke@435: } else { duke@435: dictionary()->dictCensusUpdate(size, duke@435: false /* split */, duke@435: false /* birth */); duke@435: } duke@435: } duke@435: duke@435: void CompactibleFreeListSpace::smallSplitBirth(size_t size) { duke@435: assert(size < SmallForDictionary, "Size too large for indexed list"); duke@435: FreeList *fl = &_indexedFreeList[size]; duke@435: fl->increment_splitBirths(); duke@435: fl->increment_surplus(); duke@435: } duke@435: duke@435: void CompactibleFreeListSpace::smallSplitDeath(size_t size) { duke@435: assert(size < SmallForDictionary, "Size too large for indexed list"); duke@435: FreeList *fl = &_indexedFreeList[size]; duke@435: fl->increment_splitDeaths(); duke@435: fl->decrement_surplus(); duke@435: } duke@435: duke@435: void CompactibleFreeListSpace::splitBirth(size_t size) { duke@435: if (size < SmallForDictionary) { duke@435: smallSplitBirth(size); duke@435: } else { duke@435: dictionary()->dictCensusUpdate(size, duke@435: true /* split */, duke@435: true /* birth */); duke@435: } duke@435: } duke@435: duke@435: void CompactibleFreeListSpace::splitDeath(size_t size) { duke@435: if (size < SmallForDictionary) { duke@435: 
smallSplitDeath(size); duke@435: } else { duke@435: dictionary()->dictCensusUpdate(size, duke@435: true /* split */, duke@435: false /* birth */); duke@435: } duke@435: } duke@435: duke@435: void CompactibleFreeListSpace::split(size_t from, size_t to1) { duke@435: size_t to2 = from - to1; duke@435: splitDeath(from); duke@435: splitBirth(to1); duke@435: splitBirth(to2); duke@435: } duke@435: duke@435: void CompactibleFreeListSpace::print() const { duke@435: tty->print(" CompactibleFreeListSpace"); duke@435: Space::print(); duke@435: } duke@435: duke@435: void CompactibleFreeListSpace::prepare_for_verify() { duke@435: assert_locked(); duke@435: repairLinearAllocationBlocks(); duke@435: // Verify that the SpoolBlocks look like free blocks of duke@435: // appropriate sizes... To be done ... duke@435: } duke@435: duke@435: class VerifyAllBlksClosure: public BlkClosure { coleenp@548: private: duke@435: const CompactibleFreeListSpace* _sp; duke@435: const MemRegion _span; duke@435: duke@435: public: duke@435: VerifyAllBlksClosure(const CompactibleFreeListSpace* sp, duke@435: MemRegion span) : _sp(sp), _span(span) { } duke@435: coleenp@548: virtual size_t do_blk(HeapWord* addr) { duke@435: size_t res; duke@435: if (_sp->block_is_obj(addr)) { duke@435: oop p = oop(addr); duke@435: guarantee(p->is_oop(), "Should be an oop"); duke@435: res = _sp->adjustObjectSize(p->size()); duke@435: if (_sp->obj_is_alive(addr)) { duke@435: p->verify(); duke@435: } duke@435: } else { duke@435: FreeChunk* fc = (FreeChunk*)addr; duke@435: res = fc->size(); duke@435: if (FLSVerifyLists && !fc->cantCoalesce()) { duke@435: guarantee(_sp->verifyChunkInFreeLists(fc), duke@435: "Chunk should be on a free list"); duke@435: } duke@435: } duke@435: guarantee(res != 0, "Livelock: no rank reduction!"); duke@435: return res; duke@435: } duke@435: }; duke@435: duke@435: class VerifyAllOopsClosure: public OopClosure { coleenp@548: private: duke@435: const CMSCollector* _collector; duke@435: const CompactibleFreeListSpace* _sp; duke@435: const MemRegion _span; duke@435: const bool _past_remark; duke@435: const CMSBitMap* _bit_map; duke@435: coleenp@548: protected: coleenp@548: void do_oop(void* p, oop obj) { coleenp@548: if (_span.contains(obj)) { // the interior oop points into CMS heap coleenp@548: if (!_span.contains(p)) { // reference from outside CMS heap coleenp@548: // Should be a valid object; the first disjunct below allows coleenp@548: // us to sidestep an assertion in block_is_obj() that insists coleenp@548: // that p be in _sp. Note that several generations (and spaces) coleenp@548: // are spanned by _span (CMS heap) above. coleenp@548: guarantee(!_sp->is_in_reserved(obj) || coleenp@548: _sp->block_is_obj((HeapWord*)obj), coleenp@548: "Should be an object"); coleenp@548: guarantee(obj->is_oop(), "Should be an oop"); coleenp@548: obj->verify(); coleenp@548: if (_past_remark) { coleenp@548: // Remark has been completed, the object should be marked coleenp@548: _bit_map->isMarked((HeapWord*)obj); coleenp@548: } coleenp@548: } else { // reference within CMS heap coleenp@548: if (_past_remark) { coleenp@548: // Remark has been completed -- so the referent should have coleenp@548: // been marked, if referring object is. 
coleenp@548: if (_bit_map->isMarked(_collector->block_start(p))) { coleenp@548: guarantee(_bit_map->isMarked((HeapWord*)obj), "Marking error?"); coleenp@548: } coleenp@548: } coleenp@548: } coleenp@548: } else if (_sp->is_in_reserved(p)) { coleenp@548: // the reference is from FLS, and points out of FLS coleenp@548: guarantee(obj->is_oop(), "Should be an oop"); coleenp@548: obj->verify(); coleenp@548: } coleenp@548: } coleenp@548: coleenp@548: template void do_oop_work(T* p) { coleenp@548: T heap_oop = oopDesc::load_heap_oop(p); coleenp@548: if (!oopDesc::is_null(heap_oop)) { coleenp@548: oop obj = oopDesc::decode_heap_oop_not_null(heap_oop); coleenp@548: do_oop(p, obj); coleenp@548: } coleenp@548: } coleenp@548: duke@435: public: duke@435: VerifyAllOopsClosure(const CMSCollector* collector, duke@435: const CompactibleFreeListSpace* sp, MemRegion span, duke@435: bool past_remark, CMSBitMap* bit_map) : duke@435: OopClosure(), _collector(collector), _sp(sp), _span(span), duke@435: _past_remark(past_remark), _bit_map(bit_map) { } duke@435: coleenp@548: virtual void do_oop(oop* p) { VerifyAllOopsClosure::do_oop_work(p); } coleenp@548: virtual void do_oop(narrowOop* p) { VerifyAllOopsClosure::do_oop_work(p); } duke@435: }; duke@435: duke@435: void CompactibleFreeListSpace::verify(bool ignored) const { duke@435: assert_lock_strong(&_freelistLock); duke@435: verify_objects_initialized(); duke@435: MemRegion span = _collector->_span; duke@435: bool past_remark = (_collector->abstract_state() == duke@435: CMSCollector::Sweeping); duke@435: duke@435: ResourceMark rm; duke@435: HandleMark hm; duke@435: duke@435: // Check integrity of CFL data structures duke@435: _promoInfo.verify(); duke@435: _dictionary->verify(); duke@435: if (FLSVerifyIndexTable) { duke@435: verifyIndexedFreeLists(); duke@435: } duke@435: // Check integrity of all objects and free blocks in space duke@435: { duke@435: VerifyAllBlksClosure cl(this, span); duke@435: ((CompactibleFreeListSpace*)this)->blk_iterate(&cl); // cast off const duke@435: } duke@435: // Check that all references in the heap to FLS duke@435: // are to valid objects in FLS or that references in duke@435: // FLS are to valid objects elsewhere in the heap duke@435: if (FLSVerifyAllHeapReferences) duke@435: { duke@435: VerifyAllOopsClosure cl(_collector, this, span, past_remark, duke@435: _collector->markBitMap()); duke@435: CollectedHeap* ch = Universe::heap(); duke@435: ch->oop_iterate(&cl); // all oops in generations duke@435: ch->permanent_oop_iterate(&cl); // all oops in perm gen duke@435: } duke@435: duke@435: if (VerifyObjectStartArray) { duke@435: // Verify the block offset table duke@435: _bt.verify(); duke@435: } duke@435: } duke@435: duke@435: #ifndef PRODUCT duke@435: void CompactibleFreeListSpace::verifyFreeLists() const { duke@435: if (FLSVerifyLists) { duke@435: _dictionary->verify(); duke@435: verifyIndexedFreeLists(); duke@435: } else { duke@435: if (FLSVerifyDictionary) { duke@435: _dictionary->verify(); duke@435: } duke@435: if (FLSVerifyIndexTable) { duke@435: verifyIndexedFreeLists(); duke@435: } duke@435: } duke@435: } duke@435: #endif duke@435: duke@435: void CompactibleFreeListSpace::verifyIndexedFreeLists() const { duke@435: size_t i = 0; duke@435: for (; i < MinChunkSize; i++) { duke@435: guarantee(_indexedFreeList[i].head() == NULL, "should be NULL"); duke@435: } duke@435: for (; i < IndexSetSize; i++) { duke@435: verifyIndexedFreeList(i); duke@435: } duke@435: } duke@435: duke@435: void 
CompactibleFreeListSpace::verifyIndexedFreeList(size_t size) const { duke@435: guarantee(size % 2 == 0, "Odd slots should be empty"); duke@435: for (FreeChunk* fc = _indexedFreeList[size].head(); fc != NULL; duke@435: fc = fc->next()) { duke@435: guarantee(fc->size() == size, "Size inconsistency"); duke@435: guarantee(fc->isFree(), "!free?"); duke@435: guarantee(fc->next() == NULL || fc->next()->prev() == fc, "Broken list"); duke@435: } duke@435: } duke@435: duke@435: #ifndef PRODUCT duke@435: void CompactibleFreeListSpace::checkFreeListConsistency() const { duke@435: assert(_dictionary->minSize() <= IndexSetSize, duke@435: "Some sizes can't be allocated without recourse to" duke@435: " linear allocation buffers"); duke@435: assert(MIN_TREE_CHUNK_SIZE*HeapWordSize == sizeof(TreeChunk), duke@435: "else MIN_TREE_CHUNK_SIZE is wrong"); duke@435: assert((IndexSetStride == 2 && IndexSetStart == 2) || duke@435: (IndexSetStride == 1 && IndexSetStart == 1), "just checking"); duke@435: assert((IndexSetStride != 2) || (MinChunkSize % 2 == 0), duke@435: "Some for-loops may be incorrectly initialized"); duke@435: assert((IndexSetStride != 2) || (IndexSetSize % 2 == 1), duke@435: "For-loops that iterate over IndexSet with stride 2 may be wrong"); duke@435: } duke@435: #endif duke@435: ysr@447: void CompactibleFreeListSpace::printFLCensus(size_t sweep_count) const { duke@435: assert_lock_strong(&_freelistLock); ysr@447: FreeList total; ysr@447: gclog_or_tty->print("end sweep# " SIZE_FORMAT "\n", sweep_count); ysr@447: FreeList::print_labels_on(gclog_or_tty, "size"); duke@435: size_t totalFree = 0; duke@435: for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) { duke@435: const FreeList *fl = &_indexedFreeList[i]; ysr@447: totalFree += fl->count() * fl->size(); ysr@447: if (i % (40*IndexSetStride) == 0) { ysr@447: FreeList::print_labels_on(gclog_or_tty, "size"); ysr@447: } ysr@447: fl->print_on(gclog_or_tty); ysr@447: total.set_bfrSurp( total.bfrSurp() + fl->bfrSurp() ); ysr@447: total.set_surplus( total.surplus() + fl->surplus() ); ysr@447: total.set_desired( total.desired() + fl->desired() ); ysr@447: total.set_prevSweep( total.prevSweep() + fl->prevSweep() ); ysr@447: total.set_beforeSweep(total.beforeSweep() + fl->beforeSweep()); ysr@447: total.set_count( total.count() + fl->count() ); ysr@447: total.set_coalBirths( total.coalBirths() + fl->coalBirths() ); ysr@447: total.set_coalDeaths( total.coalDeaths() + fl->coalDeaths() ); ysr@447: total.set_splitBirths(total.splitBirths() + fl->splitBirths()); ysr@447: total.set_splitDeaths(total.splitDeaths() + fl->splitDeaths()); duke@435: } ysr@447: total.print_on(gclog_or_tty, "TOTAL"); ysr@447: gclog_or_tty->print_cr("Total free in indexed lists " ysr@447: SIZE_FORMAT " words", totalFree); duke@435: gclog_or_tty->print("growth: %8.5f deficit: %8.5f\n", ysr@447: (double)(total.splitBirths()+total.coalBirths()-total.splitDeaths()-total.coalDeaths())/ ysr@447: (total.prevSweep() != 0 ? (double)total.prevSweep() : 1.0), ysr@447: (double)(total.desired() - total.count())/(total.desired() != 0 ? (double)total.desired() : 1.0)); duke@435: _dictionary->printDictCensus(); duke@435: } duke@435: duke@435: // Return the next displaced header, incrementing the pointer and duke@435: // recycling spool area as necessary. 
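// Illustrative walk-through (buffer size chosen arbitrarily): with
// bufferSize == 8, slots 1 through 7 of the head block hold displaced
// headers. When _firstIndex reaches 8 the head block is exhausted: it is
// unlinked, pushed onto _spareSpool for reuse, _spoolHead advances to the
// next block, and _firstIndex starts over at 1.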
duke@435: markOop PromotionInfo::nextDisplacedHeader() { duke@435: assert(_spoolHead != NULL, "promotionInfo inconsistency"); duke@435: assert(_spoolHead != _spoolTail || _firstIndex < _nextIndex, duke@435: "Empty spool space: no displaced header can be fetched"); duke@435: assert(_spoolHead->bufferSize > _firstIndex, "Off by one error at head?"); duke@435: markOop hdr = _spoolHead->displacedHdr[_firstIndex]; duke@435: // Spool forward duke@435: if (++_firstIndex == _spoolHead->bufferSize) { // last location in this block duke@435: // forward to next block, recycling this block into spare spool buffer duke@435: SpoolBlock* tmp = _spoolHead->nextSpoolBlock; duke@435: assert(_spoolHead != _spoolTail, "Spooling storage mix-up"); duke@435: _spoolHead->nextSpoolBlock = _spareSpool; duke@435: _spareSpool = _spoolHead; duke@435: _spoolHead = tmp; duke@435: _firstIndex = 1; duke@435: NOT_PRODUCT( duke@435: if (_spoolHead == NULL) { // all buffers fully consumed duke@435: assert(_spoolTail == NULL && _nextIndex == 1, duke@435: "spool buffers processing inconsistency"); duke@435: } duke@435: ) duke@435: } duke@435: return hdr; duke@435: } duke@435: duke@435: void PromotionInfo::track(PromotedObject* trackOop) { duke@435: track(trackOop, oop(trackOop)->klass()); duke@435: } duke@435: duke@435: void PromotionInfo::track(PromotedObject* trackOop, klassOop klassOfOop) { duke@435: // make a copy of header as it may need to be spooled duke@435: markOop mark = oop(trackOop)->mark(); duke@435: trackOop->clearNext(); duke@435: if (mark->must_be_preserved_for_cms_scavenge(klassOfOop)) { duke@435: // save non-prototypical header, and mark oop duke@435: saveDisplacedHeader(mark); duke@435: trackOop->setDisplacedMark(); duke@435: } else { duke@435: // we'd like to assert something like the following: duke@435: // assert(mark == markOopDesc::prototype(), "consistency check"); duke@435: // ... but the above won't work because the age bits have not (yet) been duke@435: // cleared. The remainder of the check would be identical to the duke@435: // condition checked in must_be_preserved() above, so we don't really duke@435: // have anything useful to check here! duke@435: } duke@435: if (_promoTail != NULL) { duke@435: assert(_promoHead != NULL, "List consistency"); duke@435: _promoTail->setNext(trackOop); duke@435: _promoTail = trackOop; duke@435: } else { duke@435: assert(_promoHead == NULL, "List consistency"); duke@435: _promoHead = _promoTail = trackOop; duke@435: } duke@435: // Mask as newly promoted, so we can skip over such objects duke@435: // when scanning dirty cards duke@435: assert(!trackOop->hasPromotedMark(), "Should not have been marked"); duke@435: trackOop->setPromotedMark(); duke@435: } duke@435: duke@435: // Save the given displaced header, incrementing the pointer and duke@435: // obtaining more spool area as necessary. 
duke@435: void PromotionInfo::saveDisplacedHeader(markOop hdr) { duke@435: assert(_spoolHead != NULL && _spoolTail != NULL, duke@435: "promotionInfo inconsistency"); duke@435: assert(_spoolTail->bufferSize > _nextIndex, "Off by one error at tail?"); duke@435: _spoolTail->displacedHdr[_nextIndex] = hdr; duke@435: // Spool forward duke@435: if (++_nextIndex == _spoolTail->bufferSize) { // last location in this block duke@435: // get a new spooling block duke@435: assert(_spoolTail->nextSpoolBlock == NULL, "tail should terminate spool list"); duke@435: _splice_point = _spoolTail; // save for splicing duke@435: _spoolTail->nextSpoolBlock = getSpoolBlock(); // might fail duke@435: _spoolTail = _spoolTail->nextSpoolBlock; // might become NULL ... duke@435: // ... but will attempt filling before next promotion attempt duke@435: _nextIndex = 1; duke@435: } duke@435: } duke@435: duke@435: // Ensure that spooling space exists. Return false if spooling space duke@435: // could not be obtained. duke@435: bool PromotionInfo::ensure_spooling_space_work() { duke@435: assert(!has_spooling_space(), "Only call when there is no spooling space"); duke@435: // Try and obtain more spooling space duke@435: SpoolBlock* newSpool = getSpoolBlock(); duke@435: assert(newSpool == NULL || duke@435: (newSpool->bufferSize != 0 && newSpool->nextSpoolBlock == NULL), duke@435: "getSpoolBlock() sanity check"); duke@435: if (newSpool == NULL) { duke@435: return false; duke@435: } duke@435: _nextIndex = 1; duke@435: if (_spoolTail == NULL) { duke@435: _spoolTail = newSpool; duke@435: if (_spoolHead == NULL) { duke@435: _spoolHead = newSpool; duke@435: _firstIndex = 1; duke@435: } else { duke@435: assert(_splice_point != NULL && _splice_point->nextSpoolBlock == NULL, duke@435: "Splice point invariant"); duke@435: // Extra check that _splice_point is connected to list duke@435: #ifdef ASSERT duke@435: { duke@435: SpoolBlock* blk = _spoolHead; duke@435: for (; blk->nextSpoolBlock != NULL; duke@435: blk = blk->nextSpoolBlock); duke@435: assert(blk != NULL && blk == _splice_point, duke@435: "Splice point incorrect"); duke@435: } duke@435: #endif // ASSERT duke@435: _splice_point->nextSpoolBlock = newSpool; duke@435: } duke@435: } else { duke@435: assert(_spoolHead != NULL, "spool list consistency"); duke@435: _spoolTail->nextSpoolBlock = newSpool; duke@435: _spoolTail = newSpool; duke@435: } duke@435: return true; duke@435: } duke@435: duke@435: // Get a free spool buffer from the free pool, getting a new block duke@435: // from the heap if necessary. 
duke@435: SpoolBlock* PromotionInfo::getSpoolBlock() { duke@435: SpoolBlock* res; duke@435: if ((res = _spareSpool) != NULL) { duke@435: _spareSpool = _spareSpool->nextSpoolBlock; duke@435: res->nextSpoolBlock = NULL; duke@435: } else { // spare spool exhausted, get some from heap duke@435: res = (SpoolBlock*)(space()->allocateScratch(refillSize())); duke@435: if (res != NULL) { duke@435: res->init(); duke@435: } duke@435: } duke@435: assert(res == NULL || res->nextSpoolBlock == NULL, "postcondition"); duke@435: return res; duke@435: } duke@435: duke@435: void PromotionInfo::startTrackingPromotions() { duke@435: assert(_spoolHead == _spoolTail && _firstIndex == _nextIndex, duke@435: "spooling inconsistency?"); duke@435: _firstIndex = _nextIndex = 1; duke@435: _tracking = true; duke@435: } duke@435: duke@435: void PromotionInfo::stopTrackingPromotions() { duke@435: assert(_spoolHead == _spoolTail && _firstIndex == _nextIndex, duke@435: "spooling inconsistency?"); duke@435: _firstIndex = _nextIndex = 1; duke@435: _tracking = false; duke@435: } duke@435: duke@435: // When _spoolTail is not NULL, then the slot <_spoolTail, _nextIndex> duke@435: // points to the next slot available for filling. duke@435: // The set of slots holding displaced headers are then all those in the duke@435: // right-open interval denoted by: duke@435: // duke@435: // [ <_spoolHead, _firstIndex>, <_spoolTail, _nextIndex> ) duke@435: // duke@435: // When _spoolTail is NULL, then the set of slots with displaced headers duke@435: // is all those starting at the slot <_spoolHead, _firstIndex> and duke@435: // going up to the last slot of last block in the linked list. duke@435: // In this lartter case, _splice_point points to the tail block of duke@435: // this linked list of blocks holding displaced headers. duke@435: void PromotionInfo::verify() const { duke@435: // Verify the following: duke@435: // 1. the number of displaced headers matches the number of promoted duke@435: // objects that have displaced headers duke@435: // 2. each promoted object lies in this space duke@435: debug_only( duke@435: PromotedObject* junk = NULL; duke@435: assert(junk->next_addr() == (void*)(oop(junk)->mark_addr()), duke@435: "Offset of PromotedObject::_next is expected to align with " duke@435: " the OopDesc::_mark within OopDesc"); duke@435: ) duke@435: // FIXME: guarantee???? duke@435: guarantee(_spoolHead == NULL || _spoolTail != NULL || duke@435: _splice_point != NULL, "list consistency"); duke@435: guarantee(_promoHead == NULL || _promoTail != NULL, "list consistency"); duke@435: // count the number of objects with displaced headers duke@435: size_t numObjsWithDisplacedHdrs = 0; duke@435: for (PromotedObject* curObj = _promoHead; curObj != NULL; curObj = curObj->next()) { duke@435: guarantee(space()->is_in_reserved((HeapWord*)curObj), "Containment"); duke@435: // the last promoted object may fail the mark() != NULL test of is_oop(). duke@435: guarantee(curObj->next() == NULL || oop(curObj)->is_oop(), "must be an oop"); duke@435: if (curObj->hasDisplacedMark()) { duke@435: numObjsWithDisplacedHdrs++; duke@435: } duke@435: } duke@435: // Count the number of displaced headers duke@435: size_t numDisplacedHdrs = 0; duke@435: for (SpoolBlock* curSpool = _spoolHead; duke@435: curSpool != _spoolTail && curSpool != NULL; duke@435: curSpool = curSpool->nextSpoolBlock) { duke@435: // the first entry is just a self-pointer; indices 1 through duke@435: // bufferSize - 1 are occupied (thus, bufferSize - 1 slots). 
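    // Worked example of this counting (sizes are arbitrary): with
    // bufferSize == 8 throughout, _firstIndex == 3 in the head block, one
    // full intermediate block, and _nextIndex == 5 in the tail block, the
    // loop counts 7 + 7 == 14 slots; the adjustment below adds
    // (_nextIndex - _firstIndex) == 2, giving the true total of 16
    // displaced headers (5 in the head, 7 in the middle, 4 in the tail).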
duke@435: guarantee((void*)curSpool->displacedHdr == (void*)&curSpool->displacedHdr, duke@435: "first entry of displacedHdr should be self-referential"); duke@435: numDisplacedHdrs += curSpool->bufferSize - 1; duke@435: } duke@435: guarantee((_spoolHead == _spoolTail) == (numDisplacedHdrs == 0), duke@435: "internal consistency"); duke@435: guarantee(_spoolTail != NULL || _nextIndex == 1, duke@435: "Inconsistency between _spoolTail and _nextIndex"); duke@435: // We overcounted (_firstIndex-1) worth of slots in block duke@435: // _spoolHead and we undercounted (_nextIndex-1) worth of duke@435: // slots in block _spoolTail. We make an appropriate duke@435: // adjustment by subtracting the first and adding the duke@435: // second: - (_firstIndex - 1) + (_nextIndex - 1) duke@435: numDisplacedHdrs += (_nextIndex - _firstIndex); duke@435: guarantee(numDisplacedHdrs == numObjsWithDisplacedHdrs, "Displaced hdr count"); duke@435: } duke@435: duke@435: duke@435: CFLS_LAB::CFLS_LAB(CompactibleFreeListSpace* cfls) : duke@435: _cfls(cfls) duke@435: { duke@435: _blocks_to_claim = CMSParPromoteBlocksToClaim; duke@435: for (size_t i = CompactibleFreeListSpace::IndexSetStart; duke@435: i < CompactibleFreeListSpace::IndexSetSize; duke@435: i += CompactibleFreeListSpace::IndexSetStride) { duke@435: _indexedFreeList[i].set_size(i); duke@435: } duke@435: } duke@435: duke@435: HeapWord* CFLS_LAB::alloc(size_t word_sz) { duke@435: FreeChunk* res; duke@435: word_sz = _cfls->adjustObjectSize(word_sz); duke@435: if (word_sz >= CompactibleFreeListSpace::IndexSetSize) { duke@435: // This locking manages sync with other large object allocations. duke@435: MutexLockerEx x(_cfls->parDictionaryAllocLock(), duke@435: Mutex::_no_safepoint_check_flag); duke@435: res = _cfls->getChunkFromDictionaryExact(word_sz); duke@435: if (res == NULL) return NULL; duke@435: } else { duke@435: FreeList* fl = &_indexedFreeList[word_sz]; duke@435: bool filled = false; //TRAP duke@435: if (fl->count() == 0) { duke@435: bool filled = true; //TRAP duke@435: // Attempt to refill this local free list. duke@435: _cfls->par_get_chunk_of_blocks(word_sz, _blocks_to_claim, fl); duke@435: // If it didn't work, give up. duke@435: if (fl->count() == 0) return NULL; duke@435: } duke@435: res = fl->getChunkAtHead(); duke@435: assert(res != NULL, "Why was count non-zero?"); duke@435: } duke@435: res->markNotFree(); duke@435: assert(!res->isFree(), "shouldn't be marked free"); duke@435: assert(oop(res)->klass() == NULL, "should look uninitialized"); duke@435: // mangle a just allocated object with a distinct pattern. duke@435: debug_only(res->mangleAllocated(word_sz)); duke@435: return (HeapWord*)res; duke@435: } duke@435: duke@435: void CFLS_LAB::retire() { duke@435: for (size_t i = CompactibleFreeListSpace::IndexSetStart; duke@435: i < CompactibleFreeListSpace::IndexSetSize; duke@435: i += CompactibleFreeListSpace::IndexSetStride) { duke@435: if (_indexedFreeList[i].count() > 0) { duke@435: MutexLockerEx x(_cfls->_indexedFreeListParLocks[i], duke@435: Mutex::_no_safepoint_check_flag); duke@435: _cfls->_indexedFreeList[i].prepend(&_indexedFreeList[i]); duke@435: // Reset this list. 
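      // Once prepended, the chunks belong to the shared space, so the
      // thread-local list is re-initialized to an empty FreeList below;
      // set_size(i) is reapplied because a freshly constructed FreeList
      // does not carry the per-list chunk size.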
duke@435:       _indexedFreeList[i] = FreeList();
duke@435:       _indexedFreeList[i].set_size(i);
duke@435:     }
duke@435:   }
duke@435: }
duke@435:
duke@435: void
duke@435: CompactibleFreeListSpace::
duke@435: par_get_chunk_of_blocks(size_t word_sz, size_t n, FreeList* fl) {
duke@435:   assert(fl->count() == 0, "Precondition.");
duke@435:   assert(word_sz < CompactibleFreeListSpace::IndexSetSize,
duke@435:          "Precondition");
duke@435:
duke@435:   // We'll first try to get blocks of exactly word_sz from the indexed set.
duke@435:   // (The loop below is written so that larger multiples of word_sz could
duke@435:   // also be tried, but the "k == 1" guard currently limits it to a single
duke@435:   // pass.)  Failing that, we get a big chunk from the dictionary and split it.
duke@435:   int k = 1;
duke@435:   size_t cur_sz = k * word_sz;
duke@435:   bool found = false;
duke@435:   while (cur_sz < CompactibleFreeListSpace::IndexSetSize && k == 1) {
duke@435:     FreeList* gfl = &_indexedFreeList[cur_sz];
duke@435:     FreeList fl_for_cur_sz;  // Empty.
duke@435:     fl_for_cur_sz.set_size(cur_sz);
duke@435:     {
duke@435:       MutexLockerEx x(_indexedFreeListParLocks[cur_sz],
duke@435:                       Mutex::_no_safepoint_check_flag);
duke@435:       if (gfl->count() != 0) {
duke@435:         size_t nn = MAX2(n/k, (size_t)1);
duke@435:         gfl->getFirstNChunksFromList(nn, &fl_for_cur_sz);
duke@435:         found = true;
duke@435:       }
duke@435:     }
duke@435:     // Now transfer fl_for_cur_sz to fl.  Common case, we hope, is k = 1.
duke@435:     if (found) {
duke@435:       if (k == 1) {
duke@435:         fl->prepend(&fl_for_cur_sz);
duke@435:       } else {
duke@435:         // Divide each block on fl_for_cur_sz up k ways.
duke@435:         FreeChunk* fc;
duke@435:         while ((fc = fl_for_cur_sz.getChunkAtHead()) != NULL) {
duke@435:           // Must do this in reverse order, so that anybody attempting to
duke@435:           // access the main chunk sees it as a single free block until we
duke@435:           // change it.
duke@435:           size_t fc_size = fc->size();
duke@435:           for (int i = k-1; i >= 0; i--) {
duke@435:             FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz);
duke@435:             ffc->setSize(word_sz);
duke@435:             ffc->linkNext(NULL);
duke@435:             ffc->linkPrev(NULL); // Mark as a free block for other (parallel) GC threads.
duke@435:             // Above must occur before BOT is updated below.
duke@435:             // splitting from the right, fc_size == (i + 1) * word_sz
duke@435:             _bt.mark_block((HeapWord*)ffc, word_sz);
duke@435:             fc_size -= word_sz;
duke@435:             _bt.verify_not_unallocated((HeapWord*)ffc, ffc->size());
duke@435:             _bt.verify_single_block((HeapWord*)fc, fc_size);
duke@435:             _bt.verify_single_block((HeapWord*)ffc, ffc->size());
duke@435:             // Push this on "fl".
duke@435:             fl->returnChunkAtHead(ffc);
duke@435:           }
duke@435:           // TRAP
duke@435:           assert(fl->tail()->next() == NULL, "List invariant.");
duke@435:         }
duke@435:       }
duke@435:       return;
duke@435:     }
duke@435:     k++; cur_sz = k * word_sz;
duke@435:   }
duke@435:   // Otherwise, we'll split a block from the dictionary.
duke@435:   FreeChunk* fc = NULL;
duke@435:   FreeChunk* rem_fc = NULL;
duke@435:   size_t rem;
duke@435:   {
duke@435:     MutexLockerEx x(parDictionaryAllocLock(),
duke@435:                     Mutex::_no_safepoint_check_flag);
duke@435:     while (n > 0) {
duke@435:       fc = dictionary()->getChunk(MAX2(n * word_sz,
duke@435:                                   _dictionary->minSize()),
duke@435:                                   FreeBlockDictionary::atLeast);
duke@435:       if (fc != NULL) {
duke@435:         _bt.allocated((HeapWord*)fc, fc->size());  // update _unallocated_blk
duke@435:         dictionary()->dictCensusUpdate(fc->size(),
duke@435:                                        true /*split*/,
duke@435:                                        false /*birth*/);
duke@435:         break;
duke@435:       } else {
duke@435:         n--;
duke@435:       }
duke@435:     }
duke@435:     if (fc == NULL) return;
duke@435:     // Otherwise, split up that block.
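duke@435:     // Worked example (all figures purely illustrative): with word_sz == 8
duke@435:     // and a dictionary chunk of 100 words, nn == 12, so a request of
duke@435:     // n == 16 is clipped to n == 12, leaving rem == 100 - 96 == 4 words.
duke@435:     // If 4 is below MinChunkSize, the adjustment below gives back one
duke@435:     // split piece, making n == 11 and rem == 12.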
duke@435:     size_t nn = fc->size() / word_sz;
duke@435:     n = MIN2(nn, n);
duke@435:     rem = fc->size() - n * word_sz;
duke@435:     // If there is a remainder, and it's too small, allocate one fewer.
duke@435:     if (rem > 0 && rem < MinChunkSize) {
duke@435:       n--; rem += word_sz;
duke@435:     }
duke@435:     // First return the remainder, if any.
duke@435:     // Note that we hold the lock until we decide if we're going to give
duke@435:     // back the remainder to the dictionary, since a contending allocator
duke@435:     // may otherwise see the heap as empty.  (We're willing to take that
duke@435:     // hit if the block is a small block.)
duke@435:     if (rem > 0) {
duke@435:       size_t prefix_size = n * word_sz;
duke@435:       rem_fc = (FreeChunk*)((HeapWord*)fc + prefix_size);
duke@435:       rem_fc->setSize(rem);
duke@435:       rem_fc->linkNext(NULL);
duke@435:       rem_fc->linkPrev(NULL); // Mark as a free block for other (parallel) GC threads.
duke@435:       // Above must occur before BOT is updated below.
duke@435:       _bt.split_block((HeapWord*)fc, fc->size(), prefix_size);
duke@435:       if (rem >= IndexSetSize) {
duke@435:         returnChunkToDictionary(rem_fc);
duke@435:         dictionary()->dictCensusUpdate(fc->size(),
duke@435:                                        true /*split*/,
duke@435:                                        true /*birth*/);
duke@435:         rem_fc = NULL;
duke@435:       }
duke@435:       // Otherwise, return it to the small list below.
duke@435:     }
duke@435:   }
duke@435:   //
duke@435:   if (rem_fc != NULL) {
duke@435:     MutexLockerEx x(_indexedFreeListParLocks[rem],
duke@435:                     Mutex::_no_safepoint_check_flag);
duke@435:     _bt.verify_not_unallocated((HeapWord*)rem_fc, rem_fc->size());
duke@435:     _indexedFreeList[rem].returnChunkAtHead(rem_fc);
duke@435:     smallSplitBirth(rem);
duke@435:   }
duke@435:
duke@435:   // Now do the splitting up.
duke@435:   // Must do this in reverse order, so that anybody attempting to
duke@435:   // access the main chunk sees it as a single free block until we
duke@435:   // change it.
duke@435:   size_t fc_size = n * word_sz;
duke@435:   // All but first chunk in this loop
duke@435:   for (ssize_t i = n-1; i > 0; i--) {
duke@435:     FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz);
duke@435:     ffc->setSize(word_sz);
duke@435:     ffc->linkNext(NULL);
duke@435:     ffc->linkPrev(NULL); // Mark as a free block for other (parallel) GC threads.
duke@435:     // Above must occur before BOT is updated below.
duke@435:     // splitting from the right, fc_size == (i + 1) * word_sz
duke@435:     _bt.mark_block((HeapWord*)ffc, word_sz);
duke@435:     fc_size -= word_sz;
duke@435:     _bt.verify_not_unallocated((HeapWord*)ffc, ffc->size());
duke@435:     _bt.verify_single_block((HeapWord*)ffc, ffc->size());
duke@435:     _bt.verify_single_block((HeapWord*)fc, fc_size);
duke@435:     // Push this on "fl".
duke@435:     fl->returnChunkAtHead(ffc);
duke@435:   }
duke@435:   // First chunk
duke@435:   fc->setSize(word_sz);
duke@435:   fc->linkNext(NULL);
duke@435:   fc->linkPrev(NULL);
duke@435:   _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
duke@435:   _bt.verify_single_block((HeapWord*)fc, fc->size());
duke@435:   fl->returnChunkAtHead(fc);
duke@435:
duke@435:   {
duke@435:     MutexLockerEx x(_indexedFreeListParLocks[word_sz],
duke@435:                     Mutex::_no_safepoint_check_flag);
duke@435:     ssize_t new_births = _indexedFreeList[word_sz].splitBirths() + n;
duke@435:     _indexedFreeList[word_sz].set_splitBirths(new_births);
duke@435:     ssize_t new_surplus = _indexedFreeList[word_sz].surplus() + n;
duke@435:     _indexedFreeList[word_sz].set_surplus(new_surplus);
duke@435:   }
duke@435:
duke@435:   // TRAP
duke@435:   assert(fl->tail()->next() == NULL, "List invariant.");
duke@435: }
duke@435:
duke@435: // Set up the space's par_seq_tasks structure for work claiming
duke@435: // for parallel rescan. See CMSParRemarkTask where this is currently used.
duke@435: // XXX Need to suitably abstract and generalize this and the next
duke@435: // method into one.
duke@435: void
duke@435: CompactibleFreeListSpace::
duke@435: initialize_sequential_subtasks_for_rescan(int n_threads) {
duke@435:   // The "size" of each task is fixed according to rescan_task_size.
duke@435:   assert(n_threads > 0, "Unexpected n_threads argument");
duke@435:   const size_t task_size = rescan_task_size();
duke@435:   size_t n_tasks = (used_region().word_size() + task_size - 1)/task_size;
duke@435:   assert((used_region().start() + (n_tasks - 1)*task_size <
duke@435:           used_region().end()) &&
duke@435:          (used_region().start() + n_tasks*task_size >=
duke@435:           used_region().end()), "n_task calculation incorrect");
duke@435:   SequentialSubTasksDone* pst = conc_par_seq_tasks();
duke@435:   assert(!pst->valid(), "Clobbering existing data?");
duke@435:   pst->set_par_threads(n_threads);
duke@435:   pst->set_n_tasks((int)n_tasks);
duke@435: }
duke@435:
duke@435: // Set up the space's par_seq_tasks structure for work claiming
duke@435: // for parallel concurrent marking. See CMSConcMarkTask where this is currently used.
duke@435: void
duke@435: CompactibleFreeListSpace::
duke@435: initialize_sequential_subtasks_for_marking(int n_threads,
duke@435:                                            HeapWord* low) {
duke@435:   // The "size" of each task is fixed according to marking_task_size.
duke@435:   assert(n_threads > 0, "Unexpected n_threads argument");
duke@435:   const size_t task_size = marking_task_size();
duke@435:   assert(task_size > CardTableModRefBS::card_size_in_words &&
duke@435:          (task_size % CardTableModRefBS::card_size_in_words == 0),
duke@435:          "Otherwise arithmetic below would be incorrect");
duke@435:   MemRegion span = _gen->reserved();
duke@435:   if (low != NULL) {
duke@435:     if (span.contains(low)) {
duke@435:       // Align low down to a card boundary so that
duke@435:       // we can use block_offset_careful() on span boundaries.
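duke@435:       // (For instance, with a 512-byte card -- the usual default, though
duke@435:       // the governing value is CardTableModRefBS::card_size -- a low of
duke@435:       // 0x10000234 would be aligned down to 0x10000200.)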
duke@435:       HeapWord* aligned_low = (HeapWord*)align_size_down((uintptr_t)low,
duke@435:                                  CardTableModRefBS::card_size);
duke@435:       // Clip span prefix at aligned_low
duke@435:       span = span.intersection(MemRegion(aligned_low, span.end()));
duke@435:     } else if (low > span.end()) {
duke@435:       span = MemRegion(low, low);  // Null region
duke@435:     } // else use entire span
duke@435:   }
duke@435:   assert(span.is_empty() ||
duke@435:          ((uintptr_t)span.start() % CardTableModRefBS::card_size == 0),
duke@435:          "span should start at a card boundary");
duke@435:   size_t n_tasks = (span.word_size() + task_size - 1)/task_size;
duke@435:   assert((n_tasks == 0) == span.is_empty(), "Inconsistency");
duke@435:   assert(n_tasks == 0 ||
duke@435:          ((span.start() + (n_tasks - 1)*task_size < span.end()) &&
duke@435:           (span.start() + n_tasks*task_size >= span.end())),
duke@435:          "n_task calculation incorrect");
duke@435:   SequentialSubTasksDone* pst = conc_par_seq_tasks();
duke@435:   assert(!pst->valid(), "Clobbering existing data?");
duke@435:   pst->set_par_threads(n_threads);
duke@435:   pst->set_n_tasks((int)n_tasks);
duke@435: }
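duke@435:
duke@435: // Worked example for the task carving above (all figures purely
duke@435: // illustrative): a span of 1,000,000 words with a task_size of 65,536
duke@435: // words gives n_tasks == (1000000 + 65536 - 1) / 65536 == 16.  The
duke@435: // nominal end of the last task, span.start() + 16 * task_size, reaches
duke@435: // past span.end(), while span.start() + 15 * task_size does not, which
duke@435: // is precisely what the assertion on n_tasks checks.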