/*
 * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc_implementation/parNew/parGCAllocBuffer.hpp"
#include "memory/sharedHeap.hpp"
#include "oops/arrayOop.hpp"
#include "oops/oop.inline.hpp"

ParGCAllocBuffer::ParGCAllocBuffer(size_t desired_plab_sz_) :
  _word_sz(desired_plab_sz_), _bottom(NULL), _top(NULL),
  _end(NULL), _hard_end(NULL),
  _retained(false), _retained_filler(),
  _allocated(0), _wasted(0)
{
  assert(min_size() > AlignmentReserve, "Inconsistency!");
  // arrayOopDesc::header_size depends on command line initialization.
  FillerHeaderSize = align_object_size(arrayOopDesc::header_size(T_INT));
  AlignmentReserve = oopDesc::header_size() > MinObjAlignment ?
                     FillerHeaderSize : 0;
}

size_t ParGCAllocBuffer::FillerHeaderSize;

// If the minimum object size is greater than MinObjAlignment, we can
// end up with a shard at the end of the buffer that's smaller than
// the smallest object.  We can't allow that because the buffer must
// look like it's full of objects when we retire it, so we make
// sure we have enough space for a filler int array object.
size_t ParGCAllocBuffer::AlignmentReserve;

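// For example, if object headers are larger than the minimum object
// alignment, retiring a buffer could otherwise leave a tail below _hard_end
// that is too small to hold even a filler int array header.  Keeping the
// allocation limit _end at least AlignmentReserve words short of _hard_end
// (as the retire and allocate_slow code below does) guarantees that any
// leftover tail can always be covered by a filler object.
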
void ParGCAllocBuffer::retire(bool end_of_gc, bool retain) {
  assert(!retain || end_of_gc, "Can only retain at GC end.");
  if (_retained) {
    // If the buffer had been retained shorten the previous filler object.
    assert(_retained_filler.end() <= _top, "INVARIANT");
    CollectedHeap::fill_with_object(_retained_filler);
    // Wasted space book-keeping, otherwise (normally) done in invalidate()
    _wasted += _retained_filler.word_size();
    _retained = false;
  }
  assert(!end_of_gc || !_retained, "At this point, end_of_gc ==> !_retained.");
  if (_top < _hard_end) {
    CollectedHeap::fill_with_object(_top, _hard_end);
    if (!retain) {
      invalidate();
    } else {
      // Is there wasted space we'd like to retain for the next GC?
      if (pointer_delta(_end, _top) > FillerHeaderSize) {
        _retained = true;
        _retained_filler = MemRegion(_top, FillerHeaderSize);
        _top = _top + FillerHeaderSize;
      } else {
        invalidate();
      }
    }
  }
}

void ParGCAllocBuffer::flush_stats(PLABStats* stats) {
  assert(ResizePLAB, "Wasted work");
  stats->add_allocated(_allocated);
  stats->add_wasted(_wasted);
  stats->add_unused(pointer_delta(_end, _top));
}

// Compute desired plab size and latch result for later
// use.  This should be called once at the end of parallel
// scavenge; it clears the sensor accumulators.
void PLABStats::adjust_desired_plab_sz() {
  assert(ResizePLAB, "Not set");
  if (_allocated == 0) {
    assert(_unused == 0, "Inconsistency in PLAB stats");
    _allocated = 1;
  }
  double wasted_frac    = (double)_unused/(double)_allocated;
  size_t target_refills = (size_t)((wasted_frac*TargetSurvivorRatio)/
                                   TargetPLABWastePct);
  if (target_refills == 0) {
    target_refills = 1;
  }
  _used = _allocated - _wasted - _unused;
  size_t plab_sz = _used/(target_refills*ParallelGCThreads);
  if (PrintPLAB) gclog_or_tty->print(" (plab_sz = " SIZE_FORMAT " ", plab_sz);
  // Take historical weighted average
  _filter.sample(plab_sz);
  // Clip from above and below, and align to object boundary
  plab_sz = MAX2(min_size(), (size_t)_filter.average());
  plab_sz = MIN2(max_size(), plab_sz);
  plab_sz = align_object_size(plab_sz);
  // Latch the result
  if (PrintPLAB) gclog_or_tty->print(" desired_plab_sz = " SIZE_FORMAT ") ", plab_sz);
  if (ResizePLAB) {
    _desired_plab_sz = plab_sz;
  }
  // Now clear the accumulators for next round:
  // note this needs to be fixed in the case where we
  // are retaining across scavenges. FIX ME !!! XXX
  _allocated = 0;
  _wasted    = 0;
  _unused    = 0;
}

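// A worked example of the sizing above, with illustrative numbers only
// (TargetSurvivorRatio = 50, TargetPLABWastePct = 10, ParallelGCThreads = 4):
// if the last scavenge allocated 100000 words of PLAB space, of which 2000
// went unused and 500 were wasted, then
//   wasted_frac    = 2000 / 100000            = 0.02
//   target_refills = (0.02 * 50) / 10         = 0.1, clipped up to 1
//   _used          = 100000 - 500 - 2000      = 97500
//   plab_sz        = 97500 / (1 * 4)          = 24375 words
// before the exponential filter, the [min_size, max_size] clip and the
// object-size alignment are applied.
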
#ifndef PRODUCT
void ParGCAllocBuffer::print() {
  gclog_or_tty->print("parGCAllocBuffer: _bottom: %p _top: %p _end: %p _hard_end: %p "
                      "_retained: %c _retained_filler: [%p,%p)\n",
                      _bottom, _top, _end, _hard_end,
                      "FT"[_retained], _retained_filler.start(), _retained_filler.end());
}
#endif // !PRODUCT

const size_t ParGCAllocBufferWithBOT::ChunkSizeInWords =
  MIN2(CardTableModRefBS::par_chunk_heapword_alignment(),
       ((size_t)Generation::GenGrain)/HeapWordSize);
const size_t ParGCAllocBufferWithBOT::ChunkSizeInBytes =
  MIN2(CardTableModRefBS::par_chunk_heapword_alignment() * HeapWordSize,
       (size_t)Generation::GenGrain);

ParGCAllocBufferWithBOT::ParGCAllocBufferWithBOT(size_t word_sz,
                                                 BlockOffsetSharedArray* bsa) :
  ParGCAllocBuffer(word_sz),
  _bsa(bsa),
  _bt(bsa, MemRegion(_bottom, _hard_end)),
  _true_end(_hard_end)
{}

// The buffer comes with its own BOT, with a shared (obviously) underlying
// BlockOffsetSharedArray.  We manipulate this BOT in the normal way
// as we would for any contiguous space.  However, on occasion we
// need to do some buffer surgery at the extremities before we
// start using the body of the buffer for allocations.  Such surgery
// (as explained elsewhere) is to prevent allocation on a card that
// is in the process of being walked concurrently by another GC thread.
// When such surgery happens at a point that is far removed (to the
// right of the current allocation point, top), we use the "contig"
// parameter below to directly manipulate the shared array without
// modifying the _next_threshold state in the BOT.
void ParGCAllocBufferWithBOT::fill_region_with_block(MemRegion mr,
                                                     bool contig) {
  CollectedHeap::fill_with_object(mr);
  if (contig) {
    _bt.alloc_block(mr.start(), mr.end());
  } else {
    _bt.BlockOffsetArray::alloc_block(mr.start(), mr.end());
  }
}

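// For instance, retire() below passes contig = true when filling the suffix
// of the first card at pre_top, but contig = false for the last-card prefix
// and for the large filler region to the right of top, so that the BOT's
// threshold state is left untouched for regions another thread may be
// scanning concurrently.
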
// Slow-path allocation: if this (retained) buffer still has space beyond the
// current _hard_end, extend the usable region by one parallel chunk
// (ChunkSizeInWords), plant a fresh retained filler header at the old
// _hard_end, and retry the allocation.
HeapWord* ParGCAllocBufferWithBOT::allocate_slow(size_t word_sz) {
  HeapWord* res = NULL;
  if (_true_end > _hard_end) {
    assert((HeapWord*)align_size_down(intptr_t(_hard_end),
                                      ChunkSizeInBytes) == _hard_end,
           "or else _true_end should be equal to _hard_end");
    assert(_retained, "or else _true_end should be equal to _hard_end");
    assert(_retained_filler.end() <= _top, "INVARIANT");
    CollectedHeap::fill_with_object(_retained_filler);
    if (_top < _hard_end) {
      fill_region_with_block(MemRegion(_top, _hard_end), true);
    }
    HeapWord* next_hard_end = MIN2(_true_end, _hard_end + ChunkSizeInWords);
    _retained_filler = MemRegion(_hard_end, FillerHeaderSize);
    _bt.alloc_block(_retained_filler.start(), _retained_filler.word_size());
    _top      = _retained_filler.end();
    _hard_end = next_hard_end;
    _end      = _hard_end - AlignmentReserve;
    res = ParGCAllocBuffer::allocate(word_sz);
    if (res != NULL) {
      _bt.alloc_block(res, word_sz);
    }
  }
  return res;
}

void
ParGCAllocBufferWithBOT::undo_allocation(HeapWord* obj, size_t word_sz) {
  ParGCAllocBuffer::undo_allocation(obj, word_sz);
  // This may back us up beyond the previous threshold, so reset.
  _bt.set_region(MemRegion(_top, _hard_end));
  _bt.initialize_threshold();
}

void ParGCAllocBufferWithBOT::retire(bool end_of_gc, bool retain) {
  assert(!retain || end_of_gc, "Can only retain at GC end.");
  if (_retained) {
    // We're about to make the retained_filler into a block.
    _bt.BlockOffsetArray::alloc_block(_retained_filler.start(),
                                      _retained_filler.end());
  }
  // Reset _hard_end to _true_end (and update _end)
  if (retain && _hard_end != NULL) {
    assert(_hard_end <= _true_end, "Invariant.");
    _hard_end = _true_end;
    _end      = MAX2(_top, _hard_end - AlignmentReserve);
    assert(_end <= _hard_end, "Invariant.");
  }
  _true_end = _hard_end;
  HeapWord* pre_top = _top;

  ParGCAllocBuffer::retire(end_of_gc, retain);
  // Now any old _retained_filler is cut back to size, the free part is
  // filled with a filler object, and top is past the header of that
  // object.

  if (retain && _top < _end) {
    assert(end_of_gc && retain, "Or else retain should be false.");
    // If the lab does not start on a card boundary, we don't want to
    // allocate onto that card, since that might lead to concurrent
    // allocation and card scanning, which we don't support.  So we fill
    // the first card with a garbage object.
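    // For example, with 512-byte cards, if pre_top falls somewhere in the
    // middle of a card, the words from pre_top up to the start of the next
    // card (second_card_start below) are covered by a filler block, so no
    // object allocated out of this buffer shares that first card with data
    // another thread may still be scanning.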
    size_t first_card_index = _bsa->index_for(pre_top);
    HeapWord* first_card_start = _bsa->address_for_index(first_card_index);
    if (first_card_start < pre_top) {
      HeapWord* second_card_start =
        _bsa->inc_by_region_size(first_card_start);

      // Ensure enough room to fill with the smallest block
      second_card_start = MAX2(second_card_start, pre_top + AlignmentReserve);

      // If the end is already in the first card, don't go beyond it!
      // Or if the remainder is too small for a filler object, gobble it up.
      if (_hard_end < second_card_start ||
          pointer_delta(_hard_end, second_card_start) < AlignmentReserve) {
        second_card_start = _hard_end;
      }
      if (pre_top < second_card_start) {
        MemRegion first_card_suffix(pre_top, second_card_start);
        fill_region_with_block(first_card_suffix, true);
      }
      pre_top = second_card_start;
      _top = pre_top;
      _end = MAX2(_top, _hard_end - AlignmentReserve);
    }

    // If the lab does not end on a card boundary, we don't want to
    // allocate onto that card, since that might lead to concurrent
    // allocation and card scanning, which we don't support.  So we fill
    // the last card with a garbage object.
    size_t last_card_index = _bsa->index_for(_hard_end);
    HeapWord* last_card_start = _bsa->address_for_index(last_card_index);
    if (last_card_start < _hard_end) {

      // Ensure enough room to fill with the smallest block
      last_card_start = MIN2(last_card_start, _hard_end - AlignmentReserve);

      // If the top is already in the last card, don't go back beyond it!
      // Or if the remainder is too small for a filler object, gobble it up.
      if (_top > last_card_start ||
          pointer_delta(last_card_start, _top) < AlignmentReserve) {
        last_card_start = _top;
      }
      if (last_card_start < _hard_end) {
        MemRegion last_card_prefix(last_card_start, _hard_end);
        fill_region_with_block(last_card_prefix, false);
      }
      _hard_end = last_card_start;
      _end      = MAX2(_top, _hard_end - AlignmentReserve);
      _true_end = _hard_end;
      assert(_end <= _hard_end, "Invariant.");
    }

    // At this point:
    // 1) we had a filler object from the original top to hard_end.
    // 2) We've filled in any partial cards at the front and back.
    if (pre_top < _hard_end) {
      // Now we can reset the _bt to do allocation in the given area.
      MemRegion new_filler(pre_top, _hard_end);
      fill_region_with_block(new_filler, false);
      _top = pre_top + ParGCAllocBuffer::FillerHeaderSize;
      // If there's no space left, don't retain.
      if (_top >= _end) {
        _retained = false;
        invalidate();
        return;
      }
      _retained_filler = MemRegion(pre_top, _top);
      _bt.set_region(MemRegion(_top, _hard_end));
      _bt.initialize_threshold();
      assert(_bt.threshold() > _top, "initialize_threshold failed!");

      // There may be other reasons for queries into the middle of the
      // filler object.  When such queries are done in parallel with
      // allocation, bad things can happen, if the query involves object
      // iteration.
      // So we ensure that such queries do not involve object iteration,
      // by putting another filler object on the boundaries of such
      // queries.  One such is the object spanning a parallel card
      // chunk boundary.

      // "chunk_boundary" is the address of the first chunk boundary less
      // than "hard_end".
      HeapWord* chunk_boundary =
        (HeapWord*)align_size_down(intptr_t(_hard_end-1), ChunkSizeInBytes);
      assert(chunk_boundary < _hard_end, "Or else above did not work.");
      assert(pointer_delta(_true_end, chunk_boundary) >= AlignmentReserve,
             "Consequence of last card handling above.");

      if (_top <= chunk_boundary) {
        assert(_true_end == _hard_end, "Invariant.");
        while (_top <= chunk_boundary) {
          assert(pointer_delta(_hard_end, chunk_boundary) >= AlignmentReserve,
                 "Consequence of last card handling above.");
          _bt.BlockOffsetArray::alloc_block(chunk_boundary, _hard_end);
          CollectedHeap::fill_with_object(chunk_boundary, _hard_end);
          _hard_end = chunk_boundary;
          chunk_boundary -= ChunkSizeInWords;
        }
        _end = _hard_end - AlignmentReserve;
        assert(_top <= _end, "Invariant.");
        // Now reset the initial filler chunk so it doesn't overlap with
        // the one(s) inserted above.
        MemRegion new_filler(pre_top, _hard_end);
        fill_region_with_block(new_filler, false);
      }
    } else {
      _retained = false;
      invalidate();
    }
  } else {
    assert(!end_of_gc ||
           (!_retained && _true_end == _hard_end), "Checking.");
  }
  assert(_end <= _hard_end, "Invariant.");
  assert(_top < _end || _top == _hard_end, "Invariant");
}