/*
 * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
ysr@777: * ysr@777: */ ysr@777: stefank@2314: #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1BLOCKOFFSETTABLE_INLINE_HPP stefank@2314: #define SHARE_VM_GC_IMPLEMENTATION_G1_G1BLOCKOFFSETTABLE_INLINE_HPP stefank@2314: stefank@2314: #include "gc_implementation/g1/g1BlockOffsetTable.hpp" stefank@2314: #include "memory/space.hpp" stefank@2314: ysr@777: inline HeapWord* G1BlockOffsetTable::block_start(const void* addr) { ysr@777: if (addr >= _bottom && addr < _end) { ysr@777: return block_start_unsafe(addr); ysr@777: } else { ysr@777: return NULL; ysr@777: } ysr@777: } ysr@777: ysr@777: inline HeapWord* ysr@777: G1BlockOffsetTable::block_start_const(const void* addr) const { ysr@777: if (addr >= _bottom && addr < _end) { ysr@777: return block_start_unsafe_const(addr); ysr@777: } else { ysr@777: return NULL; ysr@777: } ysr@777: } ysr@777: ysr@777: inline size_t G1BlockOffsetSharedArray::index_for(const void* p) const { ysr@777: char* pc = (char*)p; ysr@777: assert(pc >= (char*)_reserved.start() && ysr@777: pc < (char*)_reserved.end(), ysr@777: "p not in range."); ysr@777: size_t delta = pointer_delta(pc, _reserved.start(), sizeof(char)); ysr@777: size_t result = delta >> LogN; ysr@777: assert(result < _vs.committed_size(), "bad index from address"); ysr@777: return result; ysr@777: } ysr@777: ysr@777: inline HeapWord* ysr@777: G1BlockOffsetSharedArray::address_for_index(size_t index) const { ysr@777: assert(index < _vs.committed_size(), "bad index"); ysr@777: HeapWord* result = _reserved.start() + (index << LogN_words); ysr@777: assert(result >= _reserved.start() && result < _reserved.end(), coleenp@4037: err_msg("bad address from index result " PTR_FORMAT coleenp@4037: " _reserved.start() " PTR_FORMAT " _reserved.end() " coleenp@4037: PTR_FORMAT, coleenp@4037: result, _reserved.start(), _reserved.end())); ysr@777: return result; ysr@777: } ysr@777: ysr@777: inline HeapWord* ysr@777: G1BlockOffsetArray::block_at_or_preceding(const void* addr, ysr@777: bool has_max_index, 
ysr@777: size_t max_index) const { ysr@777: assert(_array->offset_array(0) == 0, "objects can't cross covered areas"); ysr@777: size_t index = _array->index_for(addr); ysr@777: // We must make sure that the offset table entry we use is valid. If ysr@777: // "addr" is past the end, start at the last known one and go forward. ysr@777: if (has_max_index) { ysr@777: index = MIN2(index, max_index); ysr@777: } ysr@777: HeapWord* q = _array->address_for_index(index); ysr@777: ysr@777: uint offset = _array->offset_array(index); // Extend u_char to uint. ysr@777: while (offset >= N_words) { ysr@777: // The excess of the offset from N_words indicates a power of Base ysr@777: // to go back by. ysr@777: size_t n_cards_back = BlockOffsetArray::entry_to_cards_back(offset); ysr@777: q -= (N_words * n_cards_back); ysr@777: assert(q >= _sp->bottom(), "Went below bottom!"); ysr@777: index -= n_cards_back; ysr@777: offset = _array->offset_array(index); ysr@777: } ysr@777: assert(offset < N_words, "offset too large"); ysr@777: q -= offset; ysr@777: return q; ysr@777: } ysr@777: ysr@777: inline HeapWord* ysr@777: G1BlockOffsetArray:: ysr@777: forward_to_block_containing_addr_const(HeapWord* q, HeapWord* n, ysr@777: const void* addr) const { ysr@777: if (csp() != NULL) { ysr@777: if (addr >= csp()->top()) return csp()->top(); ysr@777: while (n <= addr) { ysr@777: q = n; ysr@777: oop obj = oop(q); ysr@1280: if (obj->klass_or_null() == NULL) return q; ysr@777: n += obj->size(); ysr@777: } ysr@777: } else { ysr@777: while (n <= addr) { ysr@777: q = n; ysr@777: oop obj = oop(q); ysr@1280: if (obj->klass_or_null() == NULL) return q; ysr@777: n += _sp->block_size(q); ysr@777: } ysr@777: } ysr@777: assert(q <= n, "wrong order for q and addr"); ysr@777: assert(addr < n, "wrong order for addr and n"); ysr@777: return q; ysr@777: } ysr@777: ysr@777: inline HeapWord* ysr@777: G1BlockOffsetArray::forward_to_block_containing_addr(HeapWord* q, ysr@777: const void* addr) { ysr@1280: if 
(oop(q)->klass_or_null() == NULL) return q; ysr@777: HeapWord* n = q + _sp->block_size(q); ysr@777: // In the normal case, where the query "addr" is a card boundary, and the ysr@777: // offset table chunks are the same size as cards, the block starting at ysr@777: // "q" will contain addr, so the test below will fail, and we'll fall ysr@777: // through quickly. ysr@777: if (n <= addr) { ysr@777: q = forward_to_block_containing_addr_slow(q, n, addr); ysr@777: } ysr@777: assert(q <= addr, "wrong order for current and arg"); ysr@777: return q; ysr@777: } ysr@777: ysr@777: ////////////////////////////////////////////////////////////////////////// ysr@777: // BlockOffsetArrayNonContigSpace inlines ysr@777: ////////////////////////////////////////////////////////////////////////// ysr@777: inline void G1BlockOffsetArray::freed(HeapWord* blk_start, HeapWord* blk_end) { ysr@777: // Verify that the BOT shows [blk_start, blk_end) to be one block. ysr@777: verify_single_block(blk_start, blk_end); ysr@777: // adjust _unallocated_block upward or downward ysr@777: // as appropriate ysr@777: if (BlockOffsetArrayUseUnallocatedBlock) { ysr@777: assert(_unallocated_block <= _end, ysr@777: "Inconsistent value for _unallocated_block"); ysr@777: if (blk_end >= _unallocated_block && blk_start <= _unallocated_block) { ysr@777: // CMS-specific note: a block abutting _unallocated_block to ysr@777: // its left is being freed, a new block is being added or ysr@777: // we are resetting following a compaction ysr@777: _unallocated_block = blk_start; ysr@777: } ysr@777: } ysr@777: } ysr@777: ysr@777: inline void G1BlockOffsetArray::freed(HeapWord* blk, size_t size) { ysr@777: freed(blk, blk + size); ysr@777: } stefank@2314: stefank@2314: #endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1BLOCKOFFSETTABLE_INLINE_HPP