/*
 * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
duke@435: * duke@435: */ duke@435: stefank@2314: #ifndef SHARE_VM_MEMORY_BLOCKOFFSETTABLE_INLINE_HPP stefank@2314: #define SHARE_VM_MEMORY_BLOCKOFFSETTABLE_INLINE_HPP stefank@2314: stefank@2314: #include "memory/blockOffsetTable.hpp" stefank@2314: #include "memory/space.hpp" stefank@2314: #include "runtime/safepoint.hpp" stefank@2314: duke@435: ////////////////////////////////////////////////////////////////////////// duke@435: // BlockOffsetTable inlines duke@435: ////////////////////////////////////////////////////////////////////////// duke@435: inline HeapWord* BlockOffsetTable::block_start(const void* addr) const { duke@435: if (addr >= _bottom && addr < _end) { duke@435: return block_start_unsafe(addr); duke@435: } else { duke@435: return NULL; duke@435: } duke@435: } duke@435: duke@435: ////////////////////////////////////////////////////////////////////////// duke@435: // BlockOffsetSharedArray inlines duke@435: ////////////////////////////////////////////////////////////////////////// duke@435: inline size_t BlockOffsetSharedArray::index_for(const void* p) const { duke@435: char* pc = (char*)p; duke@435: assert(pc >= (char*)_reserved.start() && duke@435: pc < (char*)_reserved.end(), duke@435: "p not in range."); duke@435: size_t delta = pointer_delta(pc, _reserved.start(), sizeof(char)); duke@435: size_t result = delta >> LogN; duke@435: assert(result < _vs.committed_size(), "bad index from address"); duke@435: return result; duke@435: } duke@435: duke@435: inline HeapWord* BlockOffsetSharedArray::address_for_index(size_t index) const { duke@435: assert(index < _vs.committed_size(), "bad index"); duke@435: HeapWord* result = _reserved.start() + (index << LogN_words); duke@435: assert(result >= _reserved.start() && result < _reserved.end(), duke@435: "bad address from index"); duke@435: return result; duke@435: } duke@435: ysr@2071: inline void BlockOffsetSharedArray::check_reducing_assertion(bool reducing) { ysr@2071: assert(reducing || 
!SafepointSynchronize::is_at_safepoint() || init_to_zero() || ysr@2071: Thread::current()->is_VM_thread() || ysr@2071: Thread::current()->is_ConcurrentGC_thread() || ysr@2071: ((!Thread::current()->is_ConcurrentGC_thread()) && ysr@2071: ParGCRareEvent_lock->owned_by_self()), "Crack"); ysr@2071: } duke@435: duke@435: ////////////////////////////////////////////////////////////////////////// duke@435: // BlockOffsetArrayNonContigSpace inlines duke@435: ////////////////////////////////////////////////////////////////////////// ysr@2071: inline void BlockOffsetArrayNonContigSpace::freed(HeapWord* blk, ysr@2071: size_t size) { ysr@2071: freed(blk, blk + size); ysr@2071: } ysr@2071: duke@435: inline void BlockOffsetArrayNonContigSpace::freed(HeapWord* blk_start, duke@435: HeapWord* blk_end) { duke@435: // Verify that the BOT shows [blk_start, blk_end) to be one block. duke@435: verify_single_block(blk_start, blk_end); duke@435: // adjust _unallocated_block upward or downward duke@435: // as appropriate duke@435: if (BlockOffsetArrayUseUnallocatedBlock) { duke@435: assert(_unallocated_block <= _end, duke@435: "Inconsistent value for _unallocated_block"); duke@435: if (blk_end >= _unallocated_block && blk_start <= _unallocated_block) { duke@435: // CMS-specific note: a block abutting _unallocated_block to duke@435: // its left is being freed, a new block is being added or duke@435: // we are resetting following a compaction duke@435: _unallocated_block = blk_start; duke@435: } duke@435: } duke@435: } stefank@2314: stefank@2314: #endif // SHARE_VM_MEMORY_BLOCKOFFSETTABLE_INLINE_HPP