Thu, 26 Jun 2014 11:36:58 +0200
8047818: G1 HeapRegions can no longer be ContiguousSpaces
Summary: Change parent of G1OffsetTableContigSpace to CompactibleSpace, reimplement missing functionality
Reviewed-by: stefank, jmasa, tschatzl
duke@435 | 1 | /* |
coleenp@4037 | 2 | * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved. |
duke@435 | 3 | * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
duke@435 | 4 | * |
duke@435 | 5 | * This code is free software; you can redistribute it and/or modify it |
duke@435 | 6 | * under the terms of the GNU General Public License version 2 only, as |
duke@435 | 7 | * published by the Free Software Foundation. |
duke@435 | 8 | * |
duke@435 | 9 | * This code is distributed in the hope that it will be useful, but WITHOUT |
duke@435 | 10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
duke@435 | 11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
duke@435 | 12 | * version 2 for more details (a copy is included in the LICENSE file that |
duke@435 | 13 | * accompanied this code). |
duke@435 | 14 | * |
duke@435 | 15 | * You should have received a copy of the GNU General Public License version |
duke@435 | 16 | * 2 along with this work; if not, write to the Free Software Foundation, |
duke@435 | 17 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. |
duke@435 | 18 | * |
trims@1907 | 19 | * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
trims@1907 | 20 | * or visit www.oracle.com if you need additional information or have any |
trims@1907 | 21 | * questions. |
duke@435 | 22 | * |
duke@435 | 23 | */ |
duke@435 | 24 | |
stefank@2314 | 25 | #ifndef SHARE_VM_MEMORY_BLOCKOFFSETTABLE_INLINE_HPP |
stefank@2314 | 26 | #define SHARE_VM_MEMORY_BLOCKOFFSETTABLE_INLINE_HPP |
stefank@2314 | 27 | |
stefank@2314 | 28 | #include "memory/blockOffsetTable.hpp" |
stefank@2314 | 29 | #include "memory/space.hpp" |
stefank@2314 | 30 | #include "runtime/safepoint.hpp" |
stefank@2314 | 31 | |
duke@435 | 32 | ////////////////////////////////////////////////////////////////////////// |
duke@435 | 33 | // BlockOffsetTable inlines |
duke@435 | 34 | ////////////////////////////////////////////////////////////////////////// |
duke@435 | 35 | inline HeapWord* BlockOffsetTable::block_start(const void* addr) const { |
duke@435 | 36 | if (addr >= _bottom && addr < _end) { |
duke@435 | 37 | return block_start_unsafe(addr); |
duke@435 | 38 | } else { |
duke@435 | 39 | return NULL; |
duke@435 | 40 | } |
duke@435 | 41 | } |
duke@435 | 42 | |
duke@435 | 43 | ////////////////////////////////////////////////////////////////////////// |
duke@435 | 44 | // BlockOffsetSharedArray inlines |
duke@435 | 45 | ////////////////////////////////////////////////////////////////////////// |
duke@435 | 46 | inline size_t BlockOffsetSharedArray::index_for(const void* p) const { |
duke@435 | 47 | char* pc = (char*)p; |
duke@435 | 48 | assert(pc >= (char*)_reserved.start() && |
duke@435 | 49 | pc < (char*)_reserved.end(), |
duke@435 | 50 | "p not in range."); |
duke@435 | 51 | size_t delta = pointer_delta(pc, _reserved.start(), sizeof(char)); |
duke@435 | 52 | size_t result = delta >> LogN; |
duke@435 | 53 | assert(result < _vs.committed_size(), "bad index from address"); |
duke@435 | 54 | return result; |
duke@435 | 55 | } |
duke@435 | 56 | |
duke@435 | 57 | inline HeapWord* BlockOffsetSharedArray::address_for_index(size_t index) const { |
duke@435 | 58 | assert(index < _vs.committed_size(), "bad index"); |
duke@435 | 59 | HeapWord* result = _reserved.start() + (index << LogN_words); |
duke@435 | 60 | assert(result >= _reserved.start() && result < _reserved.end(), |
duke@435 | 61 | "bad address from index"); |
duke@435 | 62 | return result; |
duke@435 | 63 | } |
duke@435 | 64 | |
// Debug-only sanity check on who may update offset-table entries.
// The assert passes when any one of the following holds:
//   - the update is "reducing" (monotonically decreasing entry values,
//     which is safe to do concurrently), or
//   - we are not at a safepoint, or
//   - the table is initialized to zero (init_to_zero()), or
//   - the caller is the VM thread or a concurrent GC thread, or
//   - a non-concurrent-GC thread holds ParGCRareEvent_lock.
// NOTE(review): clause order matters for short-circuit evaluation; the
// redundant-looking !is_ConcurrentGC_thread() in the last clause mirrors the
// preceding clause — presumably kept for documentation of intent. Do not
// simplify without confirming against the original changeset (ysr@2071).
inline void BlockOffsetSharedArray::check_reducing_assertion(bool reducing) {
  assert(reducing || !SafepointSynchronize::is_at_safepoint() || init_to_zero() ||
         Thread::current()->is_VM_thread() ||
         Thread::current()->is_ConcurrentGC_thread() ||
         ((!Thread::current()->is_ConcurrentGC_thread()) &&
          ParGCRareEvent_lock->owned_by_self()), "Crack");
}
duke@435 | 72 | |
duke@435 | 73 | ////////////////////////////////////////////////////////////////////////// |
duke@435 | 74 | // BlockOffsetArrayNonContigSpace inlines |
duke@435 | 75 | ////////////////////////////////////////////////////////////////////////// |
ysr@2071 | 76 | inline void BlockOffsetArrayNonContigSpace::freed(HeapWord* blk, |
ysr@2071 | 77 | size_t size) { |
ysr@2071 | 78 | freed(blk, blk + size); |
ysr@2071 | 79 | } |
ysr@2071 | 80 | |
duke@435 | 81 | inline void BlockOffsetArrayNonContigSpace::freed(HeapWord* blk_start, |
duke@435 | 82 | HeapWord* blk_end) { |
duke@435 | 83 | // Verify that the BOT shows [blk_start, blk_end) to be one block. |
duke@435 | 84 | verify_single_block(blk_start, blk_end); |
duke@435 | 85 | // adjust _unallocated_block upward or downward |
duke@435 | 86 | // as appropriate |
duke@435 | 87 | if (BlockOffsetArrayUseUnallocatedBlock) { |
duke@435 | 88 | assert(_unallocated_block <= _end, |
duke@435 | 89 | "Inconsistent value for _unallocated_block"); |
duke@435 | 90 | if (blk_end >= _unallocated_block && blk_start <= _unallocated_block) { |
duke@435 | 91 | // CMS-specific note: a block abutting _unallocated_block to |
duke@435 | 92 | // its left is being freed, a new block is being added or |
duke@435 | 93 | // we are resetting following a compaction |
duke@435 | 94 | _unallocated_block = blk_start; |
duke@435 | 95 | } |
duke@435 | 96 | } |
duke@435 | 97 | } |
stefank@2314 | 98 | |
stefank@2314 | 99 | #endif // SHARE_VM_MEMORY_BLOCKOFFSETTABLE_INLINE_HPP |