src/share/vm/gc_implementation/g1/g1BlockOffsetTable.inline.hpp

author       johnc
date         Mon, 02 Aug 2010 12:51:43 -0700
changeset    2060:2d160770d2e5
parent       1907:c18cbe5936b8
child        2314:f95d63e2154a
permissions  -rw-r--r--

6814437: G1: remove the _new_refs array
Summary: The per-worker _new_refs array is used to hold references that point into the collection set. It is populated during RSet updating and subsequently processed. In the event of an evacuation failure it is processed again to recreate the RSets of regions in the collection set. Remove the per-worker _new_refs array by processing the references directly. Use a DirtyCardQueue to hold the cards containing the references so that the RSets of regions in the collection set can be recreated when handling an evacuation failure.
Reviewed-by: iveresov, jmasa, tonyp
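The idea above, recording the cards that contain references into the collection set rather than buffering the references themselves, is illustrated by the following self-contained sketch. All names here (IntoCSetCardQueue, scan_card, recreate_rsets_after_evac_failure) are hypothetical stand-ins for illustration only, not the actual HotSpot classes or functions touched by this change:

// Sketch only: IntoCSetCardQueue stands in for a DirtyCardQueue.
#include <cstddef>
#include <vector>

typedef unsigned char CardValue;

class IntoCSetCardQueue {
public:
  void enqueue(CardValue* card) { _cards.push_back(card); }
  const std::vector<CardValue*>& cards() const { return _cards; }
private:
  std::vector<CardValue*> _cards;
};

// During RSet updating each reference found on a card is processed
// immediately (no per-worker _new_refs array); if any reference on the
// card points into the collection set, the card itself is enqueued.
template <typename ProcessRef, typename RefersIntoCSet>
void scan_card(CardValue* card, void** refs, size_t n,
               ProcessRef process_ref, RefersIntoCSet refers_into_cset,
               IntoCSetCardQueue& into_cset_dcq) {
  bool has_ref_into_cset = false;
  for (size_t i = 0; i < n; i++) {
    process_ref(refs[i]);
    if (refers_into_cset(refs[i])) {
      has_ref_into_cset = true;
    }
  }
  if (has_ref_into_cset) {
    into_cset_dcq.enqueue(card);   // remember the card, not the references
  }
}

// On an evacuation failure the recorded cards are rescanned, which is
// sufficient to recreate the RSets of the regions in the collection set.
template <typename RescanCard>
void recreate_rsets_after_evac_failure(const IntoCSetCardQueue& q,
                                       RescanCard rescan_card) {
  for (size_t i = 0; i < q.cards().size(); i++) {
    rescan_card(q.cards()[i]);
  }
}

The point of the design is that a card address is enough to re-derive the references later, so nothing needs to be copied per worker; replaying the queue after an evacuation failure rebuilds the affected RSets.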

/*
 * Copyright (c) 2001, 2007, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

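// Return the start of the block containing "addr", or NULL if "addr" lies
// outside the covered range [_bottom, _end).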
inline HeapWord* G1BlockOffsetTable::block_start(const void* addr) {
  if (addr >= _bottom && addr < _end) {
    return block_start_unsafe(addr);
  } else {
    return NULL;
  }
}

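// As above, for const callers; uses block_start_unsafe_const().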
inline HeapWord*
G1BlockOffsetTable::block_start_const(const void* addr) const {
  if (addr >= _bottom && addr < _end) {
    return block_start_unsafe_const(addr);
  } else {
    return NULL;
  }
}

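// Map an address within the reserved region onto the index of the offset
// table entry ("card") that covers it.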
inline size_t G1BlockOffsetSharedArray::index_for(const void* p) const {
  char* pc = (char*)p;
  assert(pc >= (char*)_reserved.start() &&
         pc <  (char*)_reserved.end(),
         "p not in range.");
  size_t delta = pointer_delta(pc, _reserved.start(), sizeof(char));
  size_t result = delta >> LogN;
  assert(result < _vs.committed_size(), "bad index from address");
  return result;
}

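// Inverse mapping: return the first heap word covered by the offset table
// entry at "index".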
inline HeapWord*
G1BlockOffsetSharedArray::address_for_index(size_t index) const {
  assert(index < _vs.committed_size(), "bad index");
  HeapWord* result = _reserved.start() + (index << LogN_words);
  assert(result >= _reserved.start() && result < _reserved.end(),
         "bad address from index");
  return result;
}

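// Return the start of the block that begins at or before "addr".  The entry
// for "addr"'s card either holds the word offset of the block start directly
// (offset < N_words) or encodes a number of cards to skip back; in the
// latter case we keep walking back until a direct offset is found.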
inline HeapWord*
G1BlockOffsetArray::block_at_or_preceding(const void* addr,
                                          bool has_max_index,
                                          size_t max_index) const {
  assert(_array->offset_array(0) == 0, "objects can't cross covered areas");
  size_t index = _array->index_for(addr);
  // We must make sure that the offset table entry we use is valid.  If
  // "addr" is past the end, start at the last known one and go forward.
  if (has_max_index) {
    index = MIN2(index, max_index);
  }
  HeapWord* q = _array->address_for_index(index);

  uint offset = _array->offset_array(index);  // Extend u_char to uint.
  while (offset >= N_words) {
    // The excess of the offset from N_words indicates a power of Base
    // to go back by.
    size_t n_cards_back = BlockOffsetArray::entry_to_cards_back(offset);
    q -= (N_words * n_cards_back);
    assert(q >= _sp->bottom(), "Went below bottom!");
    index -= n_cards_back;
    offset = _array->offset_array(index);
  }
  assert(offset < N_words, "offset too large");
  q -= offset;
  return q;
}

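// Starting from the block at "q" (whose successor starts at "n"), walk
// forward block by block until the block containing "addr" is reached and
// return its start.  A NULL klass word means the object at "q" has not been
// fully initialized yet, so the walk stops conservatively at "q".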
inline HeapWord*
G1BlockOffsetArray::
forward_to_block_containing_addr_const(HeapWord* q, HeapWord* n,
                                       const void* addr) const {
  if (csp() != NULL) {
    if (addr >= csp()->top()) return csp()->top();
    while (n <= addr) {
      q = n;
      oop obj = oop(q);
      if (obj->klass_or_null() == NULL) return q;
      n += obj->size();
    }
  } else {
    while (n <= addr) {
      q = n;
      oop obj = oop(q);
      if (obj->klass_or_null() == NULL) return q;
      n += _sp->block_size(q);
    }
  }
  assert(q <= n, "wrong order for q and addr");
  assert(addr < n, "wrong order for addr and n");
  return q;
}

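// As above, but the successor of "q" is computed here, and the out-of-line
// slow path is taken only if more than one forwarding step is required.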
inline HeapWord*
G1BlockOffsetArray::forward_to_block_containing_addr(HeapWord* q,
                                                     const void* addr) {
  if (oop(q)->klass_or_null() == NULL) return q;
  HeapWord* n = q + _sp->block_size(q);
  // In the normal case, where the query "addr" is a card boundary, and the
  // offset table chunks are the same size as cards, the block starting at
  // "q" will contain addr, so the test below will fail, and we'll fall
  // through quickly.
  if (n <= addr) {
    q = forward_to_block_containing_addr_slow(q, n, addr);
  }
  assert(q <= addr, "wrong order for current and arg");
  return q;
}

//////////////////////////////////////////////////////////////////////////
// G1BlockOffsetArray inlines
//////////////////////////////////////////////////////////////////////////
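// Note the freeing of the block [blk_start, blk_end).  With the
// "unallocated block" optimization enabled, pull _unallocated_block back to
// blk_start if the freed range reaches it.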
inline void G1BlockOffsetArray::freed(HeapWord* blk_start, HeapWord* blk_end) {
  // Verify that the BOT shows [blk_start, blk_end) to be one block.
  verify_single_block(blk_start, blk_end);
  // adjust _unallocated_block upward or downward
  // as appropriate
  if (BlockOffsetArrayUseUnallocatedBlock) {
    assert(_unallocated_block <= _end,
           "Inconsistent value for _unallocated_block");
    if (blk_end >= _unallocated_block && blk_start <= _unallocated_block) {
      // CMS-specific note: a block abutting _unallocated_block to
      // its left is being freed, a new block is being added or
      // we are resetting following a compaction
      _unallocated_block = blk_start;
    }
  }
}

inline void G1BlockOffsetArray::freed(HeapWord* blk, size_t size) {
  freed(blk, blk + size);
}
