Sat, 01 Sep 2012 13:25:18 -0400
6964458: Reimplement class meta-data storage to use native memory
Summary: Remove PermGen, allocate meta-data in metaspace linked to class loaders, rewrite GC walking, rewrite and rename metadata to be C++ classes
Reviewed-by: jmasa, stefank, never, coleenp, kvn, brutisso, mgerdin, dholmes, jrose, twisti, roland
Contributed-by: jmasa <jon.masamitsu@oracle.com>, stefank <stefan.karlsson@oracle.com>, mgerdin <mikael.gerdin@oracle.com>, never <tom.rodriguez@oracle.com>
1 /*
2 * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
25 #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1BLOCKOFFSETTABLE_INLINE_HPP
26 #define SHARE_VM_GC_IMPLEMENTATION_G1_G1BLOCKOFFSETTABLE_INLINE_HPP
28 #include "gc_implementation/g1/g1BlockOffsetTable.hpp"
29 #include "memory/space.hpp"
31 inline HeapWord* G1BlockOffsetTable::block_start(const void* addr) {
32 if (addr >= _bottom && addr < _end) {
33 return block_start_unsafe(addr);
34 } else {
35 return NULL;
36 }
37 }
39 inline HeapWord*
40 G1BlockOffsetTable::block_start_const(const void* addr) const {
41 if (addr >= _bottom && addr < _end) {
42 return block_start_unsafe_const(addr);
43 } else {
44 return NULL;
45 }
46 }
48 inline size_t G1BlockOffsetSharedArray::index_for(const void* p) const {
49 char* pc = (char*)p;
50 assert(pc >= (char*)_reserved.start() &&
51 pc < (char*)_reserved.end(),
52 "p not in range.");
53 size_t delta = pointer_delta(pc, _reserved.start(), sizeof(char));
54 size_t result = delta >> LogN;
55 assert(result < _vs.committed_size(), "bad index from address");
56 return result;
57 }
59 inline HeapWord*
60 G1BlockOffsetSharedArray::address_for_index(size_t index) const {
61 assert(index < _vs.committed_size(), "bad index");
62 HeapWord* result = _reserved.start() + (index << LogN_words);
63 assert(result >= _reserved.start() && result < _reserved.end(),
64 err_msg("bad address from index result " PTR_FORMAT
65 " _reserved.start() " PTR_FORMAT " _reserved.end() "
66 PTR_FORMAT,
67 result, _reserved.start(), _reserved.end()));
68 return result;
69 }
// Return the address of a block that starts at or before "addr": walk
// backwards through the offset table from the card containing "addr"
// until an entry is found that encodes a direct word offset (< N_words)
// to a block start.
inline HeapWord*
G1BlockOffsetArray::block_at_or_preceding(const void* addr,
                                          bool has_max_index,
                                          size_t max_index) const {
  assert(_array->offset_array(0) == 0, "objects can't cross covered areas");
  size_t index = _array->index_for(addr);
  // We must make sure that the offset table entry we use is valid. If
  // "addr" is past the end, start at the last known one and go forward.
  if (has_max_index) {
    index = MIN2(index, max_index);
  }
  HeapWord* q = _array->address_for_index(index);

  uint offset = _array->offset_array(index);  // Extend u_char to uint.
  // Entries >= N_words are "back-skip" codes rather than literal word
  // offsets; decode and hop backwards until a literal offset is found.
  while (offset >= N_words) {
    // The excess of the offset from N_words indicates a power of Base
    // to go back by.
    size_t n_cards_back = BlockOffsetArray::entry_to_cards_back(offset);
    q -= (N_words * n_cards_back);   // step back whole cards in words
    assert(q >= _sp->bottom(), "Went below bottom!");
    index -= n_cards_back;
    offset = _array->offset_array(index);
  }
  assert(offset < N_words, "offset too large");
  // Literal entry: q minus the offset is the block start.
  q -= offset;
  return q;
}
// Starting from block [q, n) (where "n" is the start of the block after
// "q"), walk forward block-by-block until the block containing "addr"
// is found, and return its start.
inline HeapWord*
G1BlockOffsetArray::
forward_to_block_containing_addr_const(HeapWord* q, HeapWord* n,
                                       const void* addr) const {
  if (csp() != NULL) {
    // Contiguous-space case: nothing is allocated at or above top(),
    // so clamp queries beyond it.
    if (addr >= csp()->top()) return csp()->top();
    while (n <= addr) {
      q = n;
      oop obj = oop(q);
      // NOTE(review): a NULL klass presumably means the object header
      // is not yet fully initialized (concurrent allocation) — stop
      // and report the current block start; confirm against callers.
      if (obj->klass_or_null() == NULL) return q;
      n += obj->size();
    }
  } else {
    // Non-contiguous case: the space decides how big each block is.
    while (n <= addr) {
      q = n;
      oop obj = oop(q);
      if (obj->klass_or_null() == NULL) return q;
      n += _sp->block_size(q);
    }
  }
  assert(q <= n, "wrong order for q and addr");
  assert(addr < n, "wrong order for addr and n");
  return q;
}
// Given "q", the start of some block at or before "addr", return the
// start of the block that actually contains "addr", walking forward if
// necessary.
inline HeapWord*
G1BlockOffsetArray::forward_to_block_containing_addr(HeapWord* q,
                                                     const void* addr) {
  // NOTE(review): NULL klass presumably indicates a partially
  // initialized object (concurrent allocation); bail out with the
  // current block start — confirm against callers.
  if (oop(q)->klass_or_null() == NULL) return q;
  HeapWord* n = q + _sp->block_size(q);
  // In the normal case, where the query "addr" is a card boundary, and the
  // offset table chunks are the same size as cards, the block starting at
  // "q" will contain addr, so the test below will fail, and we'll fall
  // through quickly.
  if (n <= addr) {
    q = forward_to_block_containing_addr_slow(q, n, addr);
  }
  assert(q <= addr, "wrong order for current and arg");
  return q;
}
//////////////////////////////////////////////////////////////////////////
// G1BlockOffsetArray inlines
//////////////////////////////////////////////////////////////////////////
143 inline void G1BlockOffsetArray::freed(HeapWord* blk_start, HeapWord* blk_end) {
144 // Verify that the BOT shows [blk_start, blk_end) to be one block.
145 verify_single_block(blk_start, blk_end);
146 // adjust _unallocated_block upward or downward
147 // as appropriate
148 if (BlockOffsetArrayUseUnallocatedBlock) {
149 assert(_unallocated_block <= _end,
150 "Inconsistent value for _unallocated_block");
151 if (blk_end >= _unallocated_block && blk_start <= _unallocated_block) {
152 // CMS-specific note: a block abutting _unallocated_block to
153 // its left is being freed, a new block is being added or
154 // we are resetting following a compaction
155 _unallocated_block = blk_start;
156 }
157 }
158 }
160 inline void G1BlockOffsetArray::freed(HeapWord* blk, size_t size) {
161 freed(blk, blk + size);
162 }
164 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1BLOCKOFFSETTABLE_INLINE_HPP