Fri, 10 Oct 2014 15:51:58 +0200
8059758: Footprint regressions with JDK-8038423
Summary: Changes in JDK-8038423 always initialize (zero out) virtual memory used for auxiliary data structures. This causes a footprint regression for G1 in startup benchmarks, because such benchmarks never touch that memory, so the operating system does not actually commit these pages. The fix is to skip the explicit initialization entirely when the initialization value of the data structures matches the default value of freshly committed memory (zero).
Reviewed-by: jwilhelm, brutisso
1 /*
2 * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
25 #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1BLOCKOFFSETTABLE_INLINE_HPP
26 #define SHARE_VM_GC_IMPLEMENTATION_G1_G1BLOCKOFFSETTABLE_INLINE_HPP
28 #include "gc_implementation/g1/g1BlockOffsetTable.hpp"
29 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
30 #include "gc_implementation/g1/heapRegion.inline.hpp"
31 #include "memory/space.hpp"
33 inline HeapWord* G1BlockOffsetTable::block_start(const void* addr) {
34 if (addr >= _bottom && addr < _end) {
35 return block_start_unsafe(addr);
36 } else {
37 return NULL;
38 }
39 }
41 inline HeapWord*
42 G1BlockOffsetTable::block_start_const(const void* addr) const {
43 if (addr >= _bottom && addr < _end) {
44 return block_start_unsafe_const(addr);
45 } else {
46 return NULL;
47 }
48 }
// check_index(index, msg): debug-only sanity checks for a BOT entry index:
//  1) "index" must be below the number of entries needed to cover the
//     reserved heap space, and
//  2) the heap address corresponding to "index" must lie within the
//     committed part of the heap.
// The first assert's message now names the expression actually printed
// (it previously claimed to print "_vs.committed_size").
#define check_index(index, msg)                                                \
  assert((index) < (_reserved.word_size() >> LogN_words),                      \
         err_msg("%s - index: "SIZE_FORMAT", _reserved.word_size() >> LogN_words: "SIZE_FORMAT, \
                 msg, (index), (_reserved.word_size() >> LogN_words)));        \
  assert(G1CollectedHeap::heap()->is_in_exact(address_for_index_raw(index)),   \
         err_msg("Index "SIZE_FORMAT" corresponding to "PTR_FORMAT             \
                 " (%u) is not in committed area.",                            \
                 (index),                                                      \
                 p2i(address_for_index_raw(index)),                            \
                 G1CollectedHeap::heap()->addr_to_region(address_for_index_raw(index))));
61 u_char G1BlockOffsetSharedArray::offset_array(size_t index) const {
62 check_index(index, "index out of range");
63 return _offset_array[index];
64 }
66 void G1BlockOffsetSharedArray::set_offset_array(size_t index, u_char offset) {
67 check_index(index, "index out of range");
68 set_offset_array_raw(index, offset);
69 }
71 void G1BlockOffsetSharedArray::set_offset_array(size_t index, HeapWord* high, HeapWord* low) {
72 check_index(index, "index out of range");
73 assert(high >= low, "addresses out of order");
74 size_t offset = pointer_delta(high, low);
75 check_offset(offset, "offset too large");
76 set_offset_array(index, (u_char)offset);
77 }
79 void G1BlockOffsetSharedArray::set_offset_array(size_t left, size_t right, u_char offset) {
80 check_index(right, "right index out of range");
81 assert(left <= right, "indexes out of order");
82 size_t num_cards = right - left + 1;
83 if (UseMemSetInBOT) {
84 memset(&_offset_array[left], offset, num_cards);
85 } else {
86 size_t i = left;
87 const size_t end = i + num_cards;
88 for (; i < end; i++) {
89 _offset_array[i] = offset;
90 }
91 }
92 }
94 // Variant of index_for that does not check the index for validity.
95 inline size_t G1BlockOffsetSharedArray::index_for_raw(const void* p) const {
96 return pointer_delta((char*)p, _reserved.start(), sizeof(char)) >> LogN;
97 }
99 inline size_t G1BlockOffsetSharedArray::index_for(const void* p) const {
100 char* pc = (char*)p;
101 assert(pc >= (char*)_reserved.start() &&
102 pc < (char*)_reserved.end(),
103 err_msg("p (" PTR_FORMAT ") not in reserved [" PTR_FORMAT ", " PTR_FORMAT ")",
104 p2i(p), p2i(_reserved.start()), p2i(_reserved.end())));
105 size_t result = index_for_raw(p);
106 check_index(result, "bad index from address");
107 return result;
108 }
110 inline HeapWord*
111 G1BlockOffsetSharedArray::address_for_index(size_t index) const {
112 check_index(index, "index out of range");
113 HeapWord* result = address_for_index_raw(index);
114 assert(result >= _reserved.start() && result < _reserved.end(),
115 err_msg("bad address from index result " PTR_FORMAT
116 " _reserved.start() " PTR_FORMAT " _reserved.end() "
117 PTR_FORMAT,
118 p2i(result), p2i(_reserved.start()), p2i(_reserved.end())));
119 return result;
120 }
122 #undef check_index
// Size in HeapWords of the block starting at "p", delegated to the
// covered space (gsp()).
inline size_t
G1BlockOffsetArray::block_size(const HeapWord* p) const {
  return gsp()->block_size(p);
}
// Returns the start of the block that begins at or before "addr", by
// walking the offset table backwards from the entry for "addr" until an
// entry with a direct (< N_words) offset is reached. If "has_max_index"
// is set, the starting entry is clamped to "max_index".
inline HeapWord*
G1BlockOffsetArray::block_at_or_preceding(const void* addr,
                                          bool has_max_index,
                                          size_t max_index) const {
  // Entry 0 must hold a direct offset of 0: the first block starts exactly
  // at the bottom of the covered area.
  assert(_array->offset_array(0) == 0, "objects can't cross covered areas");
  size_t index = _array->index_for(addr);
  // We must make sure that the offset table entry we use is valid. If
  // "addr" is past the end, start at the last known one and go forward.
  if (has_max_index) {
    index = MIN2(index, max_index);
  }
  HeapWord* q = _array->address_for_index(index);

  uint offset = _array->offset_array(index);  // Extend u_char to uint.
  // Entries < N_words are direct word offsets back to the block start;
  // larger entries encode multi-card back-skips (decoded below).
  while (offset >= N_words) {
    // The excess of the offset from N_words indicates a power of Base
    // to go back by.
    size_t n_cards_back = BlockOffsetArray::entry_to_cards_back(offset);
    q -= (N_words * n_cards_back);
    assert(q >= gsp()->bottom(), "Went below bottom!");
    index -= n_cards_back;
    offset = _array->offset_array(index);
  }
  assert(offset < N_words, "offset too large");
  // Final direct offset back to the block start.
  q -= offset;
  return q;
}
// Walks forward from block start "q" (whose successor block starts at "n")
// to the start of the block containing "addr", and returns it. Returns
// gsp()->top() if "addr" is at or beyond top. If an object with a NULL
// klass pointer is encountered, its address is returned immediately.
inline HeapWord*
G1BlockOffsetArray::
forward_to_block_containing_addr_const(HeapWord* q, HeapWord* n,
                                       const void* addr) const {
  if (addr >= gsp()->top()) return gsp()->top();
  while (n <= addr) {
    q = n;
    oop obj = oop(q);
    // NOTE(review): a NULL klass presumably means the object at "q" is
    // still being initialized by a concurrent allocation, so the walk
    // cannot proceed past it -- TODO confirm.
    if (obj->klass_or_null() == NULL) return q;
    n += block_size(q);
  }
  assert(q <= n, "wrong order for q and addr");
  assert(addr < n, "wrong order for addr and n");
  return q;
}
// Walks forward from block start "q" to the start of the block containing
// "addr" and returns it. If the object at "q" has a NULL klass pointer,
// "q" itself is returned without walking.
inline HeapWord*
G1BlockOffsetArray::forward_to_block_containing_addr(HeapWord* q,
                                                     const void* addr) {
  if (oop(q)->klass_or_null() == NULL) return q;
  HeapWord* n = q + block_size(q);
  // In the normal case, where the query "addr" is a card boundary, and the
  // offset table chunks are the same size as cards, the block starting at
  // "q" will contain addr, so the test below will fail, and we'll fall
  // through quickly.
  if (n <= addr) {
    // Rare case: the block at "q" ends before "addr"; take the slow path.
    q = forward_to_block_containing_addr_slow(q, n, addr);
  }
  assert(q <= addr, "wrong order for current and arg");
  return q;
}
189 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1BLOCKOFFSETTABLE_INLINE_HPP