Fri, 10 Oct 2014 15:51:58 +0200
8059758: Footprint regressions with JDK-8038423
Summary: Changes in JDK-8038423 always initialize (zero out) virtual memory used for auxiliary data structures. This causes a footprint regression for G1 in startup benchmarks: these data structures are otherwise never touched, so before that change the operating system never actually committed their pages. The fix is to skip the initialization entirely when the requested initialization value equals the default contents of freshly committed memory (zero).
Reviewed-by: jwilhelm, brutisso
1 /*
2 * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
25 #include "precompiled.hpp"
26 #include "code/codeCache.hpp"
27 #include "code/nmethod.hpp"
28 #include "gc_implementation/g1/g1CodeCacheRemSet.hpp"
29 #include "gc_implementation/g1/heapRegion.hpp"
30 #include "memory/heap.hpp"
31 #include "memory/iterator.hpp"
32 #include "oops/oop.inline.hpp"
33 #include "utilities/hashtable.inline.hpp"
34 #include "utilities/stack.inline.hpp"
36 PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
// Hash table of nmethod* used as the backing store for G1CodeRootSet.
// Tables retired by G1CodeRootSet::move_to_large() are parked on a global
// purge list (see purge_list_append()/purge()) instead of being freed
// immediately, so readers that still hold a pointer to the old table
// remain valid until purge() runs.
class CodeRootSetTable : public Hashtable<nmethod*, mtGC> {
  friend class G1CodeRootSetTest;
  typedef HashtableEntry<nmethod*, mtGC> Entry;

  // Head of the global list of retired tables awaiting deletion.
  static CodeRootSetTable* volatile _purge_list;

  // Link to the next table on the purge list.
  CodeRootSetTable* _purge_next;

  unsigned int compute_hash(nmethod* nm) {
    uintptr_t hash = (uintptr_t)nm;
    return hash ^ (hash >> 7); // code heap blocks are 128byte aligned
  }

  void remove_entry(Entry* e, Entry* previous);
  Entry* new_entry(nmethod* nm);

 public:
  CodeRootSetTable(int size) : Hashtable<nmethod*, mtGC>(size, sizeof(Entry)), _purge_next(NULL) {}
  ~CodeRootSetTable();

  // Needs to be protected by locks
  bool add(nmethod* nm);
  bool remove(nmethod* nm);

  // Can be called without locking
  bool contains(nmethod* nm);

  int entry_size() const { return BasicHashtable<mtGC>::entry_size(); }

  void copy_to(CodeRootSetTable* new_table);
  void nmethods_do(CodeBlobClosure* blk);

  // Removes all entries for which should_remove(nmethod*) returns true;
  // returns the number of entries removed.
  template<typename CB>
  int remove_if(CB& should_remove);

  static void purge_list_append(CodeRootSetTable* tbl);
  static void purge();

  // Size of the class-static state: just the purge list head pointer.
  static size_t static_mem_size() {
    return sizeof(_purge_list);
  }
};
// Head of the global list of retired tables awaiting deletion in purge().
CodeRootSetTable* volatile CodeRootSetTable::_purge_list = NULL;
83 CodeRootSetTable::Entry* CodeRootSetTable::new_entry(nmethod* nm) {
84 unsigned int hash = compute_hash(nm);
85 Entry* entry = (Entry*) new_entry_free_list();
86 if (entry == NULL) {
87 entry = (Entry*) NEW_C_HEAP_ARRAY2(char, entry_size(), mtGC, CURRENT_PC);
88 }
89 entry->set_next(NULL);
90 entry->set_hash(hash);
91 entry->set_literal(nm);
92 return entry;
93 }
95 void CodeRootSetTable::remove_entry(Entry* e, Entry* previous) {
96 int index = hash_to_index(e->hash());
97 assert((e == bucket(index)) == (previous == NULL), "if e is the first entry then previous should be null");
99 if (previous == NULL) {
100 set_entry(index, e->next());
101 } else {
102 previous->set_next(e->next());
103 }
104 free_entry(e);
105 }
// Releases all entry storage: the linked entries in every bucket, the
// bucket array itself, and any entries still parked on the free list.
CodeRootSetTable::~CodeRootSetTable() {
  for (int index = 0; index < table_size(); ++index) {
    for (Entry* e = bucket(index); e != NULL; ) {
      Entry* to_remove = e;
      // read next before freeing.
      e = e->next();
      unlink_entry(to_remove);
      FREE_C_HEAP_ARRAY(char, to_remove, mtGC);
    }
  }
  assert(number_of_entries() == 0, "should have removed all entries");
  free_buckets();
  // Drain the free list; each new_entry_free_list() call pops one entry.
  for (BasicHashtableEntry<mtGC>* e = new_entry_free_list(); e != NULL; e = new_entry_free_list()) {
    FREE_C_HEAP_ARRAY(char, e, mtGC);
  }
}
124 bool CodeRootSetTable::add(nmethod* nm) {
125 if (!contains(nm)) {
126 Entry* e = new_entry(nm);
127 int index = hash_to_index(e->hash());
128 add_entry(index, e);
129 return true;
130 }
131 return false;
132 }
134 bool CodeRootSetTable::contains(nmethod* nm) {
135 int index = hash_to_index(compute_hash(nm));
136 for (Entry* e = bucket(index); e != NULL; e = e->next()) {
137 if (e->literal() == nm) {
138 return true;
139 }
140 }
141 return false;
142 }
144 bool CodeRootSetTable::remove(nmethod* nm) {
145 int index = hash_to_index(compute_hash(nm));
146 Entry* previous = NULL;
147 for (Entry* e = bucket(index); e != NULL; previous = e, e = e->next()) {
148 if (e->literal() == nm) {
149 remove_entry(e, previous);
150 return true;
151 }
152 }
153 return false;
154 }
156 void CodeRootSetTable::copy_to(CodeRootSetTable* new_table) {
157 for (int index = 0; index < table_size(); ++index) {
158 for (Entry* e = bucket(index); e != NULL; e = e->next()) {
159 new_table->add(e->literal());
160 }
161 }
162 new_table->copy_freelist(this);
163 }
165 void CodeRootSetTable::nmethods_do(CodeBlobClosure* blk) {
166 for (int index = 0; index < table_size(); ++index) {
167 for (Entry* e = bucket(index); e != NULL; e = e->next()) {
168 blk->do_code_blob(e->literal());
169 }
170 }
171 }
173 template<typename CB>
174 int CodeRootSetTable::remove_if(CB& should_remove) {
175 int num_removed = 0;
176 for (int index = 0; index < table_size(); ++index) {
177 Entry* previous = NULL;
178 Entry* e = bucket(index);
179 while (e != NULL) {
180 Entry* next = e->next();
181 if (should_remove(e->literal())) {
182 remove_entry(e, previous);
183 ++num_removed;
184 } else {
185 previous = e;
186 }
187 e = next;
188 }
189 }
190 return num_removed;
191 }
// Frees the backing hash table, if one was ever allocated
// (deleting NULL is a no-op).
G1CodeRootSet::~G1CodeRootSet() {
  delete _table;
}
// Load the table pointer with acquire semantics, pairing with the
// release store in move_to_large(): a reader that observes the new
// table pointer also observes its fully populated contents.
CodeRootSetTable* G1CodeRootSet::load_acquire_table() {
  return (CodeRootSetTable*) OrderAccess::load_ptr_acquire(&_table);
}
// Lazily create the initial, small backing table (called on first add).
void G1CodeRootSet::allocate_small_table() {
  _table = new CodeRootSetTable(SmallSize);
}
// Push table onto the global purge list with a lock-free CAS loop.
void CodeRootSetTable::purge_list_append(CodeRootSetTable* table) {
  for (;;) {
    // Link the current head behind us before attempting the swap.
    table->_purge_next = _purge_list;
    CodeRootSetTable* old = (CodeRootSetTable*) Atomic::cmpxchg_ptr(table, &_purge_list, table->_purge_next);
    if (old == table->_purge_next) {
      // CAS succeeded: table is now the list head.
      break;
    }
    // Another thread changed the head concurrently; retry with the new head.
  }
}
215 void CodeRootSetTable::purge() {
216 CodeRootSetTable* table = _purge_list;
217 _purge_list = NULL;
218 while (table != NULL) {
219 CodeRootSetTable* to_purge = table;
220 table = table->_purge_next;
221 delete to_purge;
222 }
223 }
// Replace the current table with a LargeSize one. The old table is not
// freed here but appended to the global purge list, so a reader that
// loaded the old pointer can keep using it until purge() is called.
void G1CodeRootSet::move_to_large() {
  CodeRootSetTable* temp = new CodeRootSetTable(LargeSize);

  _table->copy_to(temp);

  CodeRootSetTable::purge_list_append(_table);

  // Release store pairs with load_acquire_table(): readers that see the
  // new pointer see the fully copied table.
  OrderAccess::release_store_ptr(&_table, temp);
}
// Free all retired tables parked on the global purge list.
void G1CodeRootSet::purge() {
  CodeRootSetTable::purge();
}
// Memory used by class-static state shared by all code root sets.
size_t G1CodeRootSet::static_mem_size() {
  return CodeRootSetTable::static_mem_size();
}
244 void G1CodeRootSet::add(nmethod* method) {
245 bool added = false;
246 if (is_empty()) {
247 allocate_small_table();
248 }
249 added = _table->add(method);
250 if (_length == Threshold) {
251 move_to_large();
252 }
253 if (added) {
254 ++_length;
255 }
256 }
258 bool G1CodeRootSet::remove(nmethod* method) {
259 bool removed = false;
260 if (_table != NULL) {
261 removed = _table->remove(method);
262 }
263 if (removed) {
264 _length--;
265 if (_length == 0) {
266 clear();
267 }
268 }
269 return removed;
270 }
272 bool G1CodeRootSet::contains(nmethod* method) {
273 CodeRootSetTable* table = load_acquire_table();
274 if (table != NULL) {
275 return table->contains(method);
276 }
277 return false;
278 }
// Drop the backing table and reset the set to its initial empty state.
void G1CodeRootSet::clear() {
  delete _table;
  _table = NULL;
  _length = 0;
}
// Approximate memory footprint: this object plus, when a table exists,
// the table header and per-entry storage for _length entries.
// Note: the table's bucket array is not included in this estimate.
size_t G1CodeRootSet::mem_size() {
  return sizeof(*this) +
      (_table != NULL ? sizeof(CodeRootSetTable) + _table->entry_size() * _length : 0);
}
291 void G1CodeRootSet::nmethods_do(CodeBlobClosure* blk) const {
292 if (_table != NULL) {
293 _table->nmethods_do(blk);
294 }
295 }
// Predicate for CodeRootSetTable::remove_if(): operator() returns true
// for nmethods that have no oop pointing into the given heap region,
// i.e. the nmethods that can be cleaned from that region's code root set.
class CleanCallback : public StackObj {
  // Oop closure that records whether any visited oop lies inside _hr.
  class PointsIntoHRDetectionClosure : public OopClosure {
    HeapRegion* _hr;
   public:
    // Set to true as soon as one oop in _hr is seen; reset by the caller.
    bool _points_into;
    PointsIntoHRDetectionClosure(HeapRegion* hr) : _hr(hr), _points_into(false) {}

    void do_oop(narrowOop* o) {
      do_oop_work(o);
    }

    void do_oop(oop* o) {
      do_oop_work(o);
    }

    template <typename T>
    void do_oop_work(T* p) {
      if (_hr->is_in(oopDesc::load_decode_heap_oop(p))) {
        _points_into = true;
      }
    }
  };

  PointsIntoHRDetectionClosure _detector;
  // Iterates the nmethod's oops, feeding each one to _detector.
  CodeBlobToOopClosure _blobs;

 public:
  CleanCallback(HeapRegion* hr) : _detector(hr), _blobs(&_detector, !CodeBlobToOopClosure::FixRelocations) {}

  // True when nm has no oops pointing into the region.
  bool operator() (nmethod* nm) {
    _detector._points_into = false;
    _blobs.do_code_blob(nm);
    return !_detector._points_into;
  }
};
333 void G1CodeRootSet::clean(HeapRegion* owner) {
334 CleanCallback should_clean(owner);
335 if (_table != NULL) {
336 int removed = _table->remove_if(should_clean);
337 assert((size_t)removed <= _length, "impossible");
338 _length -= removed;
339 }
340 if (_length == 0) {
341 clear();
342 }
343 }
345 #ifndef PRODUCT
// Whitebox unit test for G1CodeRootSet; declared a friend of
// CodeRootSetTable so it can inspect the purge list directly.
class G1CodeRootSetTest {
 public:
  static void test() {
    {
      G1CodeRootSet set1;
      assert(set1.is_empty(), "Code root set must be initially empty but is not.");

      assert(G1CodeRootSet::static_mem_size() == sizeof(void*),
          err_msg("The code root set's static memory usage is incorrect, "SIZE_FORMAT" bytes", G1CodeRootSet::static_mem_size()));

      set1.add((nmethod*)1);
      assert(set1.length() == 1, err_msg("Added exactly one element, but set contains "
          SIZE_FORMAT" elements", set1.length()));

      const size_t num_to_add = (size_t)G1CodeRootSet::Threshold + 1;

      // Duplicate adds must not change the length.
      for (size_t i = 1; i <= num_to_add; i++) {
        set1.add((nmethod*)1);
      }
      assert(set1.length() == 1,
          err_msg("Duplicate detection should not have increased the set size but "
              "is "SIZE_FORMAT, set1.length()));

      // Adding Threshold distinct extra values forces growth to the large
      // table, which retires the small table onto the purge list.
      for (size_t i = 2; i <= num_to_add; i++) {
        set1.add((nmethod*)(uintptr_t)(i));
      }
      assert(set1.length() == num_to_add,
          err_msg("After adding in total "SIZE_FORMAT" distinct code roots, they "
              "need to be in the set, but there are only "SIZE_FORMAT,
              num_to_add, set1.length()));

      assert(CodeRootSetTable::_purge_list != NULL, "should have grown to large hashtable");

      size_t num_popped = 0;
      for (size_t i = 1; i <= num_to_add; i++) {
        bool removed = set1.remove((nmethod*)i);
        if (removed) {
          num_popped += 1;
        } else {
          break;
        }
      }
      assert(num_popped == num_to_add,
          err_msg("Managed to pop "SIZE_FORMAT" code roots, but only "SIZE_FORMAT" "
              "were added", num_popped, num_to_add));
      // The retired small table stays parked until an explicit purge().
      assert(CodeRootSetTable::_purge_list != NULL, "should have grown to large hashtable");

      G1CodeRootSet::purge();

      assert(CodeRootSetTable::_purge_list == NULL, "should have purged old small tables");
    }
  }
};
// Entry point invoked by the VM's internal test runner (non-product only).
void TestCodeCacheRemSet_test() {
  G1CodeRootSetTest::test();
}
407 #endif