Mon, 15 May 2017 12:20:15 +0200
8180048: Interned string and symbol table leak memory during parallel unlinking
Summary: Make appending found dead BasicHashtableEntrys to the free list atomic.
Reviewed-by: ehelin, shade
duke@435 | 1 | /* |
tschatzl@8766 | 2 | * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved. |
duke@435 | 3 | * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
duke@435 | 4 | * |
duke@435 | 5 | * This code is free software; you can redistribute it and/or modify it |
duke@435 | 6 | * under the terms of the GNU General Public License version 2 only, as |
duke@435 | 7 | * published by the Free Software Foundation. |
duke@435 | 8 | * |
duke@435 | 9 | * This code is distributed in the hope that it will be useful, but WITHOUT |
duke@435 | 10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
duke@435 | 11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
duke@435 | 12 | * version 2 for more details (a copy is included in the LICENSE file that |
duke@435 | 13 | * accompanied this code). |
duke@435 | 14 | * |
duke@435 | 15 | * You should have received a copy of the GNU General Public License version |
duke@435 | 16 | * 2 along with this work; if not, write to the Free Software Foundation, |
duke@435 | 17 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. |
duke@435 | 18 | * |
trims@1907 | 19 | * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
trims@1907 | 20 | * or visit www.oracle.com if you need additional information or have any |
trims@1907 | 21 | * questions. |
duke@435 | 22 | * |
duke@435 | 23 | */ |
duke@435 | 24 | |
stefank@2314 | 25 | #include "precompiled.hpp" |
coleenp@3904 | 26 | #include "classfile/altHashing.hpp" |
coleenp@3904 | 27 | #include "classfile/javaClasses.hpp" |
stefank@2314 | 28 | #include "memory/allocation.inline.hpp" |
coleenp@3875 | 29 | #include "memory/filemap.hpp" |
stefank@2314 | 30 | #include "memory/resourceArea.hpp" |
stefank@2314 | 31 | #include "oops/oop.inline.hpp" |
stefank@2314 | 32 | #include "runtime/safepoint.hpp" |
stefank@2314 | 33 | #include "utilities/dtrace.hpp" |
stefank@2314 | 34 | #include "utilities/hashtable.hpp" |
stefank@2314 | 35 | #include "utilities/hashtable.inline.hpp" |
iklam@5144 | 36 | #include "utilities/numberSeq.hpp" |
duke@435 | 37 | |
coleenp@2497 | 38 | |
mgerdin@7207 | 39 | // This hashtable is implemented as an open hash table with a fixed number of buckets. |
duke@435 | 40 | |
mgerdin@7207 | 41 | template <MEMFLAGS F> BasicHashtableEntry<F>* BasicHashtable<F>::new_entry_free_list() { |
mgerdin@7207 | 42 | BasicHashtableEntry<F>* entry = NULL; |
mgerdin@7207 | 43 | if (_free_list != NULL) { |
duke@435 | 44 | entry = _free_list; |
duke@435 | 45 | _free_list = _free_list->next(); |
mgerdin@7207 | 46 | } |
mgerdin@7207 | 47 | return entry; |
mgerdin@7207 | 48 | } |
mgerdin@7207 | 49 | |
// HashtableEntrys are allocated in blocks to reduce the space overhead.
template <MEMFLAGS F> BasicHashtableEntry<F>* BasicHashtable<F>::new_entry(unsigned int hashValue) {
  // Prefer recycling a previously freed entry before carving new memory.
  BasicHashtableEntry<F>* entry = new_entry_free_list();

  if (entry == NULL) {
    // Bump-allocate from the current block; grab a fresh C-heap block when
    // the current one cannot hold another entry.
    if (_first_free_entry + _entry_size >= _end_block) {
      // Block capacity scales with the table, clamped to at most 512 entries.
      int block_size = MIN2(512, MAX2((int)_table_size / 2, (int)_number_of_entries));
      int len = _entry_size * block_size;
      len = 1 << log2_intptr(len); // round down to power of 2
      assert(len >= _entry_size, "");
      _first_free_entry = NEW_C_HEAP_ARRAY2(char, len, F, CURRENT_PC);
      _end_block = _first_free_entry + len;
    }
    entry = (BasicHashtableEntry<F>*)_first_free_entry;
    _first_free_entry += _entry_size;
  }

  // Entries must be heap-word aligned for the bump-pointer arithmetic above.
  assert(_entry_size % HeapWordSize == 0, "");
  entry->set_hash(hashValue);
  return entry;
}
duke@435 | 71 | |
duke@435 | 72 | |
zgu@3900 | 73 | template <class T, MEMFLAGS F> HashtableEntry<T, F>* Hashtable<T, F>::new_entry(unsigned int hashValue, T obj) { |
zgu@3900 | 74 | HashtableEntry<T, F>* entry; |
duke@435 | 75 | |
zgu@3900 | 76 | entry = (HashtableEntry<T, F>*)BasicHashtable<F>::new_entry(hashValue); |
coleenp@2497 | 77 | entry->set_literal(obj); |
duke@435 | 78 | return entry; |
duke@435 | 79 | } |
duke@435 | 80 | |
coleenp@3865 | 81 | // Check to see if the hashtable is unbalanced. The caller set a flag to |
coleenp@3865 | 82 | // rehash at the next safepoint. If this bucket is 60 times greater than the |
coleenp@3865 | 83 | // expected average bucket length, it's an unbalanced hashtable. |
coleenp@3865 | 84 | // This is somewhat an arbitrary heuristic but if one bucket gets to |
coleenp@3865 | 85 | // rehash_count which is currently 100, there's probably something wrong. |
coleenp@3865 | 86 | |
mgerdin@7207 | 87 | template <class T, MEMFLAGS F> bool RehashableHashtable<T, F>::check_rehash_table(int count) { |
mgerdin@7207 | 88 | assert(this->table_size() != 0, "underflow"); |
mgerdin@7207 | 89 | if (count > (((double)this->number_of_entries()/(double)this->table_size())*rehash_multiple)) { |
coleenp@3865 | 90 | // Set a flag for the next safepoint, which should be at some guaranteed |
coleenp@3865 | 91 | // safepoint interval. |
coleenp@3865 | 92 | return true; |
coleenp@3865 | 93 | } |
coleenp@3865 | 94 | return false; |
coleenp@3865 | 95 | } |
coleenp@3865 | 96 | |
// Shared seed for the alternate hash code; set from AltHashing::compute_seed()
// in move_to() before entries are rehashed.
template <class T, MEMFLAGS F> juint RehashableHashtable<T, F>::_seed = 0;
coleenp@3904 | 98 | |
coleenp@3865 | 99 | // Create a new table and using alternate hash code, populate the new table |
coleenp@3865 | 100 | // with the existing elements. This can be used to change the hash code |
coleenp@3865 | 101 | // and could in the future change the size of the table. |
coleenp@3865 | 102 | |
mgerdin@7207 | 103 | template <class T, MEMFLAGS F> void RehashableHashtable<T, F>::move_to(RehashableHashtable<T, F>* new_table) { |
coleenp@3904 | 104 | |
coleenp@3904 | 105 | // Initialize the global seed for hashing. |
coleenp@3904 | 106 | _seed = AltHashing::compute_seed(); |
coleenp@3904 | 107 | assert(seed() != 0, "shouldn't be zero"); |
coleenp@3904 | 108 | |
coleenp@3904 | 109 | int saved_entry_count = this->number_of_entries(); |
coleenp@3865 | 110 | |
coleenp@3865 | 111 | // Iterate through the table and create a new entry for the new table |
coleenp@3865 | 112 | for (int i = 0; i < new_table->table_size(); ++i) { |
mgerdin@7207 | 113 | for (HashtableEntry<T, F>* p = this->bucket(i); p != NULL; ) { |
zgu@3900 | 114 | HashtableEntry<T, F>* next = p->next(); |
coleenp@3865 | 115 | T string = p->literal(); |
coleenp@3865 | 116 | // Use alternate hashing algorithm on the symbol in the first table |
coleenp@4037 | 117 | unsigned int hashValue = string->new_hash(seed()); |
coleenp@3865 | 118 | // Get a new index relative to the new table (can also change size) |
coleenp@3865 | 119 | int index = new_table->hash_to_index(hashValue); |
coleenp@3865 | 120 | p->set_hash(hashValue); |
coleenp@3875 | 121 | // Keep the shared bit in the Hashtable entry to indicate that this entry |
coleenp@3875 | 122 | // can't be deleted. The shared bit is the LSB in the _next field so |
coleenp@3875 | 123 | // walking the hashtable past these entries requires |
coleenp@3875 | 124 | // BasicHashtableEntry::make_ptr() call. |
coleenp@3875 | 125 | bool keep_shared = p->is_shared(); |
andrew@3963 | 126 | this->unlink_entry(p); |
coleenp@3865 | 127 | new_table->add_entry(index, p); |
coleenp@3875 | 128 | if (keep_shared) { |
coleenp@3875 | 129 | p->set_shared(); |
coleenp@3875 | 130 | } |
coleenp@3865 | 131 | p = next; |
coleenp@3865 | 132 | } |
coleenp@3865 | 133 | } |
coleenp@3865 | 134 | // give the new table the free list as well |
coleenp@3865 | 135 | new_table->copy_freelist(this); |
coleenp@3865 | 136 | assert(new_table->number_of_entries() == saved_entry_count, "lost entry on dictionary copy?"); |
coleenp@3865 | 137 | |
coleenp@3865 | 138 | // Destroy memory used by the buckets in the hashtable. The memory |
coleenp@3865 | 139 | // for the elements has been used in a new table and is not |
coleenp@3865 | 140 | // destroyed. The memory reuse will benefit resizing the SystemDictionary |
coleenp@3865 | 141 | // to avoid a memory allocation spike at safepoint. |
zgu@3900 | 142 | BasicHashtable<F>::free_buckets(); |
coleenp@3865 | 143 | } |
coleenp@3865 | 144 | |
zgu@3900 | 145 | template <MEMFLAGS F> void BasicHashtable<F>::free_buckets() { |
coleenp@3875 | 146 | if (NULL != _buckets) { |
coleenp@3875 | 147 | // Don't delete the buckets in the shared space. They aren't |
coleenp@3875 | 148 | // allocated by os::malloc |
coleenp@3875 | 149 | if (!UseSharedSpaces || |
coleenp@3875 | 150 | !FileMapInfo::current_info()->is_in_shared_space(_buckets)) { |
zgu@3900 | 151 | FREE_C_HEAP_ARRAY(HashtableBucket, _buckets, F); |
coleenp@3875 | 152 | } |
coleenp@3875 | 153 | _buckets = NULL; |
coleenp@3875 | 154 | } |
coleenp@3875 | 155 | } |
coleenp@3875 | 156 | |
coleenp@3875 | 157 | |
duke@435 | 158 | // Reverse the order of elements in the hash buckets. |
duke@435 | 159 | |
zgu@3900 | 160 | template <MEMFLAGS F> void BasicHashtable<F>::reverse() { |
duke@435 | 161 | |
duke@435 | 162 | for (int i = 0; i < _table_size; ++i) { |
zgu@3900 | 163 | BasicHashtableEntry<F>* new_list = NULL; |
zgu@3900 | 164 | BasicHashtableEntry<F>* p = bucket(i); |
duke@435 | 165 | while (p != NULL) { |
zgu@3900 | 166 | BasicHashtableEntry<F>* next = p->next(); |
duke@435 | 167 | p->set_next(new_list); |
duke@435 | 168 | new_list = p; |
duke@435 | 169 | p = next; |
duke@435 | 170 | } |
duke@435 | 171 | *bucket_addr(i) = new_list; |
duke@435 | 172 | } |
duke@435 | 173 | } |
duke@435 | 174 | |
tschatzl@8766 | 175 | template <MEMFLAGS F> void BasicHashtable<F>::BucketUnlinkContext::free_entry(BasicHashtableEntry<F>* entry) { |
tschatzl@8766 | 176 | entry->set_next(_removed_head); |
tschatzl@8766 | 177 | _removed_head = entry; |
tschatzl@8766 | 178 | if (_removed_tail == NULL) { |
tschatzl@8766 | 179 | _removed_tail = entry; |
tschatzl@8766 | 180 | } |
tschatzl@8766 | 181 | _num_removed++; |
tschatzl@8766 | 182 | } |
tschatzl@8766 | 183 | |
// Splice the list of dead entries collected in 'context' onto this table's
// shared free list. Multiple unlinking workers may call this concurrently,
// so the splice uses a CAS retry loop on the free-list head.
template <MEMFLAGS F> void BasicHashtable<F>::bulk_free_entries(BucketUnlinkContext* context) {
  if (context->_num_removed == 0) {
    assert(context->_removed_head == NULL && context->_removed_tail == NULL,
           err_msg("Zero entries in the unlink context, but elements linked from " PTR_FORMAT " to " PTR_FORMAT,
                   p2i(context->_removed_head), p2i(context->_removed_tail)));
    return;
  }

  // MT-safe add of the list of BasicHashTableEntrys from the context to the free list.
  BasicHashtableEntry<F>* current = _free_list;
  while (true) {
    // Hook the observed free-list head behind our tail, then try to publish
    // our head as the new free-list head. If the CAS loses, another thread
    // changed the head; retry against the value it installed.
    context->_removed_tail->set_next(current);
    BasicHashtableEntry<F>* old = (BasicHashtableEntry<F>*)Atomic::cmpxchg_ptr(context->_removed_head, &_free_list, current);
    if (old == current) {
      break;
    }
    current = old;
  }
  // Decrement the live-entry count atomically as well.
  Atomic::add(-context->_num_removed, &_number_of_entries);
}
duke@435 | 204 | |
// Copy the table to the shared space.

template <MEMFLAGS F> void BasicHashtable<F>::copy_table(char** top, char* end) {

  // Dump the hash table entries.

  // Reserve a slot for the total payload length; filled in after the copy.
  intptr_t *plen = (intptr_t*)(*top);
  *top += sizeof(*plen);

  int i;
  for (i = 0; i < _table_size; ++i) {
    for (BasicHashtableEntry<F>** p = _buckets[i].entry_addr();
         *p != NULL;
         p = (*p)->next_addr()) {
      if (*top + entry_size() > end) {
        report_out_of_shared_space(SharedMiscData);
      }
      // Relocate the entry into the shared region and patch the incoming
      // link (bucket slot or predecessor's _next) in place via *p.
      *p = (BasicHashtableEntry<F>*)memcpy(*top, *p, entry_size());
      *top += entry_size();
    }
  }
  *plen = (char*)(*top) - (char*)plen - sizeof(*plen);

  // Set the shared bit.

  for (i = 0; i < _table_size; ++i) {
    for (BasicHashtableEntry<F>* p = bucket(i); p != NULL; p = p->next()) {
      p->set_shared();
    }
  }
}
duke@435 | 236 | |
duke@435 | 237 | |
duke@435 | 238 | |
// Reverse the order of elements in the hash buckets.
// Entries whose literal lies at or above 'boundary' are separated from the
// rest; each sublist ends up reversed (built by prepending), and the bucket
// is relinked as low-sublist followed by high-sublist.

template <class T, MEMFLAGS F> void Hashtable<T, F>::reverse(void* boundary) {

  for (int i = 0; i < this->table_size(); ++i) {
    HashtableEntry<T, F>* high_list = NULL;
    HashtableEntry<T, F>* low_list = NULL;
    HashtableEntry<T, F>* last_low_entry = NULL;
    HashtableEntry<T, F>* p = bucket(i);
    while (p != NULL) {
      HashtableEntry<T, F>* next = p->next();
      if ((void*)p->literal() >= boundary) {
        p->set_next(high_list);
        high_list = p;
      } else {
        p->set_next(low_list);
        low_list = p;
        // The first low entry prepended becomes the tail of the low sublist.
        if (last_low_entry == NULL) {
          last_low_entry = p;
        }
      }
      p = next;
    }
    if (low_list != NULL) {
      *bucket_addr(i) = low_list;
      // Join the low sublist's tail to the head of the high sublist.
      last_low_entry->set_next(high_list);
    } else {
      *bucket_addr(i) = high_list;
    }
  }
}
duke@435 | 270 | |
mgerdin@7207 | 271 | template <class T, MEMFLAGS F> int RehashableHashtable<T, F>::literal_size(Symbol *symbol) { |
iklam@5144 | 272 | return symbol->size() * HeapWordSize; |
iklam@5144 | 273 | } |
iklam@5144 | 274 | |
mgerdin@7207 | 275 | template <class T, MEMFLAGS F> int RehashableHashtable<T, F>::literal_size(oop oop) { |
iklam@5144 | 276 | // NOTE: this would over-count if (pre-JDK8) java_lang_Class::has_offset_field() is true, |
iklam@5144 | 277 | // and the String.value array is shared by several Strings. However, starting from JDK8, |
iklam@5144 | 278 | // the String.value array is not shared anymore. |
iklam@5144 | 279 | assert(oop != NULL && oop->klass() == SystemDictionary::String_klass(), "only strings are supported"); |
iklam@5144 | 280 | return (oop->size() + java_lang_String::value(oop)->size()) * HeapWordSize; |
iklam@5144 | 281 | } |
iklam@5144 | 282 | |
// Dump footprint and bucket length statistics
//
// Note: if you create a new subclass of Hashtable<MyNewType, F>, you will need to
// add a new function Hashtable<T, F>::literal_size(MyNewType lit)

template <class T, MEMFLAGS F> void RehashableHashtable<T, F>::dump_table(outputStream* st, const char *table_name) {
  NumberSeq summary;
  // NOTE(review): byte totals are kept in 'int' — could overflow for very
  // large tables / literal data; presumably acceptable for a debug dump,
  // confirm before relying on the absolute numbers.
  int literal_bytes = 0;
  for (int i = 0; i < this->table_size(); ++i) {
    int count = 0;
    for (HashtableEntry<T, F>* e = this->bucket(i);
         e != NULL; e = e->next()) {
      count++;
      literal_bytes += literal_size(e->literal());
    }
    // One sample per bucket: its chain length. Drives avg/variance/max below.
    summary.add((double)count);
  }
  double num_buckets = summary.num();
  double num_entries = summary.sum();

  int bucket_bytes = (int)num_buckets * sizeof(HashtableBucket<F>);
  int entry_bytes = (int)num_entries * sizeof(HashtableEntry<T, F>);
  int total_bytes = literal_bytes + bucket_bytes + entry_bytes;

  // Guard the averages against division by zero for empty tables.
  double bucket_avg = (num_buckets <= 0) ? 0 : (bucket_bytes / num_buckets);
  double entry_avg = (num_entries <= 0) ? 0 : (entry_bytes / num_entries);
  double literal_avg = (num_entries <= 0) ? 0 : (literal_bytes / num_entries);

  st->print_cr("%s statistics:", table_name);
  st->print_cr("Number of buckets       : %9d = %9d bytes, avg %7.3f", (int)num_buckets, bucket_bytes,  bucket_avg);
  st->print_cr("Number of entries       : %9d = %9d bytes, avg %7.3f", (int)num_entries, entry_bytes,   entry_avg);
  st->print_cr("Number of literals      : %9d = %9d bytes, avg %7.3f", (int)num_entries, literal_bytes, literal_avg);
  st->print_cr("Total footprint         : %9s = %9d bytes", "", total_bytes);
  st->print_cr("Average bucket size     : %9.3f", summary.avg());
  st->print_cr("Variance of bucket size : %9.3f", summary.variance());
  st->print_cr("Std. dev. of bucket size: %9.3f", summary.sd());
  st->print_cr("Maximum bucket size     : %9d", (int)summary.maximum());
}
iklam@5144 | 321 | |
duke@435 | 322 | |
// Dump the hash table buckets.

template <MEMFLAGS F> void BasicHashtable<F>::copy_buckets(char** top, char* end) {
  intptr_t len = _table_size * sizeof(HashtableBucket<F>);
  // Write a small header: bucket-array byte length, then the entry count.
  *(intptr_t*)(*top) = len;
  *top += sizeof(intptr_t);

  *(intptr_t*)(*top) = _number_of_entries;
  *top += sizeof(intptr_t);

  if (*top + len > end) {
    report_out_of_shared_space(SharedMiscData);
  }
  // Relocate the bucket array itself into the shared region.
  _buckets = (HashtableBucket<F>*)memcpy(*top, _buckets, len);
  *top += len;
}
duke@435 | 339 | |
duke@435 | 340 | |
duke@435 | 341 | #ifndef PRODUCT |
duke@435 | 342 | |
zgu@3900 | 343 | template <class T, MEMFLAGS F> void Hashtable<T, F>::print() { |
duke@435 | 344 | ResourceMark rm; |
duke@435 | 345 | |
zgu@3900 | 346 | for (int i = 0; i < BasicHashtable<F>::table_size(); i++) { |
zgu@3900 | 347 | HashtableEntry<T, F>* entry = bucket(i); |
duke@435 | 348 | while(entry != NULL) { |
duke@435 | 349 | tty->print("%d : ", i); |
duke@435 | 350 | entry->literal()->print(); |
duke@435 | 351 | tty->cr(); |
duke@435 | 352 | entry = entry->next(); |
duke@435 | 353 | } |
duke@435 | 354 | } |
duke@435 | 355 | } |
duke@435 | 356 | |
duke@435 | 357 | |
zgu@3900 | 358 | template <MEMFLAGS F> void BasicHashtable<F>::verify() { |
duke@435 | 359 | int count = 0; |
duke@435 | 360 | for (int i = 0; i < table_size(); i++) { |
zgu@3900 | 361 | for (BasicHashtableEntry<F>* p = bucket(i); p != NULL; p = p->next()) { |
duke@435 | 362 | ++count; |
duke@435 | 363 | } |
duke@435 | 364 | } |
duke@435 | 365 | assert(count == number_of_entries(), "number of hashtable entries incorrect"); |
duke@435 | 366 | } |
duke@435 | 367 | |
duke@435 | 368 | |
duke@435 | 369 | #endif // PRODUCT |
duke@435 | 370 | |
duke@435 | 371 | |
duke@435 | 372 | #ifdef ASSERT |
duke@435 | 373 | |
zgu@3900 | 374 | template <MEMFLAGS F> void BasicHashtable<F>::verify_lookup_length(double load) { |
duke@435 | 375 | if ((double)_lookup_length / (double)_lookup_count > load * 2.0) { |
duke@435 | 376 | warning("Performance bug: SystemDictionary lookup_count=%d " |
duke@435 | 377 | "lookup_length=%d average=%lf load=%f", |
duke@435 | 378 | _lookup_count, _lookup_length, |
duke@435 | 379 | (double) _lookup_length / _lookup_count, load); |
duke@435 | 380 | } |
duke@435 | 381 | } |
duke@435 | 382 | |
duke@435 | 383 | #endif |
// Explicitly instantiate these types
// (the template definitions live in this .cpp file, so every instantiation
// used elsewhere in the VM must be listed here).
#if INCLUDE_ALL_GCS
template class Hashtable<nmethod*, mtGC>;
template class HashtableEntry<nmethod*, mtGC>;
template class BasicHashtable<mtGC>;
#endif
template class Hashtable<ConstantPool*, mtClass>;
template class RehashableHashtable<Symbol*, mtSymbol>;
template class RehashableHashtable<oopDesc*, mtSymbol>;
template class Hashtable<Symbol*, mtSymbol>;
template class Hashtable<Klass*, mtClass>;
template class Hashtable<oop, mtClass>;
#if defined(SOLARIS) || defined(CHECK_UNHANDLED_OOPS)
template class Hashtable<oop, mtSymbol>;
template class RehashableHashtable<oop, mtSymbol>;
#endif // SOLARIS || CHECK_UNHANDLED_OOPS
template class Hashtable<oopDesc*, mtSymbol>;
template class Hashtable<Symbol*, mtClass>;
template class HashtableEntry<Symbol*, mtSymbol>;
template class HashtableEntry<Symbol*, mtClass>;
template class HashtableEntry<oop, mtSymbol>;
template class BasicHashtableEntry<mtSymbol>;
template class BasicHashtableEntry<mtCode>;
template class BasicHashtable<mtClass>;
template class BasicHashtable<mtSymbol>;
template class BasicHashtable<mtCode>;
template class BasicHashtable<mtInternal>;