Thu, 07 Aug 2014 22:28:53 +0200
8054341: Remove some obsolete code in G1CollectedHeap class
Summary: Remove dead code.
Reviewed-by: stefank, brutisso
aoqi@0 | 1 | /* |
aoqi@0 | 2 | * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. |
aoqi@0 | 3 | * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
aoqi@0 | 4 | * |
aoqi@0 | 5 | * This code is free software; you can redistribute it and/or modify it |
aoqi@0 | 6 | * under the terms of the GNU General Public License version 2 only, as |
aoqi@0 | 7 | * published by the Free Software Foundation. |
aoqi@0 | 8 | * |
aoqi@0 | 9 | * This code is distributed in the hope that it will be useful, but WITHOUT |
aoqi@0 | 10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
aoqi@0 | 11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
aoqi@0 | 12 | * version 2 for more details (a copy is included in the LICENSE file that |
aoqi@0 | 13 | * accompanied this code). |
aoqi@0 | 14 | * |
aoqi@0 | 15 | * You should have received a copy of the GNU General Public License version |
aoqi@0 | 16 | * 2 along with this work; if not, write to the Free Software Foundation, |
aoqi@0 | 17 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. |
aoqi@0 | 18 | * |
aoqi@0 | 19 | * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
aoqi@0 | 20 | * or visit www.oracle.com if you need additional information or have any |
aoqi@0 | 21 | * questions. |
aoqi@0 | 22 | * |
aoqi@0 | 23 | */ |
aoqi@0 | 24 | |
aoqi@0 | 25 | #include "precompiled.hpp" |
aoqi@0 | 26 | #include "classfile/altHashing.hpp" |
aoqi@0 | 27 | #include "classfile/javaClasses.hpp" |
aoqi@0 | 28 | #include "gc_implementation/g1/g1CollectedHeap.inline.hpp" |
aoqi@0 | 29 | #include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp" |
aoqi@0 | 30 | #include "gc_implementation/g1/g1StringDedupTable.hpp" |
aoqi@0 | 31 | #include "memory/gcLocker.hpp" |
aoqi@0 | 32 | #include "memory/padded.inline.hpp" |
aoqi@0 | 33 | #include "oops/typeArrayOop.hpp" |
aoqi@0 | 34 | #include "runtime/mutexLocker.hpp" |
aoqi@0 | 35 | |
aoqi@0 | 36 | // |
aoqi@0 | 37 | // Freelist in the deduplication table entry cache. Links table |
aoqi@0 | 38 | // entries together using their _next fields. |
aoqi@0 | 39 | // |
aoqi@0 | 40 | class G1StringDedupEntryFreeList : public CHeapObj<mtGC> { |
aoqi@0 | 41 | private: |
aoqi@0 | 42 | G1StringDedupEntry* _list; |
aoqi@0 | 43 | size_t _length; |
aoqi@0 | 44 | |
aoqi@0 | 45 | public: |
aoqi@0 | 46 | G1StringDedupEntryFreeList() : |
aoqi@0 | 47 | _list(NULL), |
aoqi@0 | 48 | _length(0) { |
aoqi@0 | 49 | } |
aoqi@0 | 50 | |
aoqi@0 | 51 | void add(G1StringDedupEntry* entry) { |
aoqi@0 | 52 | entry->set_next(_list); |
aoqi@0 | 53 | _list = entry; |
aoqi@0 | 54 | _length++; |
aoqi@0 | 55 | } |
aoqi@0 | 56 | |
aoqi@0 | 57 | G1StringDedupEntry* remove() { |
aoqi@0 | 58 | G1StringDedupEntry* entry = _list; |
aoqi@0 | 59 | if (entry != NULL) { |
aoqi@0 | 60 | _list = entry->next(); |
aoqi@0 | 61 | _length--; |
aoqi@0 | 62 | } |
aoqi@0 | 63 | return entry; |
aoqi@0 | 64 | } |
aoqi@0 | 65 | |
aoqi@0 | 66 | size_t length() { |
aoqi@0 | 67 | return _length; |
aoqi@0 | 68 | } |
aoqi@0 | 69 | }; |
aoqi@0 | 70 | |
aoqi@0 | 71 | // |
aoqi@0 | 72 | // Cache of deduplication table entries. This cache provides fast allocation and |
aoqi@0 | 73 | // reuse of table entries to lower the pressure on the underlying allocator. |
aoqi@0 | 74 | // But more importantly, it provides fast/deferred freeing of table entries. This |
aoqi@0 | 75 | // is important because freeing of table entries is done during stop-the-world |
aoqi@0 | 76 | // phases and it is not uncommon for large number of entries to be freed at once. |
// Table entries that are freed during these phases are placed onto a freelist in
aoqi@0 | 78 | // the cache. The deduplication thread, which executes in a concurrent phase, will |
aoqi@0 | 79 | // later reuse or free the underlying memory for these entries. |
aoqi@0 | 80 | // |
aoqi@0 | 81 | // The cache allows for single-threaded allocations and multi-threaded frees. |
aoqi@0 | 82 | // Allocations are synchronized by StringDedupTable_lock as part of a table |
aoqi@0 | 83 | // modification. |
aoqi@0 | 84 | // |
class G1StringDedupEntryCache : public CHeapObj<mtGC> {
private:
  // One freelist per GC worker to allow lock less freeing of
  // entries while doing a parallel scan of the table. Using
  // PaddedEnd to avoid false sharing.
  PaddedEnd<G1StringDedupEntryFreeList>* _lists;
  // Number of freelists (one per GC worker thread, minimum one).
  size_t _nlists;

public:
  G1StringDedupEntryCache();
  ~G1StringDedupEntryCache();

  // Get a table entry from the cache freelist, or allocate a new
  // entry if the cache is empty.
  G1StringDedupEntry* alloc();

  // Insert a table entry into the cache freelist. The entry's object
  // and hash are cleared before it is cached for reuse.
  void free(G1StringDedupEntry* entry, uint worker_id);

  // Returns current number of entries in the cache (sum over all freelists).
  size_t size();

  // If the cache has grown above the given max size, trim it down
  // and deallocate the memory occupied by the trimmed-off entries.
  void trim(size_t max_size);
};
aoqi@0 | 111 | |
aoqi@0 | 112 | G1StringDedupEntryCache::G1StringDedupEntryCache() { |
aoqi@0 | 113 | _nlists = MAX2(ParallelGCThreads, (size_t)1); |
aoqi@0 | 114 | _lists = PaddedArray<G1StringDedupEntryFreeList, mtGC>::create_unfreeable((uint)_nlists); |
aoqi@0 | 115 | } |
aoqi@0 | 116 | |
G1StringDedupEntryCache::~G1StringDedupEntryCache() {
  // The cache is created once in G1StringDedupTable::create() and is
  // never deleted, so reaching this destructor indicates a bug.
  ShouldNotReachHere();
}
aoqi@0 | 120 | |
aoqi@0 | 121 | G1StringDedupEntry* G1StringDedupEntryCache::alloc() { |
aoqi@0 | 122 | for (size_t i = 0; i < _nlists; i++) { |
aoqi@0 | 123 | G1StringDedupEntry* entry = _lists[i].remove(); |
aoqi@0 | 124 | if (entry != NULL) { |
aoqi@0 | 125 | return entry; |
aoqi@0 | 126 | } |
aoqi@0 | 127 | } |
aoqi@0 | 128 | return new G1StringDedupEntry(); |
aoqi@0 | 129 | } |
aoqi@0 | 130 | |
aoqi@0 | 131 | void G1StringDedupEntryCache::free(G1StringDedupEntry* entry, uint worker_id) { |
aoqi@0 | 132 | assert(entry->obj() != NULL, "Double free"); |
aoqi@0 | 133 | assert(worker_id < _nlists, "Invalid worker id"); |
aoqi@0 | 134 | entry->set_obj(NULL); |
aoqi@0 | 135 | entry->set_hash(0); |
aoqi@0 | 136 | _lists[worker_id].add(entry); |
aoqi@0 | 137 | } |
aoqi@0 | 138 | |
aoqi@0 | 139 | size_t G1StringDedupEntryCache::size() { |
aoqi@0 | 140 | size_t size = 0; |
aoqi@0 | 141 | for (size_t i = 0; i < _nlists; i++) { |
aoqi@0 | 142 | size += _lists[i].length(); |
aoqi@0 | 143 | } |
aoqi@0 | 144 | return size; |
aoqi@0 | 145 | } |
aoqi@0 | 146 | |
aoqi@0 | 147 | void G1StringDedupEntryCache::trim(size_t max_size) { |
aoqi@0 | 148 | size_t cache_size = 0; |
aoqi@0 | 149 | for (size_t i = 0; i < _nlists; i++) { |
aoqi@0 | 150 | G1StringDedupEntryFreeList* list = &_lists[i]; |
aoqi@0 | 151 | cache_size += list->length(); |
aoqi@0 | 152 | while (cache_size > max_size) { |
aoqi@0 | 153 | G1StringDedupEntry* entry = list->remove(); |
aoqi@0 | 154 | assert(entry != NULL, "Should not be null"); |
aoqi@0 | 155 | cache_size--; |
aoqi@0 | 156 | delete entry; |
aoqi@0 | 157 | } |
aoqi@0 | 158 | } |
aoqi@0 | 159 | } |
aoqi@0 | 160 | |
// The singleton deduplication table and its shared entry cache.
G1StringDedupTable* G1StringDedupTable::_table = NULL;
G1StringDedupEntryCache* G1StringDedupTable::_entry_cache = NULL;

// Table sizing policy. The table is always a power-of-two number of buckets.
const size_t G1StringDedupTable::_min_size = (1 << 10); // 1024
const size_t G1StringDedupTable::_max_size = (1 << 24); // 16777216
const double G1StringDedupTable::_grow_load_factor = 2.0; // Grow table at 200% load
const double G1StringDedupTable::_shrink_load_factor = _grow_load_factor / 3.0; // Shrink table at 67% load
const double G1StringDedupTable::_max_cache_factor = 0.1; // Cache a maximum of 10% of the table size
const uintx G1StringDedupTable::_rehash_multiple = 60; // Hash bucket has 60 times more collisions than expected
const uintx G1StringDedupTable::_rehash_threshold = (uintx)(_rehash_multiple * _grow_load_factor);

// Lifetime statistics, updated under StringDedupTable_lock.
uintx G1StringDedupTable::_entries_added = 0;
uintx G1StringDedupTable::_entries_removed = 0;
uintx G1StringDedupTable::_resize_count = 0;
uintx G1StringDedupTable::_rehash_count = 0;
aoqi@0 | 176 | |
aoqi@0 | 177 | G1StringDedupTable::G1StringDedupTable(size_t size, jint hash_seed) : |
aoqi@0 | 178 | _size(size), |
aoqi@0 | 179 | _entries(0), |
aoqi@0 | 180 | _grow_threshold((uintx)(size * _grow_load_factor)), |
aoqi@0 | 181 | _shrink_threshold((uintx)(size * _shrink_load_factor)), |
aoqi@0 | 182 | _rehash_needed(false), |
aoqi@0 | 183 | _hash_seed(hash_seed) { |
aoqi@0 | 184 | assert(is_power_of_2(size), "Table size must be a power of 2"); |
aoqi@0 | 185 | _buckets = NEW_C_HEAP_ARRAY(G1StringDedupEntry*, _size, mtGC); |
aoqi@0 | 186 | memset(_buckets, 0, _size * sizeof(G1StringDedupEntry*)); |
aoqi@0 | 187 | } |
aoqi@0 | 188 | |
G1StringDedupTable::~G1StringDedupTable() {
  // Only frees the bucket array. A table is deleted by finish_resize()/
  // finish_rehash() after all its entries have been transferred to the
  // replacement table, or returned to the entry cache during unlinking.
  FREE_C_HEAP_ARRAY(G1StringDedupEntry*, _buckets, mtGC);
}
aoqi@0 | 192 | |
void G1StringDedupTable::create() {
  assert(_table == NULL, "One string deduplication table allowed");
  // Create the shared entry cache and an initial, minimum sized table.
  _entry_cache = new G1StringDedupEntryCache();
  _table = new G1StringDedupTable(_min_size);
}
aoqi@0 | 198 | |
aoqi@0 | 199 | void G1StringDedupTable::add(typeArrayOop value, unsigned int hash, G1StringDedupEntry** list) { |
aoqi@0 | 200 | G1StringDedupEntry* entry = _entry_cache->alloc(); |
aoqi@0 | 201 | entry->set_obj(value); |
aoqi@0 | 202 | entry->set_hash(hash); |
aoqi@0 | 203 | entry->set_next(*list); |
aoqi@0 | 204 | *list = entry; |
aoqi@0 | 205 | _entries++; |
aoqi@0 | 206 | } |
aoqi@0 | 207 | |
aoqi@0 | 208 | void G1StringDedupTable::remove(G1StringDedupEntry** pentry, uint worker_id) { |
aoqi@0 | 209 | G1StringDedupEntry* entry = *pentry; |
aoqi@0 | 210 | *pentry = entry->next(); |
aoqi@0 | 211 | _entry_cache->free(entry, worker_id); |
aoqi@0 | 212 | } |
aoqi@0 | 213 | |
aoqi@0 | 214 | void G1StringDedupTable::transfer(G1StringDedupEntry** pentry, G1StringDedupTable* dest) { |
aoqi@0 | 215 | G1StringDedupEntry* entry = *pentry; |
aoqi@0 | 216 | *pentry = entry->next(); |
aoqi@0 | 217 | unsigned int hash = entry->hash(); |
aoqi@0 | 218 | size_t index = dest->hash_to_index(hash); |
aoqi@0 | 219 | G1StringDedupEntry** list = dest->bucket(index); |
aoqi@0 | 220 | entry->set_next(*list); |
aoqi@0 | 221 | *list = entry; |
aoqi@0 | 222 | } |
aoqi@0 | 223 | |
aoqi@0 | 224 | bool G1StringDedupTable::equals(typeArrayOop value1, typeArrayOop value2) { |
aoqi@0 | 225 | return (value1 == value2 || |
aoqi@0 | 226 | (value1->length() == value2->length() && |
aoqi@0 | 227 | (!memcmp(value1->base(T_CHAR), |
aoqi@0 | 228 | value2->base(T_CHAR), |
aoqi@0 | 229 | value1->length() * sizeof(jchar))))); |
aoqi@0 | 230 | } |
aoqi@0 | 231 | |
aoqi@0 | 232 | typeArrayOop G1StringDedupTable::lookup(typeArrayOop value, unsigned int hash, |
aoqi@0 | 233 | G1StringDedupEntry** list, uintx &count) { |
aoqi@0 | 234 | for (G1StringDedupEntry* entry = *list; entry != NULL; entry = entry->next()) { |
aoqi@0 | 235 | if (entry->hash() == hash) { |
aoqi@0 | 236 | typeArrayOop existing_value = entry->obj(); |
aoqi@0 | 237 | if (equals(value, existing_value)) { |
aoqi@0 | 238 | // Match found |
aoqi@0 | 239 | return existing_value; |
aoqi@0 | 240 | } |
aoqi@0 | 241 | } |
aoqi@0 | 242 | count++; |
aoqi@0 | 243 | } |
aoqi@0 | 244 | |
aoqi@0 | 245 | // Not found |
aoqi@0 | 246 | return NULL; |
aoqi@0 | 247 | } |
aoqi@0 | 248 | |
aoqi@0 | 249 | typeArrayOop G1StringDedupTable::lookup_or_add_inner(typeArrayOop value, unsigned int hash) { |
aoqi@0 | 250 | size_t index = hash_to_index(hash); |
aoqi@0 | 251 | G1StringDedupEntry** list = bucket(index); |
aoqi@0 | 252 | uintx count = 0; |
aoqi@0 | 253 | |
aoqi@0 | 254 | // Lookup in list |
aoqi@0 | 255 | typeArrayOop existing_value = lookup(value, hash, list, count); |
aoqi@0 | 256 | |
aoqi@0 | 257 | // Check if rehash is needed |
aoqi@0 | 258 | if (count > _rehash_threshold) { |
aoqi@0 | 259 | _rehash_needed = true; |
aoqi@0 | 260 | } |
aoqi@0 | 261 | |
aoqi@0 | 262 | if (existing_value == NULL) { |
aoqi@0 | 263 | // Not found, add new entry |
aoqi@0 | 264 | add(value, hash, list); |
aoqi@0 | 265 | |
aoqi@0 | 266 | // Update statistics |
aoqi@0 | 267 | _entries_added++; |
aoqi@0 | 268 | } |
aoqi@0 | 269 | |
aoqi@0 | 270 | return existing_value; |
aoqi@0 | 271 | } |
aoqi@0 | 272 | |
aoqi@0 | 273 | unsigned int G1StringDedupTable::hash_code(typeArrayOop value) { |
aoqi@0 | 274 | unsigned int hash; |
aoqi@0 | 275 | int length = value->length(); |
aoqi@0 | 276 | const jchar* data = (jchar*)value->base(T_CHAR); |
aoqi@0 | 277 | |
aoqi@0 | 278 | if (use_java_hash()) { |
aoqi@0 | 279 | hash = java_lang_String::hash_code(data, length); |
aoqi@0 | 280 | } else { |
aoqi@0 | 281 | hash = AltHashing::murmur3_32(_table->_hash_seed, data, length); |
aoqi@0 | 282 | } |
aoqi@0 | 283 | |
aoqi@0 | 284 | return hash; |
aoqi@0 | 285 | } |
aoqi@0 | 286 | |
aoqi@0 | 287 | void G1StringDedupTable::deduplicate(oop java_string, G1StringDedupStat& stat) { |
aoqi@0 | 288 | assert(java_lang_String::is_instance(java_string), "Must be a string"); |
aoqi@0 | 289 | No_Safepoint_Verifier nsv; |
aoqi@0 | 290 | |
aoqi@0 | 291 | stat.inc_inspected(); |
aoqi@0 | 292 | |
aoqi@0 | 293 | typeArrayOop value = java_lang_String::value(java_string); |
aoqi@0 | 294 | if (value == NULL) { |
aoqi@0 | 295 | // String has no value |
aoqi@0 | 296 | stat.inc_skipped(); |
aoqi@0 | 297 | return; |
aoqi@0 | 298 | } |
aoqi@0 | 299 | |
aoqi@0 | 300 | unsigned int hash = 0; |
aoqi@0 | 301 | |
aoqi@0 | 302 | if (use_java_hash()) { |
aoqi@0 | 303 | // Get hash code from cache |
aoqi@0 | 304 | hash = java_lang_String::hash(java_string); |
aoqi@0 | 305 | } |
aoqi@0 | 306 | |
aoqi@0 | 307 | if (hash == 0) { |
aoqi@0 | 308 | // Compute hash |
aoqi@0 | 309 | hash = hash_code(value); |
aoqi@0 | 310 | stat.inc_hashed(); |
aoqi@0 | 311 | } |
aoqi@0 | 312 | |
aoqi@0 | 313 | if (use_java_hash() && hash != 0) { |
aoqi@0 | 314 | // Store hash code in cache |
aoqi@0 | 315 | java_lang_String::set_hash(java_string, hash); |
aoqi@0 | 316 | } |
aoqi@0 | 317 | |
aoqi@0 | 318 | typeArrayOop existing_value = lookup_or_add(value, hash); |
aoqi@0 | 319 | if (existing_value == value) { |
aoqi@0 | 320 | // Same value, already known |
aoqi@0 | 321 | stat.inc_known(); |
aoqi@0 | 322 | return; |
aoqi@0 | 323 | } |
aoqi@0 | 324 | |
aoqi@0 | 325 | // Get size of value array |
aoqi@0 | 326 | uintx size_in_bytes = value->size() * HeapWordSize; |
aoqi@0 | 327 | stat.inc_new(size_in_bytes); |
aoqi@0 | 328 | |
aoqi@0 | 329 | if (existing_value != NULL) { |
aoqi@0 | 330 | // Enqueue the reference to make sure it is kept alive. Concurrent mark might |
aoqi@0 | 331 | // otherwise declare it dead if there are no other strong references to this object. |
aoqi@0 | 332 | G1SATBCardTableModRefBS::enqueue(existing_value); |
aoqi@0 | 333 | |
aoqi@0 | 334 | // Existing value found, deduplicate string |
aoqi@0 | 335 | java_lang_String::set_value(java_string, existing_value); |
aoqi@0 | 336 | |
aoqi@0 | 337 | if (G1CollectedHeap::heap()->is_in_young(value)) { |
aoqi@0 | 338 | stat.inc_deduped_young(size_in_bytes); |
aoqi@0 | 339 | } else { |
aoqi@0 | 340 | stat.inc_deduped_old(size_in_bytes); |
aoqi@0 | 341 | } |
aoqi@0 | 342 | } |
aoqi@0 | 343 | } |
aoqi@0 | 344 | |
aoqi@0 | 345 | G1StringDedupTable* G1StringDedupTable::prepare_resize() { |
aoqi@0 | 346 | size_t size = _table->_size; |
aoqi@0 | 347 | |
aoqi@0 | 348 | // Check if the hashtable needs to be resized |
aoqi@0 | 349 | if (_table->_entries > _table->_grow_threshold) { |
aoqi@0 | 350 | // Grow table, double the size |
aoqi@0 | 351 | size *= 2; |
aoqi@0 | 352 | if (size > _max_size) { |
aoqi@0 | 353 | // Too big, don't resize |
aoqi@0 | 354 | return NULL; |
aoqi@0 | 355 | } |
aoqi@0 | 356 | } else if (_table->_entries < _table->_shrink_threshold) { |
aoqi@0 | 357 | // Shrink table, half the size |
aoqi@0 | 358 | size /= 2; |
aoqi@0 | 359 | if (size < _min_size) { |
aoqi@0 | 360 | // Too small, don't resize |
aoqi@0 | 361 | return NULL; |
aoqi@0 | 362 | } |
aoqi@0 | 363 | } else if (StringDeduplicationResizeALot) { |
aoqi@0 | 364 | // Force grow |
aoqi@0 | 365 | size *= 2; |
aoqi@0 | 366 | if (size > _max_size) { |
aoqi@0 | 367 | // Too big, force shrink instead |
aoqi@0 | 368 | size /= 4; |
aoqi@0 | 369 | } |
aoqi@0 | 370 | } else { |
aoqi@0 | 371 | // Resize not needed |
aoqi@0 | 372 | return NULL; |
aoqi@0 | 373 | } |
aoqi@0 | 374 | |
aoqi@0 | 375 | // Update statistics |
aoqi@0 | 376 | _resize_count++; |
aoqi@0 | 377 | |
aoqi@0 | 378 | // Allocate the new table. The new table will be populated by workers |
aoqi@0 | 379 | // calling unlink_or_oops_do() and finally installed by finish_resize(). |
aoqi@0 | 380 | return new G1StringDedupTable(size, _table->_hash_seed); |
aoqi@0 | 381 | } |
aoqi@0 | 382 | |
aoqi@0 | 383 | void G1StringDedupTable::finish_resize(G1StringDedupTable* resized_table) { |
aoqi@0 | 384 | assert(resized_table != NULL, "Invalid table"); |
aoqi@0 | 385 | |
aoqi@0 | 386 | resized_table->_entries = _table->_entries; |
aoqi@0 | 387 | |
aoqi@0 | 388 | // Free old table |
aoqi@0 | 389 | delete _table; |
aoqi@0 | 390 | |
aoqi@0 | 391 | // Install new table |
aoqi@0 | 392 | _table = resized_table; |
aoqi@0 | 393 | } |
aoqi@0 | 394 | |
aoqi@0 | 395 | void G1StringDedupTable::unlink_or_oops_do(G1StringDedupUnlinkOrOopsDoClosure* cl, uint worker_id) { |
aoqi@0 | 396 | // The table is divided into partitions to allow lock-less parallel processing by |
aoqi@0 | 397 | // multiple worker threads. A worker thread first claims a partition, which ensures |
aoqi@0 | 398 | // exclusive access to that part of the table, then continues to process it. To allow |
aoqi@0 | 399 | // shrinking of the table in parallel we also need to make sure that the same worker |
aoqi@0 | 400 | // thread processes all partitions where entries will hash to the same destination |
aoqi@0 | 401 | // partition. Since the table size is always a power of two and we always shrink by |
aoqi@0 | 402 | // dividing the table in half, we know that for a given partition there is only one |
aoqi@0 | 403 | // other partition whoes entries will hash to the same destination partition. That |
aoqi@0 | 404 | // other partition is always the sibling partition in the second half of the table. |
aoqi@0 | 405 | // For example, if the table is divided into 8 partitions, the sibling of partition 0 |
aoqi@0 | 406 | // is partition 4, the sibling of partition 1 is partition 5, etc. |
aoqi@0 | 407 | size_t table_half = _table->_size / 2; |
aoqi@0 | 408 | |
aoqi@0 | 409 | // Let each partition be one page worth of buckets |
aoqi@0 | 410 | size_t partition_size = MIN2(table_half, os::vm_page_size() / sizeof(G1StringDedupEntry*)); |
aoqi@0 | 411 | assert(table_half % partition_size == 0, "Invalid partition size"); |
aoqi@0 | 412 | |
aoqi@0 | 413 | // Number of entries removed during the scan |
aoqi@0 | 414 | uintx removed = 0; |
aoqi@0 | 415 | |
aoqi@0 | 416 | for (;;) { |
aoqi@0 | 417 | // Grab next partition to scan |
aoqi@0 | 418 | size_t partition_begin = cl->claim_table_partition(partition_size); |
aoqi@0 | 419 | size_t partition_end = partition_begin + partition_size; |
aoqi@0 | 420 | if (partition_begin >= table_half) { |
aoqi@0 | 421 | // End of table |
aoqi@0 | 422 | break; |
aoqi@0 | 423 | } |
aoqi@0 | 424 | |
aoqi@0 | 425 | // Scan the partition followed by the sibling partition in the second half of the table |
aoqi@0 | 426 | removed += unlink_or_oops_do(cl, partition_begin, partition_end, worker_id); |
aoqi@0 | 427 | removed += unlink_or_oops_do(cl, table_half + partition_begin, table_half + partition_end, worker_id); |
aoqi@0 | 428 | } |
aoqi@0 | 429 | |
aoqi@0 | 430 | // Delayed update avoid contention on the table lock |
aoqi@0 | 431 | if (removed > 0) { |
aoqi@0 | 432 | MutexLockerEx ml(StringDedupTable_lock, Mutex::_no_safepoint_check_flag); |
aoqi@0 | 433 | _table->_entries -= removed; |
aoqi@0 | 434 | _entries_removed += removed; |
aoqi@0 | 435 | } |
aoqi@0 | 436 | } |
aoqi@0 | 437 | |
// Processes the buckets in [partition_begin, partition_end): dead entries are
// unlinked and returned to the entry cache, live entries are kept alive and,
// depending on the closure, transferred to a resized table or rehashed in
// place. Returns the number of entries removed.
uintx G1StringDedupTable::unlink_or_oops_do(G1StringDedupUnlinkOrOopsDoClosure* cl,
                                            size_t partition_begin,
                                            size_t partition_end,
                                            uint worker_id) {
  uintx removed = 0;
  for (size_t bucket = partition_begin; bucket < partition_end; bucket++) {
    G1StringDedupEntry** entry = _table->bucket(bucket);
    while (*entry != NULL) {
      oop* p = (oop*)(*entry)->obj_addr();
      if (cl->is_alive(*p)) {
        cl->keep_alive(p);
        if (cl->is_resizing()) {
          // We are resizing the table, transfer entry to the new table.
          // transfer() unlinks the entry, so *entry already refers to the
          // next entry and the cursor must not be advanced here.
          _table->transfer(entry, cl->resized_table());
        } else {
          if (cl->is_rehashing()) {
            // We are rehashing the table, rehash the entry but keep it
            // in the table. We can't transfer entries into the new table
            // at this point since we don't have exclusive access to all
            // destination partitions. finish_rehash() will do a single
            // threaded transfer of all entries.
            typeArrayOop value = (typeArrayOop)*p;
            unsigned int hash = hash_code(value);
            (*entry)->set_hash(hash);
          }

          // Move to next entry
          entry = (*entry)->next_addr();
        }
      } else {
        // Not alive, remove entry from table. remove() unlinks the entry,
        // so *entry already refers to the next entry in this bucket.
        _table->remove(entry, worker_id);
        removed++;
      }
    }
  }

  return removed;
}
aoqi@0 | 477 | |
aoqi@0 | 478 | G1StringDedupTable* G1StringDedupTable::prepare_rehash() { |
aoqi@0 | 479 | if (!_table->_rehash_needed && !StringDeduplicationRehashALot) { |
aoqi@0 | 480 | // Rehash not needed |
aoqi@0 | 481 | return NULL; |
aoqi@0 | 482 | } |
aoqi@0 | 483 | |
aoqi@0 | 484 | // Update statistics |
aoqi@0 | 485 | _rehash_count++; |
aoqi@0 | 486 | |
aoqi@0 | 487 | // Compute new hash seed |
aoqi@0 | 488 | _table->_hash_seed = AltHashing::compute_seed(); |
aoqi@0 | 489 | |
aoqi@0 | 490 | // Allocate the new table, same size and hash seed |
aoqi@0 | 491 | return new G1StringDedupTable(_table->_size, _table->_hash_seed); |
aoqi@0 | 492 | } |
aoqi@0 | 493 | |
aoqi@0 | 494 | void G1StringDedupTable::finish_rehash(G1StringDedupTable* rehashed_table) { |
aoqi@0 | 495 | assert(rehashed_table != NULL, "Invalid table"); |
aoqi@0 | 496 | |
aoqi@0 | 497 | // Move all newly rehashed entries into the correct buckets in the new table |
aoqi@0 | 498 | for (size_t bucket = 0; bucket < _table->_size; bucket++) { |
aoqi@0 | 499 | G1StringDedupEntry** entry = _table->bucket(bucket); |
aoqi@0 | 500 | while (*entry != NULL) { |
aoqi@0 | 501 | _table->transfer(entry, rehashed_table); |
aoqi@0 | 502 | } |
aoqi@0 | 503 | } |
aoqi@0 | 504 | |
aoqi@0 | 505 | rehashed_table->_entries = _table->_entries; |
aoqi@0 | 506 | |
aoqi@0 | 507 | // Free old table |
aoqi@0 | 508 | delete _table; |
aoqi@0 | 509 | |
aoqi@0 | 510 | // Install new table |
aoqi@0 | 511 | _table = rehashed_table; |
aoqi@0 | 512 | } |
aoqi@0 | 513 | |
aoqi@0 | 514 | void G1StringDedupTable::verify() { |
aoqi@0 | 515 | for (size_t bucket = 0; bucket < _table->_size; bucket++) { |
aoqi@0 | 516 | // Verify entries |
aoqi@0 | 517 | G1StringDedupEntry** entry = _table->bucket(bucket); |
aoqi@0 | 518 | while (*entry != NULL) { |
aoqi@0 | 519 | typeArrayOop value = (*entry)->obj(); |
aoqi@0 | 520 | guarantee(value != NULL, "Object must not be NULL"); |
aoqi@0 | 521 | guarantee(Universe::heap()->is_in_reserved(value), "Object must be on the heap"); |
aoqi@0 | 522 | guarantee(!value->is_forwarded(), "Object must not be forwarded"); |
aoqi@0 | 523 | guarantee(value->is_typeArray(), "Object must be a typeArrayOop"); |
aoqi@0 | 524 | unsigned int hash = hash_code(value); |
aoqi@0 | 525 | guarantee((*entry)->hash() == hash, "Table entry has inorrect hash"); |
aoqi@0 | 526 | guarantee(_table->hash_to_index(hash) == bucket, "Table entry has incorrect index"); |
aoqi@0 | 527 | entry = (*entry)->next_addr(); |
aoqi@0 | 528 | } |
aoqi@0 | 529 | |
aoqi@0 | 530 | // Verify that we do not have entries with identical oops or identical arrays. |
aoqi@0 | 531 | // We only need to compare entries in the same bucket. If the same oop or an |
aoqi@0 | 532 | // identical array has been inserted more than once into different/incorrect |
aoqi@0 | 533 | // buckets the verification step above will catch that. |
aoqi@0 | 534 | G1StringDedupEntry** entry1 = _table->bucket(bucket); |
aoqi@0 | 535 | while (*entry1 != NULL) { |
aoqi@0 | 536 | typeArrayOop value1 = (*entry1)->obj(); |
aoqi@0 | 537 | G1StringDedupEntry** entry2 = (*entry1)->next_addr(); |
aoqi@0 | 538 | while (*entry2 != NULL) { |
aoqi@0 | 539 | typeArrayOop value2 = (*entry2)->obj(); |
aoqi@0 | 540 | guarantee(!equals(value1, value2), "Table entries must not have identical arrays"); |
aoqi@0 | 541 | entry2 = (*entry2)->next_addr(); |
aoqi@0 | 542 | } |
aoqi@0 | 543 | entry1 = (*entry1)->next_addr(); |
aoqi@0 | 544 | } |
aoqi@0 | 545 | } |
aoqi@0 | 546 | } |
aoqi@0 | 547 | |
aoqi@0 | 548 | void G1StringDedupTable::trim_entry_cache() { |
aoqi@0 | 549 | MutexLockerEx ml(StringDedupTable_lock, Mutex::_no_safepoint_check_flag); |
aoqi@0 | 550 | size_t max_cache_size = (size_t)(_table->_size * _max_cache_factor); |
aoqi@0 | 551 | _entry_cache->trim(max_cache_size); |
aoqi@0 | 552 | } |
aoqi@0 | 553 | |
// Prints table statistics (memory usage, size, load, cache occupancy, and
// resize/rehash counters) to the given stream.
void G1StringDedupTable::print_statistics(outputStream* st) {
  st->print_cr(
    " [Table]\n"
    " [Memory Usage: "G1_STRDEDUP_BYTES_FORMAT_NS"]\n"
    " [Size: "SIZE_FORMAT", Min: "SIZE_FORMAT", Max: "SIZE_FORMAT"]\n"
    " [Entries: "UINTX_FORMAT", Load: "G1_STRDEDUP_PERCENT_FORMAT_NS", Cached: " UINTX_FORMAT ", Added: "UINTX_FORMAT", Removed: "UINTX_FORMAT"]\n"
    " [Resize Count: "UINTX_FORMAT", Shrink Threshold: "UINTX_FORMAT"("G1_STRDEDUP_PERCENT_FORMAT_NS"), Grow Threshold: "UINTX_FORMAT"("G1_STRDEDUP_PERCENT_FORMAT_NS")]\n"
    " [Rehash Count: "UINTX_FORMAT", Rehash Threshold: "UINTX_FORMAT", Hash Seed: 0x%x]\n"
    " [Age Threshold: "UINTX_FORMAT"]",
    // Memory usage: bucket array plus all entries, both in-table and cached
    G1_STRDEDUP_BYTES_PARAM(_table->_size * sizeof(G1StringDedupEntry*) + (_table->_entries + _entry_cache->size()) * sizeof(G1StringDedupEntry)),
    _table->_size, _min_size, _max_size,
    _table->_entries, (double)_table->_entries / (double)_table->_size * 100.0, _entry_cache->size(), _entries_added, _entries_removed,
    _resize_count, _table->_shrink_threshold, _shrink_load_factor * 100.0, _table->_grow_threshold, _grow_load_factor * 100.0,
    _rehash_count, _rehash_threshold, _table->_hash_seed,
    StringDeduplicationAgeThreshold);
}