/*
 * Copyright (c) 2014, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/altHashing.hpp"
#include "classfile/javaClasses.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
#include "gc_implementation/g1/g1StringDedupTable.hpp"
#include "gc_implementation/shared/concurrentGCThread.hpp"
#include "memory/gcLocker.hpp"
#include "memory/padded.inline.hpp"
#include "oops/typeArrayOop.hpp"
#include "runtime/mutexLocker.hpp"

//
// List of deduplication table entries. Links table
// entries together using their _next fields.
//
class G1StringDedupEntryList : public CHeapObj<mtGC> {
private:
  G1StringDedupEntry* _list;
  size_t              _length;

public:
  G1StringDedupEntryList() :
    _list(NULL),
    _length(0) {
  }

  void add(G1StringDedupEntry* entry) {
    entry->set_next(_list);
    _list = entry;
    _length++;
  }

  G1StringDedupEntry* remove() {
    G1StringDedupEntry* entry = _list;
    if (entry != NULL) {
      _list = entry->next();
      _length--;
    }
    return entry;
  }

  G1StringDedupEntry* remove_all() {
    G1StringDedupEntry* list = _list;
    _list = NULL;
    return list;
  }

  size_t length() {
    return _length;
  }
};

//
// Cache of deduplication table entries. This cache provides fast allocation and
// reuse of table entries to lower the pressure on the underlying allocator.
// But more importantly, it provides fast/deferred freeing of table entries. This
// is important because freeing of table entries is done during stop-the-world
// phases and it is not uncommon for a large number of entries to be freed at once.
// Table entries that are freed during these phases are placed onto a freelist in
// the cache. The deduplication thread, which executes in a concurrent phase, will
// later reuse or free the underlying memory for these entries.
//
// The cache allows for single-threaded allocations and multi-threaded frees.
// Allocations are synchronized by StringDedupTable_lock as part of a table
// modification.
//
class G1StringDedupEntryCache : public CHeapObj<mtGC> {
private:
  // One cache/overflow list per GC worker to allow lock-less freeing of
  // entries while doing a parallel scan of the table. Using PaddedEnd to
  // avoid false sharing.
  size_t                             _nlists;
  size_t                             _max_list_length;
  PaddedEnd<G1StringDedupEntryList>* _cached;
  PaddedEnd<G1StringDedupEntryList>* _overflowed;

public:
  G1StringDedupEntryCache(size_t max_size);
  ~G1StringDedupEntryCache();

  // Set max number of table entries to cache.
  void set_max_size(size_t max_size);

  // Get a table entry from the cache, or allocate a new entry if the cache is empty.
  G1StringDedupEntry* alloc();

  // Insert a table entry into the cache.
  void free(G1StringDedupEntry* entry, uint worker_id);

  // Returns current number of entries in the cache.
  size_t size();

  // Deletes overflowed entries.
  void delete_overflowed();
};

G1StringDedupEntryCache::G1StringDedupEntryCache(size_t max_size) :
  _nlists(MAX2(ParallelGCThreads, (size_t)1)),
  _max_list_length(0),
  _cached(PaddedArray<G1StringDedupEntryList, mtGC>::create_unfreeable((uint)_nlists)),
  _overflowed(PaddedArray<G1StringDedupEntryList, mtGC>::create_unfreeable((uint)_nlists)) {
  set_max_size(max_size);
}

G1StringDedupEntryCache::~G1StringDedupEntryCache() {
  ShouldNotReachHere();
}

void G1StringDedupEntryCache::set_max_size(size_t size) {
  _max_list_length = size / _nlists;
}

G1StringDedupEntry* G1StringDedupEntryCache::alloc() {
  for (size_t i = 0; i < _nlists; i++) {
    G1StringDedupEntry* entry = _cached[i].remove();
    if (entry != NULL) {
      return entry;
    }
  }
  return new G1StringDedupEntry();
}

void G1StringDedupEntryCache::free(G1StringDedupEntry* entry, uint worker_id) {
  assert(entry->obj() != NULL, "Double free");
  assert(worker_id < _nlists, "Invalid worker id");

  entry->set_obj(NULL);
  entry->set_hash(0);

  if (_cached[worker_id].length() < _max_list_length) {
    // Cache is not full
    _cached[worker_id].add(entry);
  } else {
    // Cache is full, add to overflow list for later deletion
    _overflowed[worker_id].add(entry);
  }
}

size_t G1StringDedupEntryCache::size() {
  size_t size = 0;
  for (size_t i = 0; i < _nlists; i++) {
    size += _cached[i].length();
  }
  return size;
}

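// Deletes all entries on the per-worker overflow lists. This runs on the
// deduplication thread during the concurrent phase, so the memory for
// overflowed entries is freed outside of the stop-the-world pauses that
// placed them on the lists.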
void G1StringDedupEntryCache::delete_overflowed() {
  double start = os::elapsedTime();
  uintx count = 0;

  for (size_t i = 0; i < _nlists; i++) {
    G1StringDedupEntry* entry;

    {
      // The overflow list can be modified during safepoints, therefore
      // we temporarily join the suspendible thread set while removing
      // all entries from the list.
      SuspendibleThreadSetJoiner sts_join;
      entry = _overflowed[i].remove_all();
    }

    // Delete all entries
    while (entry != NULL) {
      G1StringDedupEntry* next = entry->next();
      delete entry;
      entry = next;
      count++;
    }
  }

  double end = os::elapsedTime();
  if (PrintStringDeduplicationStatistics) {
    gclog_or_tty->print_cr("[GC concurrent-string-deduplication, deleted " UINTX_FORMAT " entries, " G1_STRDEDUP_TIME_FORMAT "]", count, end - start);
  }
}

G1StringDedupTable*      G1StringDedupTable::_table = NULL;
G1StringDedupEntryCache* G1StringDedupTable::_entry_cache = NULL;

const size_t G1StringDedupTable::_min_size = (1 << 10);   // 1024
const size_t G1StringDedupTable::_max_size = (1 << 24);   // 16777216
const double G1StringDedupTable::_grow_load_factor = 2.0; // Grow table at 200% load
const double G1StringDedupTable::_shrink_load_factor = _grow_load_factor / 3.0; // Shrink table at 67% load
const double G1StringDedupTable::_max_cache_factor = 0.1; // Cache a maximum of 10% of the table size
const uintx  G1StringDedupTable::_rehash_multiple = 60;   // Hash bucket has 60 times more collisions than expected
const uintx  G1StringDedupTable::_rehash_threshold = (uintx)(_rehash_multiple * _grow_load_factor);

uintx G1StringDedupTable::_entries_added = 0;
uintx G1StringDedupTable::_entries_removed = 0;
uintx G1StringDedupTable::_resize_count = 0;
uintx G1StringDedupTable::_rehash_count = 0;

G1StringDedupTable::G1StringDedupTable(size_t size, uint64_t hash_seed) :
  _size(size),
  _entries(0),
  _grow_threshold((uintx)(size * _grow_load_factor)),
  _shrink_threshold((uintx)(size * _shrink_load_factor)),
  _rehash_needed(false),
  _hash_seed(hash_seed) {
  assert(is_power_of_2(size), "Table size must be a power of 2");
  _buckets = NEW_C_HEAP_ARRAY(G1StringDedupEntry*, _size, mtGC);
  memset(_buckets, 0, _size * sizeof(G1StringDedupEntry*));
}

G1StringDedupTable::~G1StringDedupTable() {
  FREE_C_HEAP_ARRAY(G1StringDedupEntry*, _buckets, mtGC);
}

void G1StringDedupTable::create() {
  assert(_table == NULL, "One string deduplication table allowed");
  _entry_cache = new G1StringDedupEntryCache((size_t)(_min_size * _max_cache_factor));
  _table = new G1StringDedupTable(_min_size);
}

void G1StringDedupTable::add(typeArrayOop value, unsigned int hash, G1StringDedupEntry** list) {
  G1StringDedupEntry* entry = _entry_cache->alloc();
  entry->set_obj(value);
  entry->set_hash(hash);
  entry->set_next(*list);
  *list = entry;
  _entries++;
}

void G1StringDedupTable::remove(G1StringDedupEntry** pentry, uint worker_id) {
  G1StringDedupEntry* entry = *pentry;
  *pentry = entry->next();
  _entry_cache->free(entry, worker_id);
}

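// Unlinks an entry from its current bucket and links it into the corresponding
// bucket of the destination table. The entry itself is reused; only its list
// linkage changes. Used when resizing the table and by finish_rehash().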
void G1StringDedupTable::transfer(G1StringDedupEntry** pentry, G1StringDedupTable* dest) {
  G1StringDedupEntry* entry = *pentry;
  *pentry = entry->next();
  unsigned int hash = entry->hash();
  size_t index = dest->hash_to_index(hash);
  G1StringDedupEntry** list = dest->bucket(index);
  entry->set_next(*list);
  *list = entry;
}

bool G1StringDedupTable::equals(typeArrayOop value1, typeArrayOop value2) {
  return (value1 == value2 ||
          (value1->length() == value2->length() &&
           (!memcmp(value1->base(T_CHAR),
                    value2->base(T_CHAR),
                    value1->length() * sizeof(jchar)))));
}

typeArrayOop G1StringDedupTable::lookup(typeArrayOop value, unsigned int hash,
                                        G1StringDedupEntry** list, uintx &count) {
  for (G1StringDedupEntry* entry = *list; entry != NULL; entry = entry->next()) {
    if (entry->hash() == hash) {
      typeArrayOop existing_value = entry->obj();
      if (equals(value, existing_value)) {
        // Match found
        return existing_value;
      }
    }
    count++;
  }

  // Not found
  return NULL;
}

typeArrayOop G1StringDedupTable::lookup_or_add_inner(typeArrayOop value, unsigned int hash) {
  size_t index = hash_to_index(hash);
  G1StringDedupEntry** list = bucket(index);
  uintx count = 0;

  // Lookup in list
  typeArrayOop existing_value = lookup(value, hash, list, count);

  // Check if rehash is needed
  if (count > _rehash_threshold) {
    _rehash_needed = true;
  }

  if (existing_value == NULL) {
    // Not found, add new entry
    add(value, hash, list);

    // Update statistics
    _entries_added++;
  }

  return existing_value;
}

unsigned int G1StringDedupTable::hash_code(typeArrayOop value) {
  unsigned int hash;
  int length = value->length();
  const jchar* data = (jchar*)value->base(T_CHAR);

  if (use_java_hash()) {
    hash = java_lang_String::hash_code(data, length);
  } else {
    hash = AltHashing::halfsiphash_32(_table->_hash_seed, (const uint16_t*)data, length);
  }
  return hash;
}

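// Attempts to deduplicate the given String object. The character array backing
// the string is hashed and looked up in the table. If an identical array is
// already present, the string is updated to share it; otherwise the array is
// added to the table so that later strings can be deduplicated against it.
// Statistics are recorded in stat.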
void G1StringDedupTable::deduplicate(oop java_string, G1StringDedupStat& stat) {
  assert(java_lang_String::is_instance(java_string), "Must be a string");
  No_Safepoint_Verifier nsv;

  stat.inc_inspected();

  typeArrayOop value = java_lang_String::value(java_string);
  if (value == NULL) {
    // String has no value
    stat.inc_skipped();
    return;
  }

  unsigned int hash = 0;

  if (use_java_hash()) {
    // Get hash code from cache
    hash = java_lang_String::hash(java_string);
  }

  if (hash == 0) {
    // Compute hash
    hash = hash_code(value);
    stat.inc_hashed();
  }

  if (use_java_hash() && hash != 0) {
    // Store hash code in cache
    java_lang_String::set_hash(java_string, hash);
  }

  typeArrayOop existing_value = lookup_or_add(value, hash);
  if (existing_value == value) {
    // Same value, already known
    stat.inc_known();
    return;
  }

  // Get size of value array
  uintx size_in_bytes = value->size() * HeapWordSize;
  stat.inc_new(size_in_bytes);

  if (existing_value != NULL) {
    // Enqueue the reference to make sure it is kept alive. Concurrent mark might
    // otherwise declare it dead if there are no other strong references to this object.
    G1SATBCardTableModRefBS::enqueue(existing_value);

    // Existing value found, deduplicate string
    java_lang_String::set_value(java_string, existing_value);

    if (G1CollectedHeap::heap()->is_in_young(value)) {
      stat.inc_deduped_young(size_in_bytes);
    } else {
      stat.inc_deduped_old(size_in_bytes);
    }
  }
}

G1StringDedupTable* G1StringDedupTable::prepare_resize() {
  size_t size = _table->_size;

  // Check if the hashtable needs to be resized
  if (_table->_entries > _table->_grow_threshold) {
    // Grow table, double the size
    size *= 2;
    if (size > _max_size) {
      // Too big, don't resize
      return NULL;
    }
  } else if (_table->_entries < _table->_shrink_threshold) {
    // Shrink table, halve the size
    size /= 2;
    if (size < _min_size) {
      // Too small, don't resize
      return NULL;
    }
  } else if (StringDeduplicationResizeALot) {
    // Force grow
    size *= 2;
    if (size > _max_size) {
      // Too big, force shrink instead
      size /= 4;
    }
  } else {
    // Resize not needed
    return NULL;
  }

  // Update statistics
  _resize_count++;

  // Update max cache size
  _entry_cache->set_max_size((size_t)(size * _max_cache_factor));

  // Allocate the new table. The new table will be populated by workers
  // calling unlink_or_oops_do() and finally installed by finish_resize().
  return new G1StringDedupTable(size, _table->_hash_seed);
}

void G1StringDedupTable::finish_resize(G1StringDedupTable* resized_table) {
  assert(resized_table != NULL, "Invalid table");

  resized_table->_entries = _table->_entries;

  // Free old table
  delete _table;

  // Install new table
  _table = resized_table;
}

void G1StringDedupTable::unlink_or_oops_do(G1StringDedupUnlinkOrOopsDoClosure* cl, uint worker_id) {
  // The table is divided into partitions to allow lock-less parallel processing by
  // multiple worker threads. A worker thread first claims a partition, which ensures
  // exclusive access to that part of the table, then continues to process it. To allow
  // shrinking of the table in parallel we also need to make sure that the same worker
  // thread processes all partitions where entries will hash to the same destination
  // partition. Since the table size is always a power of two and we always shrink by
  // dividing the table in half, we know that for a given partition there is only one
  // other partition whose entries will hash to the same destination partition. That
  // other partition is always the sibling partition in the second half of the table.
  // For example, if the table is divided into 8 partitions, the sibling of partition 0
  // is partition 4, the sibling of partition 1 is partition 5, etc.
  size_t table_half = _table->_size / 2;

  // Let each partition be one page worth of buckets
  size_t partition_size = MIN2(table_half, os::vm_page_size() / sizeof(G1StringDedupEntry*));
  assert(table_half % partition_size == 0, "Invalid partition size");

  // Number of entries removed during the scan
  uintx removed = 0;

  for (;;) {
    // Grab next partition to scan
    size_t partition_begin = cl->claim_table_partition(partition_size);
    size_t partition_end = partition_begin + partition_size;
    if (partition_begin >= table_half) {
      // End of table
      break;
    }

    // Scan the partition followed by the sibling partition in the second half of the table
    removed += unlink_or_oops_do(cl, partition_begin, partition_end, worker_id);
    removed += unlink_or_oops_do(cl, table_half + partition_begin, table_half + partition_end, worker_id);
  }

  // Delayed update to avoid contention on the table lock
  if (removed > 0) {
    MutexLockerEx ml(StringDedupTable_lock, Mutex::_no_safepoint_check_flag);
    _table->_entries -= removed;
    _entries_removed += removed;
  }
}

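// Scans the buckets in the range [partition_begin, partition_end). Entries
// whose referenced array is still alive are kept (and transferred to the new
// table when resizing, or rehashed in place when rehashing), while entries
// referencing dead arrays are unlinked. Returns the number of entries removed.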
uintx G1StringDedupTable::unlink_or_oops_do(G1StringDedupUnlinkOrOopsDoClosure* cl,
                                            size_t partition_begin,
                                            size_t partition_end,
                                            uint worker_id) {
  uintx removed = 0;
  for (size_t bucket = partition_begin; bucket < partition_end; bucket++) {
    G1StringDedupEntry** entry = _table->bucket(bucket);
    while (*entry != NULL) {
      oop* p = (oop*)(*entry)->obj_addr();
      if (cl->is_alive(*p)) {
        cl->keep_alive(p);
        if (cl->is_resizing()) {
          // We are resizing the table, transfer entry to the new table
          _table->transfer(entry, cl->resized_table());
        } else {
          if (cl->is_rehashing()) {
            // We are rehashing the table, rehash the entry but keep it
            // in the table. We can't transfer entries into the new table
            // at this point since we don't have exclusive access to all
            // destination partitions. finish_rehash() will do a single
            // threaded transfer of all entries.
            typeArrayOop value = (typeArrayOop)*p;
            unsigned int hash = hash_code(value);
            (*entry)->set_hash(hash);
          }

          // Move to next entry
          entry = (*entry)->next_addr();
        }
      } else {
        // Not alive, remove entry from table
        _table->remove(entry, worker_id);
        removed++;
      }
    }
  }

  return removed;
}

G1StringDedupTable* G1StringDedupTable::prepare_rehash() {
  if (!_table->_rehash_needed && !StringDeduplicationRehashALot) {
    // Rehash not needed
    return NULL;
  }

  // Update statistics
  _rehash_count++;

  // Compute new hash seed
  _table->_hash_seed = AltHashing::compute_seed();

  // Allocate the new table, same size and hash seed
  return new G1StringDedupTable(_table->_size, _table->_hash_seed);
}

void G1StringDedupTable::finish_rehash(G1StringDedupTable* rehashed_table) {
  assert(rehashed_table != NULL, "Invalid table");

  // Move all newly rehashed entries into the correct buckets in the new table
  for (size_t bucket = 0; bucket < _table->_size; bucket++) {
    G1StringDedupEntry** entry = _table->bucket(bucket);
    while (*entry != NULL) {
      _table->transfer(entry, rehashed_table);
    }
  }

  rehashed_table->_entries = _table->_entries;

  // Free old table
  delete _table;

  // Install new table
  _table = rehashed_table;
}

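// Verifies the integrity of the table: every entry must reference a non-NULL,
// non-forwarded typeArrayOop on the heap, must hash to the bucket it is linked
// into, and no bucket may contain two entries with identical arrays.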
void G1StringDedupTable::verify() {
  for (size_t bucket = 0; bucket < _table->_size; bucket++) {
    // Verify entries
    G1StringDedupEntry** entry = _table->bucket(bucket);
    while (*entry != NULL) {
      typeArrayOop value = (*entry)->obj();
      guarantee(value != NULL, "Object must not be NULL");
      guarantee(Universe::heap()->is_in_reserved(value), "Object must be on the heap");
      guarantee(!value->is_forwarded(), "Object must not be forwarded");
      guarantee(value->is_typeArray(), "Object must be a typeArrayOop");
      unsigned int hash = hash_code(value);
      guarantee((*entry)->hash() == hash, "Table entry has incorrect hash");
      guarantee(_table->hash_to_index(hash) == bucket, "Table entry has incorrect index");
      entry = (*entry)->next_addr();
    }

    // Verify that we do not have entries with identical oops or identical arrays.
    // We only need to compare entries in the same bucket. If the same oop or an
    // identical array has been inserted more than once into different/incorrect
    // buckets the verification step above will catch that.
    G1StringDedupEntry** entry1 = _table->bucket(bucket);
    while (*entry1 != NULL) {
      typeArrayOop value1 = (*entry1)->obj();
      G1StringDedupEntry** entry2 = (*entry1)->next_addr();
      while (*entry2 != NULL) {
        typeArrayOop value2 = (*entry2)->obj();
        guarantee(!equals(value1, value2), "Table entries must not have identical arrays");
        entry2 = (*entry2)->next_addr();
      }
      entry1 = (*entry1)->next_addr();
    }
  }
}

void G1StringDedupTable::clean_entry_cache() {
  _entry_cache->delete_overflowed();
}

void G1StringDedupTable::print_statistics(outputStream* st) {
  st->print_cr(
    "   [Table]\n"
    "      [Memory Usage: " G1_STRDEDUP_BYTES_FORMAT_NS "]\n"
    "      [Size: " SIZE_FORMAT ", Min: " SIZE_FORMAT ", Max: " SIZE_FORMAT "]\n"
    "      [Entries: " UINTX_FORMAT ", Load: " G1_STRDEDUP_PERCENT_FORMAT_NS ", Cached: " UINTX_FORMAT ", Added: " UINTX_FORMAT ", Removed: " UINTX_FORMAT "]\n"
    "      [Resize Count: " UINTX_FORMAT ", Shrink Threshold: " UINTX_FORMAT "(" G1_STRDEDUP_PERCENT_FORMAT_NS "), Grow Threshold: " UINTX_FORMAT "(" G1_STRDEDUP_PERCENT_FORMAT_NS ")]\n"
    "      [Rehash Count: " UINTX_FORMAT ", Rehash Threshold: " UINTX_FORMAT ", Hash Seed: " UINT64_FORMAT "]\n"
    "      [Age Threshold: " UINTX_FORMAT "]",
    G1_STRDEDUP_BYTES_PARAM(_table->_size * sizeof(G1StringDedupEntry*) + (_table->_entries + _entry_cache->size()) * sizeof(G1StringDedupEntry)),
    _table->_size, _min_size, _max_size,
    _table->_entries, (double)_table->_entries / (double)_table->_size * 100.0, _entry_cache->size(), _entries_added, _entries_removed,
    _resize_count, _table->_shrink_threshold, _shrink_load_factor * 100.0, _table->_grow_threshold, _grow_load_factor * 100.0,
    _rehash_count, _rehash_threshold, _table->_hash_seed,
    StringDeduplicationAgeThreshold);
}