src/share/vm/gc_implementation/g1/g1StringDedupTable.cpp

changeset 6413
595c0f60d50d
parent 0
f90c822e73f8
child 8452
04a62a3d51d7
     1.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     1.2 +++ b/src/share/vm/gc_implementation/g1/g1StringDedupTable.cpp	Tue Mar 18 19:07:22 2014 +0100
     1.3 @@ -0,0 +1,569 @@
     1.4 +/*
     1.5 + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
     1.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     1.7 + *
     1.8 + * This code is free software; you can redistribute it and/or modify it
     1.9 + * under the terms of the GNU General Public License version 2 only, as
    1.10 + * published by the Free Software Foundation.
    1.11 + *
    1.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
    1.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    1.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    1.15 + * version 2 for more details (a copy is included in the LICENSE file that
    1.16 + * accompanied this code).
    1.17 + *
    1.18 + * You should have received a copy of the GNU General Public License version
    1.19 + * 2 along with this work; if not, write to the Free Software Foundation,
    1.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    1.21 + *
    1.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
    1.23 + * or visit www.oracle.com if you need additional information or have any
    1.24 + * questions.
    1.25 + *
    1.26 + */
    1.27 +
    1.28 +#include "precompiled.hpp"
    1.29 +#include "classfile/altHashing.hpp"
    1.30 +#include "classfile/javaClasses.hpp"
    1.31 +#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
    1.32 +#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
    1.33 +#include "gc_implementation/g1/g1StringDedupTable.hpp"
    1.34 +#include "memory/gcLocker.hpp"
    1.35 +#include "memory/padded.inline.hpp"
    1.36 +#include "oops/typeArrayOop.hpp"
    1.37 +#include "runtime/mutexLocker.hpp"
    1.38 +
    1.39 +//
    1.40 +// Freelist in the deduplication table entry cache. Links table
    1.41 +// entries together using their _next fields.
    1.42 +//
    1.43 +class G1StringDedupEntryFreeList : public CHeapObj<mtGC> {
    1.44 +private:
    1.45 +  G1StringDedupEntry* _list;
    1.46 +  size_t              _length;
    1.47 +
    1.48 +public:
    1.49 +  G1StringDedupEntryFreeList() :
    1.50 +    _list(NULL),
    1.51 +    _length(0) {
    1.52 +  }
    1.53 +
    1.54 +  void add(G1StringDedupEntry* entry) {
    1.55 +    entry->set_next(_list);
    1.56 +    _list = entry;
    1.57 +    _length++;
    1.58 +  }
    1.59 +
    1.60 +  G1StringDedupEntry* remove() {
    1.61 +    G1StringDedupEntry* entry = _list;
    1.62 +    if (entry != NULL) {
    1.63 +      _list = entry->next();
    1.64 +      _length--;
    1.65 +    }
    1.66 +    return entry;
    1.67 +  }
    1.68 +
    1.69 +  size_t length() {
    1.70 +    return _length;
    1.71 +  }
    1.72 +};
    1.73 +
    1.74 +//
    1.75 +// Cache of deduplication table entries. This cache provides fast allocation and
    1.76 +// reuse of table entries to lower the pressure on the underlying allocator.
    1.77 +// But more importantly, it provides fast/deferred freeing of table entries. This
    1.78 +// is important because freeing of table entries is done during stop-the-world
    1.79 +// phases and it is not uncommon for large number of entries to be freed at once.
+// Table entries that are freed during these phases are placed onto a freelist in
    1.81 +// the cache. The deduplication thread, which executes in a concurrent phase, will
    1.82 +// later reuse or free the underlying memory for these entries.
    1.83 +//
    1.84 +// The cache allows for single-threaded allocations and multi-threaded frees.
    1.85 +// Allocations are synchronized by StringDedupTable_lock as part of a table
    1.86 +// modification.
    1.87 +//
class G1StringDedupEntryCache : public CHeapObj<mtGC> {
private:
  // One freelist per GC worker to allow lock less freeing of
  // entries while doing a parallel scan of the table. Using
  // PaddedEnd to avoid false sharing.
  PaddedEnd<G1StringDedupEntryFreeList>* _lists;
  size_t                                 _nlists;  // Number of freelists, one per GC worker

public:
  G1StringDedupEntryCache();
  ~G1StringDedupEntryCache();

  // Get a table entry from the cache freelist, or allocate a new
  // entry if the cache is empty.
  G1StringDedupEntry* alloc();

  // Insert a table entry into the cache freelist. worker_id selects
  // the per-worker freelist, which is what makes concurrent frees
  // from a parallel table scan lock-free.
  void free(G1StringDedupEntry* entry, uint worker_id);

  // Returns current number of entries in the cache.
  size_t size();

  // If the cache has grown above the given max size, trim it down
  // and deallocate the memory occupied by the trimmed-off entries.
  void trim(size_t max_size);
};
   1.114 +
   1.115 +G1StringDedupEntryCache::G1StringDedupEntryCache() {
   1.116 +  _nlists = MAX2(ParallelGCThreads, (size_t)1);
   1.117 +  _lists = PaddedArray<G1StringDedupEntryFreeList, mtGC>::create_unfreeable((uint)_nlists);
   1.118 +}
   1.119 +
G1StringDedupEntryCache::~G1StringDedupEntryCache() {
  // The cache lives for the lifetime of the VM and is never destroyed,
  // so reaching this destructor indicates a bug.
  ShouldNotReachHere();
}
   1.123 +
   1.124 +G1StringDedupEntry* G1StringDedupEntryCache::alloc() {
   1.125 +  for (size_t i = 0; i < _nlists; i++) {
   1.126 +    G1StringDedupEntry* entry = _lists[i].remove();
   1.127 +    if (entry != NULL) {
   1.128 +      return entry;
   1.129 +    }
   1.130 +  }
   1.131 +  return new G1StringDedupEntry();
   1.132 +}
   1.133 +
void G1StringDedupEntryCache::free(G1StringDedupEntry* entry, uint worker_id) {
  assert(entry->obj() != NULL, "Double free");
  assert(worker_id < _nlists, "Invalid worker id");
  // Clear the entry before caching it; a NULL obj marks a cached
  // (free) entry, which is what the double-free assert above checks.
  entry->set_obj(NULL);
  entry->set_hash(0);
  // Each worker pushes onto its own freelist, so no lock is needed.
  _lists[worker_id].add(entry);
}
   1.141 +
   1.142 +size_t G1StringDedupEntryCache::size() {
   1.143 +  size_t size = 0;
   1.144 +  for (size_t i = 0; i < _nlists; i++) {
   1.145 +    size += _lists[i].length();
   1.146 +  }
   1.147 +  return size;
   1.148 +}
   1.149 +
void G1StringDedupEntryCache::trim(size_t max_size) {
  // Deletes cached entries until the total number of entries is at most
  // max_size. Invariant: on entry to iteration i, cache_size (the sum of
  // the lengths of lists 0..i-1 after trimming) is <= max_size, so any
  // excess after adding list i's length can always be removed from list i
  // itself - which is why the remove() below can assert non-NULL.
  size_t cache_size = 0;
  for (size_t i = 0; i < _nlists; i++) {
    G1StringDedupEntryFreeList* list = &_lists[i];
    cache_size += list->length();
    while (cache_size > max_size) {
      G1StringDedupEntry* entry = list->remove();
      assert(entry != NULL, "Should not be null");
      cache_size--;
      delete entry;
    }
  }
}
   1.163 +
// Singleton table and entry cache, installed by create().
G1StringDedupTable*      G1StringDedupTable::_table = NULL;
G1StringDedupEntryCache* G1StringDedupTable::_entry_cache = NULL;

// Tuning constants controlling table resizing and rehashing.
const size_t             G1StringDedupTable::_min_size = (1 << 10);   // 1024
const size_t             G1StringDedupTable::_max_size = (1 << 24);   // 16777216
const double             G1StringDedupTable::_grow_load_factor = 2.0; // Grow table at 200% load
const double             G1StringDedupTable::_shrink_load_factor = _grow_load_factor / 3.0; // Shrink table at 67% load
const double             G1StringDedupTable::_max_cache_factor = 0.1; // Cache a maximum of 10% of the table size
const uintx              G1StringDedupTable::_rehash_multiple = 60;   // Hash bucket has 60 times more collisions than expected
const uintx              G1StringDedupTable::_rehash_threshold = (uintx)(_rehash_multiple * _grow_load_factor);

// Cumulative statistics, reported by print_statistics().
uintx                    G1StringDedupTable::_entries_added = 0;
uintx                    G1StringDedupTable::_entries_removed = 0;
uintx                    G1StringDedupTable::_resize_count = 0;
uintx                    G1StringDedupTable::_rehash_count = 0;
   1.179 +
G1StringDedupTable::G1StringDedupTable(size_t size, jint hash_seed) :
  _size(size),
  _entries(0),
  _grow_threshold((uintx)(size * _grow_load_factor)),
  _shrink_threshold((uintx)(size * _shrink_load_factor)),
  _rehash_needed(false),
  _hash_seed(hash_seed) {
  // Power-of-2 size lets hash_to_index() mask instead of divide, and is
  // what the sibling-partition scheme in unlink_or_oops_do() relies on.
  assert(is_power_of_2(size), "Table size must be a power of 2");
  // Allocate and clear the bucket array; NULL marks an empty bucket.
  _buckets = NEW_C_HEAP_ARRAY(G1StringDedupEntry*, _size, mtGC);
  memset(_buckets, 0, _size * sizeof(G1StringDedupEntry*));
}
   1.191 +
G1StringDedupTable::~G1StringDedupTable() {
  // Only the bucket array is freed here. The entries themselves are
  // transferred to the replacement table (see finish_resize()/
  // finish_rehash()) before an old table is deleted.
  FREE_C_HEAP_ARRAY(G1StringDedupEntry*, _buckets, mtGC);
}
   1.195 +
void G1StringDedupTable::create() {
  assert(_table == NULL, "One string deduplication table allowed");
  _entry_cache = new G1StringDedupEntryCache();
  // Start at the minimum size; the table grows and shrinks on demand.
  _table = new G1StringDedupTable(_min_size);
}
   1.201 +
   1.202 +void G1StringDedupTable::add(typeArrayOop value, unsigned int hash, G1StringDedupEntry** list) {
   1.203 +  G1StringDedupEntry* entry = _entry_cache->alloc();
   1.204 +  entry->set_obj(value);
   1.205 +  entry->set_hash(hash);
   1.206 +  entry->set_next(*list);
   1.207 +  *list = entry;
   1.208 +  _entries++;
   1.209 +}
   1.210 +
void G1StringDedupTable::remove(G1StringDedupEntry** pentry, uint worker_id) {
  // Unlink the entry from its bucket list and hand it back to the
  // per-worker cache freelist for later reuse or deallocation.
  G1StringDedupEntry* entry = *pentry;
  *pentry = entry->next();
  _entry_cache->free(entry, worker_id);
}
   1.216 +
void G1StringDedupTable::transfer(G1StringDedupEntry** pentry, G1StringDedupTable* dest) {
  // Unlink the entry from its current bucket list and re-link it at the
  // head of the destination table's bucket for the entry's (stored) hash.
  G1StringDedupEntry* entry = *pentry;
  *pentry = entry->next();
  unsigned int hash = entry->hash();
  size_t index = dest->hash_to_index(hash);
  G1StringDedupEntry** list = dest->bucket(index);
  entry->set_next(*list);
  *list = entry;
}
   1.226 +
   1.227 +bool G1StringDedupTable::equals(typeArrayOop value1, typeArrayOop value2) {
   1.228 +  return (value1 == value2 ||
   1.229 +          (value1->length() == value2->length() &&
   1.230 +           (!memcmp(value1->base(T_CHAR),
   1.231 +                    value2->base(T_CHAR),
   1.232 +                    value1->length() * sizeof(jchar)))));
   1.233 +}
   1.234 +
// Scan the bucket list for an entry whose stored hash matches and whose
// character array equals value. count accumulates the number of entries
// inspected before a match (or the whole list length on a miss); the
// caller uses it to detect degenerate buckets and request a rehash.
typeArrayOop G1StringDedupTable::lookup(typeArrayOop value, unsigned int hash,
                                        G1StringDedupEntry** list, uintx &count) {
  for (G1StringDedupEntry* entry = *list; entry != NULL; entry = entry->next()) {
    if (entry->hash() == hash) {
      typeArrayOop existing_value = entry->obj();
      if (equals(value, existing_value)) {
        // Match found
        return existing_value;
      }
    }
    count++;
  }

  // Not found
  return NULL;
}
   1.251 +
// Look up value in the table, adding a new entry for it if no equal
// array is present. Returns the previously stored equal array, or NULL
// if value itself was added.
typeArrayOop G1StringDedupTable::lookup_or_add_inner(typeArrayOop value, unsigned int hash) {
  size_t index = hash_to_index(hash);
  G1StringDedupEntry** list = bucket(index);
  uintx count = 0;

  // Lookup in list
  typeArrayOop existing_value = lookup(value, hash, list, count);

  // Check if rehash is needed: an unusually long bucket chain suggests
  // the current hash seed is being attacked or is otherwise degenerate.
  if (count > _rehash_threshold) {
    _rehash_needed = true;
  }

  if (existing_value == NULL) {
    // Not found, add new entry
    add(value, hash, list);

    // Update statistics
    _entries_added++;
  }

  return existing_value;
}
   1.275 +
   1.276 +unsigned int G1StringDedupTable::hash_code(typeArrayOop value) {
   1.277 +  unsigned int hash;
   1.278 +  int length = value->length();
   1.279 +  const jchar* data = (jchar*)value->base(T_CHAR);
   1.280 +
   1.281 +  if (use_java_hash()) {
   1.282 +    hash = java_lang_String::hash_code(data, length);
   1.283 +  } else {
   1.284 +    hash = AltHashing::murmur3_32(_table->_hash_seed, data, length);
   1.285 +  }
   1.286 +
   1.287 +  return hash;
   1.288 +}
   1.289 +
// Try to deduplicate java_string: look its character array up in the
// table and, if an equal array is already known, replace the string's
// value with the shared array. Updates stat throughout.
void G1StringDedupTable::deduplicate(oop java_string, G1StringDedupStat& stat) {
  assert(java_lang_String::is_instance(java_string), "Must be a string");
  // Raw oops are held across the operations below, so no safepoint
  // may occur in this scope.
  No_Safepoint_Verifier nsv;

  stat.inc_inspected();

  typeArrayOop value = java_lang_String::value(java_string);
  if (value == NULL) {
    // String has no value
    stat.inc_skipped();
    return;
  }

  unsigned int hash = 0;

  if (use_java_hash()) {
    // Get hash code from cache
    hash = java_lang_String::hash(java_string);
  }

  if (hash == 0) {
    // Compute hash. Note that 0 also means "not cached" in the string's
    // hash field, so a string whose real Java hash is 0 is recomputed
    // here every time.
    hash = hash_code(value);
    stat.inc_hashed();
  }

  if (use_java_hash() && hash != 0) {
    // Store hash code in cache
    java_lang_String::set_hash(java_string, hash);
  }

  typeArrayOop existing_value = lookup_or_add(value, hash);
  if (existing_value == value) {
    // Same value, already known
    stat.inc_known();
    return;
  }

  // Get size of value array
  uintx size_in_bytes = value->size() * HeapWordSize;
  stat.inc_new(size_in_bytes);

  if (existing_value != NULL) {
    // Enqueue the reference to make sure it is kept alive. Concurrent mark might
    // otherwise declare it dead if there are no other strong references to this object.
    G1SATBCardTableModRefBS::enqueue(existing_value);

    // Existing value found, deduplicate string
    java_lang_String::set_value(java_string, existing_value);

    if (G1CollectedHeap::heap()->is_in_young(value)) {
      stat.inc_deduped_young(size_in_bytes);
    } else {
      stat.inc_deduped_old(size_in_bytes);
    }
  }
}
   1.347 +
// Decide whether the table should be resized based on the current load,
// and if so allocate (but do not install) the replacement table.
// Returns NULL when no resize is needed or possible.
G1StringDedupTable* G1StringDedupTable::prepare_resize() {
  size_t size = _table->_size;

  // Check if the hashtable needs to be resized
  if (_table->_entries > _table->_grow_threshold) {
    // Grow table, double the size
    size *= 2;
    if (size > _max_size) {
      // Too big, don't resize
      return NULL;
    }
  } else if (_table->_entries < _table->_shrink_threshold) {
    // Shrink table, half the size
    size /= 2;
    if (size < _min_size) {
      // Too small, don't resize
      return NULL;
    }
  } else if (StringDeduplicationResizeALot) {
    // Force grow (test/stress flag)
    size *= 2;
    if (size > _max_size) {
      // Too big, force shrink instead
      size /= 4;
    }
  } else {
    // Resize not needed
    return NULL;
  }

  // Update statistics
  _resize_count++;

  // Allocate the new table. The new table will be populated by workers
  // calling unlink_or_oops_do() and finally installed by finish_resize().
  return new G1StringDedupTable(size, _table->_hash_seed);
}
   1.385 +
// Install the resized table created by prepare_resize(). All live
// entries have already been transferred into it by the workers in
// unlink_or_oops_do().
void G1StringDedupTable::finish_resize(G1StringDedupTable* resized_table) {
  assert(resized_table != NULL, "Invalid table");

  // Carry the (already dead-entry-adjusted) entry count over.
  resized_table->_entries = _table->_entries;

  // Free old table
  delete _table;

  // Install new table
  _table = resized_table;
}
   1.397 +
// Parallel driver for scanning the table: each worker repeatedly claims
// a partition and processes it (removing dead entries, keeping live
// ones alive, and transferring/rehashing entries when the closure says
// a resize or rehash is in progress).
void G1StringDedupTable::unlink_or_oops_do(G1StringDedupUnlinkOrOopsDoClosure* cl, uint worker_id) {
  // The table is divided into partitions to allow lock-less parallel processing by
  // multiple worker threads. A worker thread first claims a partition, which ensures
  // exclusive access to that part of the table, then continues to process it. To allow
  // shrinking of the table in parallel we also need to make sure that the same worker
  // thread processes all partitions where entries will hash to the same destination
  // partition. Since the table size is always a power of two and we always shrink by
  // dividing the table in half, we know that for a given partition there is only one
  // other partition whose entries will hash to the same destination partition. That
  // other partition is always the sibling partition in the second half of the table.
  // For example, if the table is divided into 8 partitions, the sibling of partition 0
  // is partition 4, the sibling of partition 1 is partition 5, etc.
  size_t table_half = _table->_size / 2;

  // Let each partition be one page worth of buckets
  size_t partition_size = MIN2(table_half, os::vm_page_size() / sizeof(G1StringDedupEntry*));
  assert(table_half % partition_size == 0, "Invalid partition size");

  // Number of entries removed during the scan
  uintx removed = 0;

  for (;;) {
    // Grab next partition to scan
    size_t partition_begin = cl->claim_table_partition(partition_size);
    size_t partition_end = partition_begin + partition_size;
    if (partition_begin >= table_half) {
      // End of table
      break;
    }

    // Scan the partition followed by the sibling partition in the second half of the table
    removed += unlink_or_oops_do(cl, partition_begin, partition_end, worker_id);
    removed += unlink_or_oops_do(cl, table_half + partition_begin, table_half + partition_end, worker_id);
  }

  // Delayed update avoid contention on the table lock
  if (removed > 0) {
    MutexLockerEx ml(StringDedupTable_lock, Mutex::_no_safepoint_check_flag);
    _table->_entries -= removed;
    _entries_removed += removed;
  }
}
   1.440 +
// Scan one claimed partition of buckets. Live entries are kept alive
// (and transferred or rehashed when a resize/rehash is in progress);
// dead entries are unlinked and returned to the cache. Returns the
// number of entries removed.
uintx G1StringDedupTable::unlink_or_oops_do(G1StringDedupUnlinkOrOopsDoClosure* cl,
                                            size_t partition_begin,
                                            size_t partition_end,
                                            uint worker_id) {
  uintx removed = 0;
  for (size_t bucket = partition_begin; bucket < partition_end; bucket++) {
    // entry is a pointer to the link field being inspected, which lets
    // both remove() and transfer() unlink in place without a prev pointer.
    G1StringDedupEntry** entry = _table->bucket(bucket);
    while (*entry != NULL) {
      oop* p = (oop*)(*entry)->obj_addr();
      if (cl->is_alive(*p)) {
        cl->keep_alive(p);
        if (cl->is_resizing()) {
          // We are resizing the table, transfer entry to the new table.
          // transfer() advances *entry to the next list element.
          _table->transfer(entry, cl->resized_table());
        } else {
          if (cl->is_rehashing()) {
            // We are rehashing the table, rehash the entry but keep it
            // in the table. We can't transfer entries into the new table
            // at this point since we don't have exclusive access to all
            // destination partitions. finish_rehash() will do a single
            // threaded transfer of all entries.
            typeArrayOop value = (typeArrayOop)*p;
            unsigned int hash = hash_code(value);
            (*entry)->set_hash(hash);
          }

          // Move to next entry
          entry = (*entry)->next_addr();
        }
      } else {
        // Not alive, remove entry from table. remove() advances *entry
        // to the next list element.
        _table->remove(entry, worker_id);
        removed++;
      }
    }
  }

  return removed;
}
   1.480 +
// Decide whether the table should be rehashed with a new seed (set when
// a lookup saw a degenerate bucket, or forced by the stress flag), and
// if so allocate (but do not install) the replacement table.
// Returns NULL when no rehash is needed.
G1StringDedupTable* G1StringDedupTable::prepare_rehash() {
  if (!_table->_rehash_needed && !StringDeduplicationRehashALot) {
    // Rehash not needed
    return NULL;
  }

  // Update statistics
  _rehash_count++;

  // Compute new hash seed
  _table->_hash_seed = AltHashing::compute_seed();

  // Allocate the new table, same size and hash seed
  return new G1StringDedupTable(_table->_size, _table->_hash_seed);
}
   1.496 +
// Install the rehash table created by prepare_rehash(). The workers have
// already updated every entry's hash (see unlink_or_oops_do()); this
// single-threaded step moves the entries into their new buckets.
void G1StringDedupTable::finish_rehash(G1StringDedupTable* rehashed_table) {
  assert(rehashed_table != NULL, "Invalid table");

  // Move all newly rehashed entries into the correct buckets in the new table
  for (size_t bucket = 0; bucket < _table->_size; bucket++) {
    G1StringDedupEntry** entry = _table->bucket(bucket);
    while (*entry != NULL) {
      // transfer() unlinks the entry and advances *entry.
      _table->transfer(entry, rehashed_table);
    }
  }

  rehashed_table->_entries = _table->_entries;

  // Free old table
  delete _table;

  // Install new table
  _table = rehashed_table;
}
   1.516 +
   1.517 +void G1StringDedupTable::verify() {
   1.518 +  for (size_t bucket = 0; bucket < _table->_size; bucket++) {
   1.519 +    // Verify entries
   1.520 +    G1StringDedupEntry** entry = _table->bucket(bucket);
   1.521 +    while (*entry != NULL) {
   1.522 +      typeArrayOop value = (*entry)->obj();
   1.523 +      guarantee(value != NULL, "Object must not be NULL");
   1.524 +      guarantee(Universe::heap()->is_in_reserved(value), "Object must be on the heap");
   1.525 +      guarantee(!value->is_forwarded(), "Object must not be forwarded");
   1.526 +      guarantee(value->is_typeArray(), "Object must be a typeArrayOop");
   1.527 +      unsigned int hash = hash_code(value);
   1.528 +      guarantee((*entry)->hash() == hash, "Table entry has inorrect hash");
   1.529 +      guarantee(_table->hash_to_index(hash) == bucket, "Table entry has incorrect index");
   1.530 +      entry = (*entry)->next_addr();
   1.531 +    }
   1.532 +
   1.533 +    // Verify that we do not have entries with identical oops or identical arrays.
   1.534 +    // We only need to compare entries in the same bucket. If the same oop or an
   1.535 +    // identical array has been inserted more than once into different/incorrect
   1.536 +    // buckets the verification step above will catch that.
   1.537 +    G1StringDedupEntry** entry1 = _table->bucket(bucket);
   1.538 +    while (*entry1 != NULL) {
   1.539 +      typeArrayOop value1 = (*entry1)->obj();
   1.540 +      G1StringDedupEntry** entry2 = (*entry1)->next_addr();
   1.541 +      while (*entry2 != NULL) {
   1.542 +        typeArrayOop value2 = (*entry2)->obj();
   1.543 +        guarantee(!equals(value1, value2), "Table entries must not have identical arrays");
   1.544 +        entry2 = (*entry2)->next_addr();
   1.545 +      }
   1.546 +      entry1 = (*entry1)->next_addr();
   1.547 +    }
   1.548 +  }
   1.549 +}
   1.550 +
void G1StringDedupTable::trim_entry_cache() {
  // Cap the entry cache at _max_cache_factor (10%) of the current table
  // size. The lock serializes against table modifications, which
  // allocate entries from the same cache.
  MutexLockerEx ml(StringDedupTable_lock, Mutex::_no_safepoint_check_flag);
  size_t max_cache_size = (size_t)(_table->_size * _max_cache_factor);
  _entry_cache->trim(max_cache_size);
}
   1.556 +
// Print table memory usage, size/load, and cumulative resize/rehash
// statistics to the given stream.
void G1StringDedupTable::print_statistics(outputStream* st) {
  st->print_cr(
    "   [Table]\n"
    "      [Memory Usage: "G1_STRDEDUP_BYTES_FORMAT_NS"]\n"
    "      [Size: "SIZE_FORMAT", Min: "SIZE_FORMAT", Max: "SIZE_FORMAT"]\n"
    "      [Entries: "UINTX_FORMAT", Load: "G1_STRDEDUP_PERCENT_FORMAT_NS", Cached: " UINTX_FORMAT ", Added: "UINTX_FORMAT", Removed: "UINTX_FORMAT"]\n"
    "      [Resize Count: "UINTX_FORMAT", Shrink Threshold: "UINTX_FORMAT"("G1_STRDEDUP_PERCENT_FORMAT_NS"), Grow Threshold: "UINTX_FORMAT"("G1_STRDEDUP_PERCENT_FORMAT_NS")]\n"
    "      [Rehash Count: "UINTX_FORMAT", Rehash Threshold: "UINTX_FORMAT", Hash Seed: 0x%x]\n"
    "      [Age Threshold: "UINTX_FORMAT"]",
    // Memory usage = bucket array + all entries, both in-table and cached.
    G1_STRDEDUP_BYTES_PARAM(_table->_size * sizeof(G1StringDedupEntry*) + (_table->_entries + _entry_cache->size()) * sizeof(G1StringDedupEntry)),
    _table->_size, _min_size, _max_size,
    _table->_entries, (double)_table->_entries / (double)_table->_size * 100.0, _entry_cache->size(), _entries_added, _entries_removed,
    _resize_count, _table->_shrink_threshold, _shrink_load_factor * 100.0, _table->_grow_threshold, _grow_load_factor * 100.0,
    _rehash_count, _rehash_threshold, _table->_hash_seed,
    StringDeduplicationAgeThreshold);
}

mercurial