src/share/vm/utilities/hashtable.cpp

author:      tschatzl
date:        Mon, 15 May 2017 12:20:15 +0200
changeset:   8766:ce9a710b0f63
parent:      7208:7baf47cb97cb
child:       8856:ac27a9c85bea
child:       9614:bb44c0e88235
permissions: -rw-r--r--

8180048: Interned string and symbol table leak memory during parallel unlinking
Summary: Make appending found dead BasicHashtableEntrys to the free list atomic.
Reviewed-by: ehelin, shade
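
The fix publishes each worker's chain of dead entries onto the shared free list with a compare-and-swap loop. A minimal sketch of that pattern, assuming Entry stands in for BasicHashtableEntry<F> and head/tail delimit one worker's private chain (the real code is in BasicHashtable<F>::bulk_free_entries below):

    Entry* current = _free_list;
    while (true) {
      tail->set_next(current);             // chain now ends at the observed list head
      Entry* old = (Entry*)Atomic::cmpxchg_ptr(head, &_free_list, current);
      if (old == current) {
        break;                             // CAS succeeded; the whole chain is published atomically
      }
      current = old;                       // another thread won the race; retry against the new head
    }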

/*
 * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/altHashing.hpp"
#include "classfile/javaClasses.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/filemap.hpp"
#include "memory/resourceArea.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/safepoint.hpp"
#include "utilities/dtrace.hpp"
#include "utilities/hashtable.hpp"
#include "utilities/hashtable.inline.hpp"
#include "utilities/numberSeq.hpp"

// This hashtable is implemented as an open hash table with a fixed number of buckets.

// Return an entry taken from the free list, or NULL if the free list is empty.
template <MEMFLAGS F> BasicHashtableEntry<F>* BasicHashtable<F>::new_entry_free_list() {
  BasicHashtableEntry<F>* entry = NULL;
  if (_free_list != NULL) {
    entry = _free_list;
    _free_list = _free_list->next();
  }
  return entry;
}

// HashtableEntry objects are allocated in blocks to reduce the space overhead.
template <MEMFLAGS F> BasicHashtableEntry<F>* BasicHashtable<F>::new_entry(unsigned int hashValue) {
  BasicHashtableEntry<F>* entry = new_entry_free_list();

  if (entry == NULL) {
    if (_first_free_entry + _entry_size >= _end_block) {
      int block_size = MIN2(512, MAX2((int)_table_size / 2, (int)_number_of_entries));
      int len = _entry_size * block_size;
      len = 1 << log2_intptr(len); // round down to power of 2
      assert(len >= _entry_size, "");
      _first_free_entry = NEW_C_HEAP_ARRAY2(char, len, F, CURRENT_PC);
      _end_block = _first_free_entry + len;
    }
    entry = (BasicHashtableEntry<F>*)_first_free_entry;
    _first_free_entry += _entry_size;
  }

  assert(_entry_size % HeapWordSize == 0, "");
  entry->set_hash(hashValue);
  return entry;
}
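
// Illustrative arithmetic (example values, not from the original source): with
// _entry_size == 16, _table_size == 1009 and _number_of_entries == 2000, the
// block size is MIN2(512, MAX2(504, 2000)) == 512 entries, so len == 16 * 512
// == 8192 bytes; 8192 is already a power of two and is kept as-is, giving the
// block room for 512 entries before another block must be allocated.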

template <class T, MEMFLAGS F> HashtableEntry<T, F>* Hashtable<T, F>::new_entry(unsigned int hashValue, T obj) {
  HashtableEntry<T, F>* entry;

  entry = (HashtableEntry<T, F>*)BasicHashtable<F>::new_entry(hashValue);
  entry->set_literal(obj);
  return entry;
}

// Check to see if the hashtable is unbalanced.  The caller sets a flag to
// rehash at the next safepoint.  If this bucket is 60 times greater than the
// expected average bucket length, it's an unbalanced hashtable.
// This is a somewhat arbitrary heuristic, but if one bucket reaches
// rehash_count (currently 100), there's probably something wrong.

template <class T, MEMFLAGS F> bool RehashableHashtable<T, F>::check_rehash_table(int count) {
  assert(this->table_size() != 0, "underflow");
  if (count > (((double)this->number_of_entries()/(double)this->table_size())*rehash_multiple)) {
    // Set a flag for the next safepoint, which should be at some guaranteed
    // safepoint interval.
    return true;
  }
  return false;
}
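
// Illustrative arithmetic (example values, not from the original source): with
// number_of_entries() == 1000 and table_size() == 500, the expected average
// bucket length is 2, so with the multiple of 60 described above a bucket
// chain longer than 120 entries flags the table for rehashing at the next
// safepoint.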

template <class T, MEMFLAGS F> juint RehashableHashtable<T, F>::_seed = 0;

// Create a new table and, using the alternate hash code, populate the new
// table with the existing elements.  This can be used to change the hash code
// and could in the future change the size of the table.

template <class T, MEMFLAGS F> void RehashableHashtable<T, F>::move_to(RehashableHashtable<T, F>* new_table) {

  // Initialize the global seed for hashing.
  _seed = AltHashing::compute_seed();
  assert(seed() != 0, "shouldn't be zero");

  int saved_entry_count = this->number_of_entries();

  // Iterate through the table and create a new entry for the new table
  for (int i = 0; i < new_table->table_size(); ++i) {
    for (HashtableEntry<T, F>* p = this->bucket(i); p != NULL; ) {
      HashtableEntry<T, F>* next = p->next();
      T string = p->literal();
      // Use the alternate hashing algorithm on the symbol in the first table
      unsigned int hashValue = string->new_hash(seed());
      // Get a new index relative to the new table (can also change size)
      int index = new_table->hash_to_index(hashValue);
      p->set_hash(hashValue);
      // Keep the shared bit in the Hashtable entry to indicate that this entry
      // can't be deleted.  The shared bit is the LSB in the _next field, so
      // walking the hashtable past these entries requires a
      // BasicHashtableEntry::make_ptr() call.
      bool keep_shared = p->is_shared();
      this->unlink_entry(p);
      new_table->add_entry(index, p);
      if (keep_shared) {
        p->set_shared();
      }
      p = next;
    }
  }
  // Give the new table the free list as well.
  new_table->copy_freelist(this);
  assert(new_table->number_of_entries() == saved_entry_count, "lost entry on dictionary copy?");

  // Destroy memory used by the buckets in the hashtable.  The memory
  // for the elements has been used in a new table and is not
  // destroyed.  The memory reuse will benefit resizing the SystemDictionary
  // by avoiding a memory allocation spike at safepoints.
  BasicHashtable<F>::free_buckets();
}

template <MEMFLAGS F> void BasicHashtable<F>::free_buckets() {
  if (NULL != _buckets) {
    // Don't delete the buckets in the shared space.  They aren't
    // allocated by os::malloc.
    if (!UseSharedSpaces ||
        !FileMapInfo::current_info()->is_in_shared_space(_buckets)) {
       FREE_C_HEAP_ARRAY(HashtableBucket, _buckets, F);
    }
    _buckets = NULL;
  }
}

// Reverse the order of elements in the hash buckets.

template <MEMFLAGS F> void BasicHashtable<F>::reverse() {

  for (int i = 0; i < _table_size; ++i) {
    BasicHashtableEntry<F>* new_list = NULL;
    BasicHashtableEntry<F>* p = bucket(i);
    while (p != NULL) {
      BasicHashtableEntry<F>* next = p->next();
      p->set_next(new_list);
      new_list = p;
      p = next;
    }
    *bucket_addr(i) = new_list;
  }
}

// Add a dead entry to this context's private list of removed entries; the
// list is later appended to the shared free list by bulk_free_entries().
template <MEMFLAGS F> void BasicHashtable<F>::BucketUnlinkContext::free_entry(BasicHashtableEntry<F>* entry) {
  entry->set_next(_removed_head);
  _removed_head = entry;
  if (_removed_tail == NULL) {
    _removed_tail = entry;
  }
  _num_removed++;
}

template <MEMFLAGS F> void BasicHashtable<F>::bulk_free_entries(BucketUnlinkContext* context) {
  if (context->_num_removed == 0) {
    assert(context->_removed_head == NULL && context->_removed_tail == NULL,
           err_msg("Zero entries in the unlink context, but elements linked from " PTR_FORMAT " to " PTR_FORMAT,
                   p2i(context->_removed_head), p2i(context->_removed_tail)));
    return;
  }

  // MT-safe add of the list of BasicHashtableEntrys from the context to the free list.
  BasicHashtableEntry<F>* current = _free_list;
  while (true) {
    // Link the context's chain onto the currently observed free-list head,
    // then try to install the chain's head with a CAS; retry on contention.
    context->_removed_tail->set_next(current);
    BasicHashtableEntry<F>* old = (BasicHashtableEntry<F>*)Atomic::cmpxchg_ptr(context->_removed_head, &_free_list, current);
    if (old == current) {
      break;
    }
    current = old;
  }
  Atomic::add(-context->_num_removed, &_number_of_entries);
}

// Copy the table to the shared space.

template <MEMFLAGS F> void BasicHashtable<F>::copy_table(char** top, char* end) {

  // Dump the hash table entries.

  intptr_t *plen = (intptr_t*)(*top);
  *top += sizeof(*plen);

  int i;
  for (i = 0; i < _table_size; ++i) {
    for (BasicHashtableEntry<F>** p = _buckets[i].entry_addr();
                              *p != NULL;
                               p = (*p)->next_addr()) {
      if (*top + entry_size() > end) {
        report_out_of_shared_space(SharedMiscData);
      }
      *p = (BasicHashtableEntry<F>*)memcpy(*top, *p, entry_size());
      *top += entry_size();
    }
  }
  *plen = (char*)(*top) - (char*)plen - sizeof(*plen);

  // Set the shared bit.

  for (i = 0; i < _table_size; ++i) {
    for (BasicHashtableEntry<F>* p = bucket(i); p != NULL; p = p->next()) {
      p->set_shared();
    }
  }
}

// Reverse the order of elements in the hash buckets; entries whose literals
// lie below the boundary end up ahead of those at or above it.

template <class T, MEMFLAGS F> void Hashtable<T, F>::reverse(void* boundary) {

  for (int i = 0; i < this->table_size(); ++i) {
    HashtableEntry<T, F>* high_list = NULL;
    HashtableEntry<T, F>* low_list = NULL;
    HashtableEntry<T, F>* last_low_entry = NULL;
    HashtableEntry<T, F>* p = bucket(i);
    while (p != NULL) {
      HashtableEntry<T, F>* next = p->next();
      if ((void*)p->literal() >= boundary) {
        p->set_next(high_list);
        high_list = p;
      } else {
        p->set_next(low_list);
        low_list = p;
        if (last_low_entry == NULL) {
          last_low_entry = p;
        }
      }
      p = next;
    }
    if (low_list != NULL) {
      *bucket_addr(i) = low_list;
      last_low_entry->set_next(high_list);
    } else {
      *bucket_addr(i) = high_list;
    }
  }
}

template <class T, MEMFLAGS F> int RehashableHashtable<T, F>::literal_size(Symbol *symbol) {
  return symbol->size() * HeapWordSize;
}

template <class T, MEMFLAGS F> int RehashableHashtable<T, F>::literal_size(oop oop) {
  // NOTE: this would over-count if (pre-JDK8) java_lang_Class::has_offset_field() is true,
  // and the String.value array is shared by several Strings. However, starting from JDK8,
  // the String.value array is not shared anymore.
  assert(oop != NULL && oop->klass() == SystemDictionary::String_klass(), "only strings are supported");
  return (oop->size() + java_lang_String::value(oop)->size()) * HeapWordSize;
}

// Dump footprint and bucket length statistics
//
// Note: if you create a new subclass of Hashtable<MyNewType, F>, you will need to
// add a new function Hashtable<T, F>::literal_size(MyNewType lit)
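//
// A minimal sketch of such an overload (illustrative only; MyNewType and its
// size() method are hypothetical, following the Symbol* overload above):
//
//   template <class T, MEMFLAGS F>
//   int RehashableHashtable<T, F>::literal_size(MyNewType* lit) {
//     return lit->size() * HeapWordSize;
//   }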

template <class T, MEMFLAGS F> void RehashableHashtable<T, F>::dump_table(outputStream* st, const char *table_name) {
  NumberSeq summary;
  int literal_bytes = 0;
  for (int i = 0; i < this->table_size(); ++i) {
    int count = 0;
    for (HashtableEntry<T, F>* e = this->bucket(i);
       e != NULL; e = e->next()) {
      count++;
      literal_bytes += literal_size(e->literal());
    }
    summary.add((double)count);
  }
  double num_buckets = summary.num();
  double num_entries = summary.sum();

  int bucket_bytes = (int)num_buckets * sizeof(HashtableBucket<F>);
  int entry_bytes  = (int)num_entries * sizeof(HashtableEntry<T, F>);
  int total_bytes = literal_bytes +  bucket_bytes + entry_bytes;

  double bucket_avg  = (num_buckets <= 0) ? 0 : (bucket_bytes  / num_buckets);
  double entry_avg   = (num_entries <= 0) ? 0 : (entry_bytes   / num_entries);
  double literal_avg = (num_entries <= 0) ? 0 : (literal_bytes / num_entries);

  st->print_cr("%s statistics:", table_name);
  st->print_cr("Number of buckets       : %9d = %9d bytes, avg %7.3f", (int)num_buckets, bucket_bytes,  bucket_avg);
  st->print_cr("Number of entries       : %9d = %9d bytes, avg %7.3f", (int)num_entries, entry_bytes,   entry_avg);
  st->print_cr("Number of literals      : %9d = %9d bytes, avg %7.3f", (int)num_entries, literal_bytes, literal_avg);
  st->print_cr("Total footprint         : %9s = %9d bytes", "", total_bytes);
  st->print_cr("Average bucket size     : %9.3f", summary.avg());
  st->print_cr("Variance of bucket size : %9.3f", summary.variance());
  st->print_cr("Std. dev. of bucket size: %9.3f", summary.sd());
  st->print_cr("Maximum bucket size     : %9d", (int)summary.maximum());
}

// Dump the hash table buckets.

template <MEMFLAGS F> void BasicHashtable<F>::copy_buckets(char** top, char* end) {
  intptr_t len = _table_size * sizeof(HashtableBucket<F>);
  *(intptr_t*)(*top) = len;
  *top += sizeof(intptr_t);

  *(intptr_t*)(*top) = _number_of_entries;
  *top += sizeof(intptr_t);

  if (*top + len > end) {
    report_out_of_shared_space(SharedMiscData);
  }
  _buckets = (HashtableBucket<F>*)memcpy(*top, _buckets, len);
  *top += len;
}

#ifndef PRODUCT

template <class T, MEMFLAGS F> void Hashtable<T, F>::print() {
  ResourceMark rm;

  for (int i = 0; i < BasicHashtable<F>::table_size(); i++) {
    HashtableEntry<T, F>* entry = bucket(i);
    while (entry != NULL) {
      tty->print("%d : ", i);
      entry->literal()->print();
      tty->cr();
      entry = entry->next();
    }
  }
}

template <MEMFLAGS F> void BasicHashtable<F>::verify() {
  int count = 0;
  for (int i = 0; i < table_size(); i++) {
    for (BasicHashtableEntry<F>* p = bucket(i); p != NULL; p = p->next()) {
      ++count;
    }
  }
  assert(count == number_of_entries(), "number of hashtable entries incorrect");
}

#endif // PRODUCT

#ifdef ASSERT

template <MEMFLAGS F> void BasicHashtable<F>::verify_lookup_length(double load) {
  if ((double)_lookup_length / (double)_lookup_count > load * 2.0) {
    warning("Performance bug: SystemDictionary lookup_count=%d "
            "lookup_length=%d average=%lf load=%f",
            _lookup_count, _lookup_length,
            (double) _lookup_length / _lookup_count, load);
  }
}

#endif
// Explicitly instantiate these types
#if INCLUDE_ALL_GCS
template class Hashtable<nmethod*, mtGC>;
template class HashtableEntry<nmethod*, mtGC>;
template class BasicHashtable<mtGC>;
#endif
template class Hashtable<ConstantPool*, mtClass>;
template class RehashableHashtable<Symbol*, mtSymbol>;
template class RehashableHashtable<oopDesc*, mtSymbol>;
template class Hashtable<Symbol*, mtSymbol>;
template class Hashtable<Klass*, mtClass>;
template class Hashtable<oop, mtClass>;
#if defined(SOLARIS) || defined(CHECK_UNHANDLED_OOPS)
template class Hashtable<oop, mtSymbol>;
template class RehashableHashtable<oop, mtSymbol>;
#endif // SOLARIS || CHECK_UNHANDLED_OOPS
template class Hashtable<oopDesc*, mtSymbol>;
template class Hashtable<Symbol*, mtClass>;
template class HashtableEntry<Symbol*, mtSymbol>;
template class HashtableEntry<Symbol*, mtClass>;
template class HashtableEntry<oop, mtSymbol>;
template class BasicHashtableEntry<mtSymbol>;
template class BasicHashtableEntry<mtCode>;
template class BasicHashtable<mtClass>;
template class BasicHashtable<mtSymbol>;
template class BasicHashtable<mtCode>;
template class BasicHashtable<mtInternal>;
