src/share/vm/utilities/hashtable.cpp

author:      ysr
date:        Thu, 20 Nov 2008 16:56:09 -0800
changeset:   888 (c96030fff130)
parent:      867 (275a3b7ff0d6)
child:       905 (ad8c8ca4ab0f)
permissions: -rw-r--r--

6684579: SoftReference processing can be made more efficient
Summary: For current soft-ref clearing policies, we can decide at marking time if a soft-reference will definitely not be cleared, postponing the decision of whether it will definitely be cleared to the final reference processing phase. This can be especially beneficial in the case of concurrent collectors where the marking is usually concurrent but reference processing is usually not.
Reviewed-by: jmasa
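
The summary above describes a two-phase treatment of soft references. The sketch below is illustrative only and is not HotSpot code; the SoftRef and ClearingPolicy types and their fields are hypothetical stand-ins for the real policy objects. It shows the split: references the policy would certainly retain are kept alive at marking time, and only the deferred candidates are re-examined (and possibly cleared) during the final reference processing phase.

#include <cstddef>

// Hypothetical stand-ins for a soft reference and a clearing policy.
struct SoftRef {
  void* referent;
  long  idle_ms;       // time since the referent was last accessed
};

struct ClearingPolicy {
  long max_idle_ms;    // referents idle longer than this may be cleared
  bool would_clear(const SoftRef& r) const {
    return r.idle_ms > max_idle_ms;
  }
};

// Marking time: a reference the policy would not clear can be treated as
// strong immediately, so its referent is marked along with everything else.
bool keep_referent_alive_at_mark(const SoftRef& r, const ClearingPolicy& p) {
  return !p.would_clear(r);
}

// Final reference processing: only the deferred candidates are re-examined,
// and those the policy still selects are actually cleared.
void process_deferred(SoftRef& r, const ClearingPolicy& p) {
  if (p.would_clear(r)) {
    r.referent = NULL;   // clear the soft reference
  }
}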

/*
 * Copyright 2003-2005 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

# include "incls/_precompiled.incl"
# include "incls/_hashtable.cpp.incl"

HS_DTRACE_PROBE_DECL4(hs_private, hashtable__new_entry,
  void*, unsigned int, oop, void*);

// This is a generic hashtable, designed to be used for the symbol
// and string tables.
//
// It is implemented as an open hash table with a fixed number of buckets.
//
// %note:
//  - HashtableEntrys are allocated in blocks to reduce the space overhead.
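//  - new_entry() first reuses entries from the free list and only then
//    carves a new entry out of the current allocation block.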

BasicHashtableEntry* BasicHashtable::new_entry(unsigned int hashValue) {
  BasicHashtableEntry* entry;

  if (_free_list) {
    entry = _free_list;
    _free_list = _free_list->next();
  } else {
    if (_first_free_entry + _entry_size >= _end_block) {
      int block_size = MIN2(512, MAX2((int)_table_size / 2, (int)_number_of_entries));
      int len = _entry_size * block_size;
      len = 1 << log2_intptr(len); // round down to power of 2
      assert(len >= _entry_size, "");
      _first_free_entry = NEW_C_HEAP_ARRAY(char, len);
      _end_block = _first_free_entry + len;
    }
    entry = (BasicHashtableEntry*)_first_free_entry;
    _first_free_entry += _entry_size;
  }

  assert(_entry_size % HeapWordSize == 0, "");
  entry->set_hash(hashValue);
  return entry;
}


HashtableEntry* Hashtable::new_entry(unsigned int hashValue, oop obj) {
  HashtableEntry* entry;

  entry = (HashtableEntry*)BasicHashtable::new_entry(hashValue);
  entry->set_literal(obj);   // clears literal string field
  HS_DTRACE_PROBE4(hs_private, hashtable__new_entry,
    this, hashValue, obj, entry);
  return entry;
}


// GC support

void Hashtable::unlink(BoolObjectClosure* is_alive) {
  // Readers of the table are unlocked, so we should only be removing
  // entries at a safepoint.
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
  for (int i = 0; i < table_size(); ++i) {
    for (HashtableEntry** p = bucket_addr(i); *p != NULL; ) {
      HashtableEntry* entry = *p;
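      // Shared entries are never unlinked.  They are grouped at the tail of
      // each bucket, so scanning can stop at the first one.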
      if (entry->is_shared()) {
        break;
      }
      assert(entry->literal() != NULL, "just checking");
      if (is_alive->do_object_b(entry->literal())) {
        p = entry->next_addr();
      } else {
        *p = entry->next();
        free_entry(entry);
      }
    }
  }
}


void Hashtable::oops_do(OopClosure* f) {
  for (int i = 0; i < table_size(); ++i) {
    HashtableEntry** p = bucket_addr(i);
    HashtableEntry* entry = bucket(i);
    while (entry != NULL) {
      f->do_oop(entry->literal_addr());

      // Did the closure remove the literal from the table?
      if (entry->literal() == NULL) {
        assert(!entry->is_shared(), "immutable hashtable entry?");
        *p = entry->next();
        free_entry(entry);
      } else {
        p = entry->next_addr();
      }
      entry = (HashtableEntry*)HashtableEntry::make_ptr(*p);
    }
  }
}


// Reverse the order of elements in the hash buckets.

void BasicHashtable::reverse() {
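  // Relink each bucket's entries onto a new list head; pushing them on one at
  // a time reverses the bucket's singly linked list in place.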
  for (int i = 0; i < _table_size; ++i) {
    BasicHashtableEntry* new_list = NULL;
    BasicHashtableEntry* p = bucket(i);
    while (p != NULL) {
      BasicHashtableEntry* next = p->next();
      p->set_next(new_list);
      new_list = p;
      p = next;
    }
    *bucket_addr(i) = new_list;
  }
}


// Copy the table to the shared space.

void BasicHashtable::copy_table(char** top, char* end) {

  // Dump the hash table entries.
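  // A length word is reserved in front of the entries and filled in below,
  // once the total size of the copied entries is known.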
  intptr_t *plen = (intptr_t*)(*top);
  *top += sizeof(*plen);

  int i;
  for (i = 0; i < _table_size; ++i) {
    for (BasicHashtableEntry** p = _buckets[i].entry_addr();
                              *p != NULL;
                               p = (*p)->next_addr()) {
      if (*top + entry_size() > end) {
        warning("\nThe shared miscellaneous data space is not large "
                "enough to \npreload requested classes.  Use "
                "-XX:SharedMiscDataSize= to increase \nthe initial "
                "size of the miscellaneous data space.\n");
        exit(2);
      }
      *p = (BasicHashtableEntry*)memcpy(*top, *p, entry_size());
      *top += entry_size();
    }
  }
  *plen = (char*)(*top) - (char*)plen - sizeof(*plen);

  // Set the shared bit.

  for (i = 0; i < _table_size; ++i) {
    for (BasicHashtableEntry* p = bucket(i); p != NULL; p = p->next()) {
      p->set_shared();
    }
  }
}


// Reverse the order of elements in the hash buckets and, at the same time,
// place entries whose literals lie below 'boundary' ahead of those whose
// literals lie at or above it.

void Hashtable::reverse(void* boundary) {

  for (int i = 0; i < table_size(); ++i) {
    HashtableEntry* high_list = NULL;
    HashtableEntry* low_list = NULL;
    HashtableEntry* last_low_entry = NULL;
    HashtableEntry* p = bucket(i);
    while (p != NULL) {
      HashtableEntry* next = p->next();
      if ((void*)p->literal() >= boundary) {
        p->set_next(high_list);
        high_list = p;
      } else {
        p->set_next(low_list);
        low_list = p;
        if (last_low_entry == NULL) {
          last_low_entry = p;
        }
      }
      p = next;
    }
    if (low_list != NULL) {
      *bucket_addr(i) = low_list;
      last_low_entry->set_next(high_list);
    } else {
      *bucket_addr(i) = high_list;
    }
  }
}


// Dump the hash table buckets.

void BasicHashtable::copy_buckets(char** top, char* end) {
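  // Record the byte length of the bucket array and the number of entries in
  // front of the copied buckets, then copy the bucket array itself.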
  intptr_t len = _table_size * sizeof(HashtableBucket);
  *(intptr_t*)(*top) = len;
  *top += sizeof(intptr_t);

  *(intptr_t*)(*top) = _number_of_entries;
  *top += sizeof(intptr_t);

  if (*top + len > end) {
    warning("\nThe shared miscellaneous data space is not large "
            "enough to \npreload requested classes.  Use "
            "-XX:SharedMiscDataSize= to increase \nthe initial "
            "size of the miscellaneous data space.\n");
    exit(2);
  }
  _buckets = (HashtableBucket*)memcpy(*top, _buckets, len);
  *top += len;
}


#ifndef PRODUCT

void Hashtable::print() {
  ResourceMark rm;

  for (int i = 0; i < table_size(); i++) {
    HashtableEntry* entry = bucket(i);
    while(entry != NULL) {
      tty->print("%d : ", i);
      entry->literal()->print();
      tty->cr();
      entry = entry->next();
    }
  }
}


void BasicHashtable::verify() {
  int count = 0;
  for (int i = 0; i < table_size(); i++) {
    for (BasicHashtableEntry* p = bucket(i); p != NULL; p = p->next()) {
      ++count;
    }
  }
  assert(count == number_of_entries(), "number of hashtable entries incorrect");
}


#endif // PRODUCT


#ifdef ASSERT

void BasicHashtable::verify_lookup_length(double load) {
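  // _lookup_count is the number of lookups performed and _lookup_length is
  // the total number of links traversed; warn when the average chain length
  // examined per lookup exceeds twice the given load factor.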
  if ((double)_lookup_length / (double)_lookup_count > load * 2.0) {
    warning("Performance bug: SystemDictionary lookup_count=%d "
            "lookup_length=%d average=%lf load=%f",
            _lookup_count, _lookup_length,
            (double) _lookup_length / _lookup_count, load);
  }
}

#endif
