Mon, 15 May 2017 12:20:15 +0200
8180048: Interned string and symbol table leak memory during parallel unlinking
Summary: Make appending found dead BasicHashtableEntrys to the free list atomic.
Reviewed-by: ehelin, shade
1 /*
2 * Copyright (c) 2003, 2014, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
25 #ifndef SHARE_VM_UTILITIES_HASHTABLE_HPP
26 #define SHARE_VM_UTILITIES_HASHTABLE_HPP
28 #include "classfile/classLoaderData.hpp"
29 #include "memory/allocation.hpp"
30 #include "oops/oop.hpp"
31 #include "oops/symbol.hpp"
32 #include "runtime/handles.hpp"
34 // This is a generic hashtable, designed to be used for the symbol
35 // and string tables.
36 //
37 // It is implemented as an open hash table with a fixed number of buckets.
38 //
39 // %note:
40 // - TableEntrys are allocated in blocks to reduce the space overhead.
// One entry in a BasicHashtable bucket chain.  Carries the 32-bit hash
// and the intrusive link to the next entry; subclasses add the payload.
// NOTE: field names are mirrored by the serviceability agent (VMStructs).
template <MEMFLAGS F> class BasicHashtableEntry : public CHeapObj<F> {
  friend class VMStructs;
private:
  unsigned int _hash; // 32-bit hash for item

  // Link to next element in the linked list for this bucket. EXCEPT
  // bit 0 set indicates that this entry is shared and must not be
  // unlinked from the table. Bit 0 is set during the dumping of the
  // archive. Since shared entries are immutable, _next fields in the
  // shared entries will not change. New entries will always be
  // unshared and since pointers are aligned, bit 0 will always remain 0
  // with no extra effort.
  BasicHashtableEntry<F>* _next;

  // Windows IA64 compiler requires subclasses to be able to access these
protected:
  // Entry objects should not be created, they should be taken from the
  // free list with BasicHashtable.new_entry().
  BasicHashtableEntry() { ShouldNotReachHere(); }
  // Entry objects should not be destroyed. They should be placed on
  // the free list instead with BasicHashtable.free_entry().
  ~BasicHashtableEntry() { ShouldNotReachHere(); }

public:

  unsigned int hash() const { return _hash; }
  void set_hash(unsigned int hash) { _hash = hash; }
  unsigned int* hash_addr() { return &_hash; }

  // Strip the "shared" tag bit (bit 0) from a possibly tagged link value,
  // yielding a dereferenceable pointer.
  static BasicHashtableEntry<F>* make_ptr(BasicHashtableEntry<F>* p) {
    return (BasicHashtableEntry*)((intptr_t)p & -2);
  }

  // Next entry in the bucket chain, with the shared tag bit masked off.
  BasicHashtableEntry<F>* next() const {
    return make_ptr(_next);
  }

  // Raw assignment of the link field.  Note that this overwrites the
  // shared tag bit as well; 'next' is expected to be an untagged pointer.
  void set_next(BasicHashtableEntry<F>* next) {
    _next = next;
  }

  // Address of the (possibly tagged) link field; used for in-place
  // chain surgery under a lock.
  BasicHashtableEntry<F>** next_addr() {
    return &_next;
  }

  // True if bit 0 of the link is set, i.e. this entry lives in the
  // shared (CDS) archive and must not be unlinked.
  bool is_shared() const {
    return ((intptr_t)_next & 1) != 0;
  }

  // Tag this entry as shared by setting bit 0 of the link.
  void set_shared() {
    _next = (BasicHashtableEntry<F>*)((intptr_t)_next | 1);
  }
};
// A hashtable entry that additionally stores the literal (the item
// itself, e.g. a Symbol* or oop) of type T.
template <class T, MEMFLAGS F> class HashtableEntry : public BasicHashtableEntry<F> {
  friend class VMStructs;
private:
  T _literal; // ref to item in table.

public:
  // Literal
  T literal() const { return _literal; }
  T* literal_addr() { return &_literal; }
  void set_literal(T s) { _literal = s; }

  // Type-narrowing covers over the base-class chain accessors.
  HashtableEntry* next() const {
    return (HashtableEntry*)BasicHashtableEntry<F>::next();
  }
  HashtableEntry** next_addr() {
    return (HashtableEntry**)BasicHashtableEntry<F>::next_addr();
  }
};
// A single bucket: the head pointer of one entry chain.
template <MEMFLAGS F> class HashtableBucket : public CHeapObj<F> {
  friend class VMStructs;
private:
  // Instance variable
  BasicHashtableEntry<F>* _entry;

public:
  // Accessing
  void clear() { _entry = NULL; }

  // The following methods use order access methods to avoid race
  // conditions in multiprocessor systems.
  BasicHashtableEntry<F>* get_entry() const;
  void set_entry(BasicHashtableEntry<F>* l);

  // The following method is not MT-safe and must be done under lock.
  BasicHashtableEntry<F>** entry_addr() { return &_entry; }
};
// Non-template core of the hashtable: a fixed-size bucket array plus
// block-allocated entries.  Freed entries are recycled through an
// internal free list rather than being returned to the C heap.
template <MEMFLAGS F> class BasicHashtable : public CHeapObj<F> {
  friend class VMStructs;

public:
  BasicHashtable(int table_size, int entry_size);
  BasicHashtable(int table_size, int entry_size,
                 HashtableBucket<F>* buckets, int number_of_entries);

  // Sharing support.
  void copy_buckets(char** top, char* end);
  void copy_table(char** top, char* end);

  // Bucket handling: map a full 32-bit hash onto a bucket index.
  int hash_to_index(unsigned int full_hash) {
    int h = full_hash % _table_size;
    assert(h >= 0 && h < _table_size, "Illegal hash value");
    return h;
  }

  // Reverse the order of elements in each of the buckets.
  void reverse();

private:
  // Instance variables
  int _table_size;
  HashtableBucket<F>* _buckets;
  // List of recycled entries, volatile because it is appended to
  // concurrently by bulk_free_entries().
  BasicHashtableEntry<F>* volatile _free_list;
  char* _first_free_entry; // next unused slot in the current entry block
  char* _end_block;        // end of the current entry block
  int _entry_size;
  volatile int _number_of_entries;

protected:

#ifdef ASSERT
  // Lookup statistics, checked against the table load in
  // verify_lookup_length().
  int _lookup_count;
  int _lookup_length;
  void verify_lookup_length(double load);
#endif

  void initialize(int table_size, int entry_size, int number_of_entries);

  // Accessor
  int entry_size() const { return _entry_size; }

  // The following method is MT-safe and may be used with caution.
  BasicHashtableEntry<F>* bucket(int i);

  // The following method is not MT-safe and must be done under lock.
  BasicHashtableEntry<F>** bucket_addr(int i) { return _buckets[i].entry_addr(); }

  // Attempt to get an entry from the free list
  BasicHashtableEntry<F>* new_entry_free_list();

  // Table entry management
  BasicHashtableEntry<F>* new_entry(unsigned int hashValue);

  // Used when moving the entry to another table
  // Clean up links, but do not add to free_list
  // NOTE(review): the decrement of the volatile counter below is not
  // atomic; presumably callers serialize table moves under a lock --
  // confirm at the call sites.
  void unlink_entry(BasicHashtableEntry<F>* entry) {
    entry->set_next(NULL);
    --_number_of_entries;
  }

  // Move over freelist and free block for allocation
  void copy_freelist(BasicHashtable* src) {
    _free_list = src->_free_list;
    src->_free_list = NULL;
    _first_free_entry = src->_first_free_entry;
    src->_first_free_entry = NULL;
    _end_block = src->_end_block;
    src->_end_block = NULL;
  }

  // Free the buckets in this hashtable
  void free_buckets();

  // Helper data structure containing context for the bucket entry unlink process,
  // storing the unlinked buckets in a linked list.
  // Also avoids the need to pass around these four members as parameters everywhere.
  struct BucketUnlinkContext {
    int _num_processed;
    int _num_removed;
    // Head and tail pointers for the linked list of removed entries.
    BasicHashtableEntry<F>* _removed_head;
    BasicHashtableEntry<F>* _removed_tail;

    BucketUnlinkContext() : _num_processed(0), _num_removed(0), _removed_head(NULL), _removed_tail(NULL) {
    }

    // Append the given entry to this context's private list of removed
    // entries (no synchronization needed; the context is thread-local).
    void free_entry(BasicHashtableEntry<F>* entry);
  };
  // Add the bucket entries linked together in the given context to the
  // global free list.  This method is mt-safe wrt. other calls of this
  // method.
  void bulk_free_entries(BucketUnlinkContext* context);
public:
  int table_size() { return _table_size; }
  void set_entry(int index, BasicHashtableEntry<F>* entry);

  void add_entry(int index, BasicHashtableEntry<F>* entry);

  // Return a single entry to the free list for reuse.
  void free_entry(BasicHashtableEntry<F>* entry);

  int number_of_entries() { return _number_of_entries; }

  void verify() PRODUCT_RETURN;
};
// Typed hashtable: BasicHashtable specialized for entries carrying a
// literal of type T, hashed by Symbol identity hash.
template <class T, MEMFLAGS F> class Hashtable : public BasicHashtable<F> {
  friend class VMStructs;

public:
  Hashtable(int table_size, int entry_size)
    : BasicHashtable<F>(table_size, entry_size) { }

  Hashtable(int table_size, int entry_size,
            HashtableBucket<F>* buckets, int number_of_entries)
    : BasicHashtable<F>(table_size, entry_size, buckets, number_of_entries) { }

  // Debugging
  void print() PRODUCT_RETURN;

  // Reverse the order of elements in each of the buckets. Hashtable
  // entries which refer to objects at a lower address than 'boundary'
  // are separated from those which refer to objects at higher
  // addresses, and appear first in the list.
  void reverse(void* boundary = NULL);

protected:

  // Hash value for a symbol is its identity hash.
  unsigned int compute_hash(Symbol* name) {
    return (unsigned int) name->identity_hash();
  }

  // Bucket index for the given symbol.
  int index_for(Symbol* name) {
    return this->hash_to_index(compute_hash(name));
  }

  // Table entry management
  HashtableEntry<T, F>* new_entry(unsigned int hashValue, T obj);

  // The following method is MT-safe and may be used with caution.
  HashtableEntry<T, F>* bucket(int i) {
    return (HashtableEntry<T, F>*)BasicHashtable<F>::bucket(i);
  }

  // The following method is not MT-safe and must be done under lock.
  HashtableEntry<T, F>** bucket_addr(int i) {
    return (HashtableEntry<T, F>**)BasicHashtable<F>::bucket_addr(i);
  }
};
// A hashtable that can be rebuilt with an alternate (seeded) hash code
// when bucket chains become pathologically long (see check_rehash_table
// and move_to).
template <class T, MEMFLAGS F> class RehashableHashtable : public Hashtable<T, F> {
protected:

  enum {
    rehash_count = 100,   // minimum entry count before rehash is considered
    rehash_multiple = 60  // rehash when a chain exceeds this multiple of the average
  };

  // Check that the table is unbalanced
  bool check_rehash_table(int count);

public:
  RehashableHashtable(int table_size, int entry_size)
    : Hashtable<T, F>(table_size, entry_size) { }

  RehashableHashtable(int table_size, int entry_size,
                      HashtableBucket<F>* buckets, int number_of_entries)
    : Hashtable<T, F>(table_size, entry_size, buckets, number_of_entries) { }

  // Function to move these elements into the new table.
  void move_to(RehashableHashtable<T, F>* new_table);
  // A non-zero seed means an alternate hash code is in effect.
  static bool use_alternate_hashcode() { return _seed != 0; }
  static juint seed() { return _seed; }

  static int literal_size(Symbol *symbol);
  static int literal_size(oop oop);

  // The following two are currently not used, but are needed anyway because some
  // C++ compilers (MacOS and Solaris) force the instantiation of
  // Hashtable<ConstantPool*, mtClass>::dump_table() even though we never call this function
  // in the VM code.
  static int literal_size(ConstantPool *cp) {Unimplemented(); return 0;}
  static int literal_size(Klass *k) {Unimplemented(); return 0;}

  void dump_table(outputStream* st, const char *table_name);

private:
  // Seed for the alternate hash code; zero means "not in use".
  static juint _seed;
};
// Versions of hashtable where two handles are used to compute the index.
// Hashtable whose index is computed from two values: a symbol and its
// defining class loader.
template <class T, MEMFLAGS F> class TwoOopHashtable : public Hashtable<T, F> {
  friend class VMStructs;
protected:
  TwoOopHashtable(int table_size, int entry_size)
    : Hashtable<T, F>(table_size, entry_size) {}

  TwoOopHashtable(int table_size, int entry_size, HashtableBucket<F>* t,
                  int number_of_entries)
    : Hashtable<T, F>(table_size, entry_size, t, number_of_entries) {}

public:
  // Combine the identity hashes of the name and its loader (loader
  // contributes zero when NULL, which is only legal with shared spaces).
  unsigned int compute_hash(Symbol* name, ClassLoaderData* loader_data) {
    unsigned int name_hash = name->identity_hash();
    // loader is null with CDS
    assert(loader_data != NULL || UseSharedSpaces || DumpSharedSpaces,
           "only allowed with shared spaces");
    unsigned int loader_hash = loader_data == NULL ? 0 : loader_data->identity_hash();
    return name_hash ^ loader_hash;
  }

  // Bucket index for the (name, loader) pair.
  int index_for(Symbol* name, ClassLoaderData* loader_data) {
    return this->hash_to_index(compute_hash(name, loader_data));
  }
};
364 #endif // SHARE_VM_UTILITIES_HASHTABLE_HPP