--- a/src/share/vm/memory/metaspace.cpp	Mon Feb 10 10:34:52 2014 +0100
+++ b/src/share/vm/memory/metaspace.cpp	Tue Jan 07 13:26:56 2014 -0500
@@ -513,8 +513,6 @@
   // Unlink empty VirtualSpaceNodes and free it.
   void purge(ChunkManager* chunk_manager);
 
-  bool contains(const void *ptr);
-
   void print_on(outputStream* st) const;
 
   class VirtualSpaceListIterator : public StackObj {
@@ -558,7 +556,7 @@
 
  private:
 
-  // protects allocations and contains.
+  // protects allocations
   Mutex* const _lock;
 
   // Type of metadata allocated.
@@ -595,7 +593,11 @@
  private:
   // Accessors
   Metachunk* chunks_in_use(ChunkIndex index) const { return _chunks_in_use[index]; }
-  void set_chunks_in_use(ChunkIndex index, Metachunk* v) { _chunks_in_use[index] = v; }
+  void set_chunks_in_use(ChunkIndex index, Metachunk* v) {
+    // ensure lock-free iteration sees fully initialized node
+    OrderAccess::storestore();
+    _chunks_in_use[index] = v;
+  }
 
   BlockFreelist* block_freelists() const {
     return (BlockFreelist*) &_block_freelists;
@@ -708,6 +710,8 @@
   void print_on(outputStream* st) const;
   void locked_print_chunks_in_use_on(outputStream* st) const;
 
+  bool contains(const void *ptr);
+
   void verify();
   void verify_chunk_size(Metachunk* chunk);
   NOT_PRODUCT(void mangle_freed_chunks();)
@@ -1159,8 +1163,6 @@
   } else {
     assert(new_entry->reserved_words() == vs_word_size,
       "Reserved memory size differs from requested memory size");
-    // ensure lock-free iteration sees fully initialized node
-    OrderAccess::storestore();
     link_vs(new_entry);
     return true;
   }
@@ -1287,19 +1289,6 @@
   }
 }
 
-bool VirtualSpaceList::contains(const void *ptr) {
-  VirtualSpaceNode* list = virtual_space_list();
-  VirtualSpaceListIterator iter(list);
-  while (iter.repeat()) {
-    VirtualSpaceNode* node = iter.get_next();
-    if (node->reserved()->contains(ptr)) {
-      return true;
-    }
-  }
-  return false;
-}
-
-
 // MetaspaceGC methods
 
 // VM_CollectForMetadataAllocation is the vm operation used to GC.
@@ -2392,6 +2381,21 @@
   return result;
 }
 
+// This function looks at the chunks in the metaspace without locking.
+// The chunks are added with store ordering and not deleted except for at
+// unloading time.
+bool SpaceManager::contains(const void *ptr) {
+  for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i))
+  {
+    Metachunk* curr = chunks_in_use(i);
+    while (curr != NULL) {
+      if (curr->contains(ptr)) return true;
+      curr = curr->next();
+    }
+  }
+  return false;
+}
+
 void SpaceManager::verify() {
   // If there are blocks in the dictionary, then
   // verfication of chunks does not work since
@@ -3463,17 +3467,12 @@
   }
 }
 
-bool Metaspace::contains(const void * ptr) {
-  if (MetaspaceShared::is_in_shared_space(ptr)) {
-    return true;
+bool Metaspace::contains(const void* ptr) {
+  if (vsm()->contains(ptr)) return true;
+  if (using_class_space()) {
+    return class_vsm()->contains(ptr);
   }
-  // This is checked while unlocked.  As long as the virtualspaces are added
-  // at the end, the pointer will be in one of them.  The virtual spaces
-  // aren't deleted presently.  When they are, some sort of locking might
-  // be needed.  Note, locking this can cause inversion problems with the
-  // caller in MetaspaceObj::is_metadata() function.
-  return space_list()->contains(ptr) ||
-    (using_class_space() && class_space_list()->contains(ptr));
+  return false;
 }
 
 void Metaspace::verify() {