src/share/vm/oops/instanceKlass.cpp

changeset 1412:74a5db69c1fe
parent    1409:26b774d693aa
child     1432:46b819ba120b
comparison of 1410:83c29a26f67c with 1412:74a5db69c1fe
--- a/src/share/vm/oops/instanceKlass.cpp
+++ b/src/share/vm/oops/instanceKlass.cpp
@@ -965,37 +965,82 @@
   return probe;
 }
 
 
 // Lookup or create a jmethodID.
-// This code can be called by the VM thread. For this reason it is critical that
-// there are no blocking operations (safepoints) while the lock is held -- or a
-// deadlock can occur.
-jmethodID instanceKlass::jmethod_id_for_impl(instanceKlassHandle ik_h, methodHandle method_h) {
+// This code is called by the VMThread and JavaThreads so the
+// locking has to be done very carefully to avoid deadlocks
+// and/or other cache consistency problems.
+//
+jmethodID instanceKlass::get_jmethod_id(instanceKlassHandle ik_h, methodHandle method_h) {
   size_t idnum = (size_t)method_h->method_idnum();
   jmethodID* jmeths = ik_h->methods_jmethod_ids_acquire();
   size_t length = 0;
   jmethodID id = NULL;
-  // array length stored in first element, other elements offset by one
-  if (jmeths == NULL ||                         // If there is no jmethodID array,
-      (length = (size_t)jmeths[0]) <= idnum ||  // or if it is too short,
-      (id = jmeths[idnum+1]) == NULL) {         // or if this jmethodID isn't allocated
-
-    // Do all the safepointing things (allocations) before grabbing the lock.
-    // These allocations will have to be freed if they are unused.
-
-    // Allocate a new array of methods.
+
+  // We use a double-check locking idiom here because this cache is
+  // performance sensitive. In the normal system, this cache only
+  // transitions from NULL to non-NULL which is safe because we use
+  // release_set_methods_jmethod_ids() to advertise the new cache.
+  // A partially constructed cache should never be seen by a racing
+  // thread. We also use release_store_ptr() to save a new jmethodID
+  // in the cache so a partially constructed jmethodID should never be
+  // seen either. Cache reads of existing jmethodIDs proceed without a
+  // lock, but cache writes of a new jmethodID requires uniqueness and
+  // creation of the cache itself requires no leaks so a lock is
+  // generally acquired in those two cases.
+  //
+  // If the RedefineClasses() API has been used, then this cache can
+  // grow and we'll have transitions from non-NULL to bigger non-NULL.
+  // Cache creation requires no leaks and we require safety between all
+  // cache accesses and freeing of the old cache so a lock is generally
+  // acquired when the RedefineClasses() API has been used.
+
+  if (jmeths != NULL) {
+    // the cache already exists
+    if (!ik_h->idnum_can_increment()) {
+      // the cache can't grow so we can just get the current values
+      get_jmethod_id_length_value(jmeths, idnum, &length, &id);
+    } else {
+      // cache can grow so we have to be more careful
+      if (Threads::number_of_threads() == 0 ||
+          SafepointSynchronize::is_at_safepoint()) {
+        // we're single threaded or at a safepoint - no locking needed
+        get_jmethod_id_length_value(jmeths, idnum, &length, &id);
+      } else {
+        MutexLocker ml(JmethodIdCreation_lock);
+        get_jmethod_id_length_value(jmeths, idnum, &length, &id);
+      }
+    }
+  }
+  // implied else:
+  // we need to allocate a cache so default length and id values are good
+
+  if (jmeths == NULL ||   // no cache yet
+      length <= idnum ||  // cache is too short
+      id == NULL) {       // cache doesn't contain entry
+
+    // This function can be called by the VMThread so we have to do all
+    // things that might block on a safepoint before grabbing the lock.
+    // Otherwise, we can deadlock with the VMThread or have a cache
+    // consistency issue. These vars keep track of what we might have
+    // to free after the lock is dropped.
+    jmethodID to_dealloc_id = NULL;
+    jmethodID* to_dealloc_jmeths = NULL;
+
+    // may not allocate new_jmeths or use it if we allocate it
     jmethodID* new_jmeths = NULL;
     if (length <= idnum) {
-      // A new array will be needed (unless some other thread beats us to it)
+      // allocate a new cache that might be used
       size_t size = MAX2(idnum+1, (size_t)ik_h->idnum_allocated_count());
       new_jmeths = NEW_C_HEAP_ARRAY(jmethodID, size+1);
       memset(new_jmeths, 0, (size+1)*sizeof(jmethodID));
-      new_jmeths[0] =(jmethodID)size; // array size held in the first element
+      // cache size is stored in element[0], other elements offset by one
+      new_jmeths[0] = (jmethodID)size;
     }
 
-    // Allocate a new method ID.
+    // allocate a new jmethodID that might be used
    jmethodID new_id = NULL;
    if (method_h->is_old() && !method_h->is_obsolete()) {
      // The method passed in is old (but not obsolete), we need to use the current version
      methodOop current_method = ik_h->method_with_idnum((int)idnum);
      assert(current_method != NULL, "old and but not obsolete, so should exist");
@@ -1005,108 +1050,189 @@
       // It is the current version of the method or an obsolete method,
       // use the version passed in
       new_id = JNIHandles::make_jmethod_id(method_h);
     }
 
-    if (Threads::number_of_threads() == 0 || SafepointSynchronize::is_at_safepoint()) {
-      // No need and unsafe to lock the JmethodIdCreation_lock at safepoint.
-      id = get_jmethod_id(ik_h, idnum, new_id, new_jmeths);
+    if (Threads::number_of_threads() == 0 ||
+        SafepointSynchronize::is_at_safepoint()) {
+      // we're single threaded or at a safepoint - no locking needed
+      id = get_jmethod_id_fetch_or_update(ik_h, idnum, new_id, new_jmeths,
+                                          &to_dealloc_id, &to_dealloc_jmeths);
     } else {
       MutexLocker ml(JmethodIdCreation_lock);
-      id = get_jmethod_id(ik_h, idnum, new_id, new_jmeths);
+      id = get_jmethod_id_fetch_or_update(ik_h, idnum, new_id, new_jmeths,
+                                          &to_dealloc_id, &to_dealloc_jmeths);
     }
+
+    // The lock has been dropped so we can free resources.
+    // Free up either the old cache or the new cache if we allocated one.
+    if (to_dealloc_jmeths != NULL) {
+      FreeHeap(to_dealloc_jmeths);
+    }
+    // free up the new ID since it wasn't needed
+    if (to_dealloc_id != NULL) {
+      JNIHandles::destroy_jmethod_id(to_dealloc_id);
+    }
   }
   return id;
 }
 
 
-jmethodID instanceKlass::get_jmethod_id(instanceKlassHandle ik_h, size_t idnum,
-                                        jmethodID new_id, jmethodID* new_jmeths) {
-  // Retry lookup after we got the lock or ensured we are at safepoint
+// Common code to fetch the jmethodID from the cache or update the
+// cache with the new jmethodID. This function should never do anything
+// that causes the caller to go to a safepoint or we can deadlock with
+// the VMThread or have cache consistency issues.
+//
+jmethodID instanceKlass::get_jmethod_id_fetch_or_update(
+            instanceKlassHandle ik_h, size_t idnum, jmethodID new_id,
+            jmethodID* new_jmeths, jmethodID* to_dealloc_id_p,
+            jmethodID** to_dealloc_jmeths_p) {
+  assert(new_id != NULL, "sanity check");
+  assert(to_dealloc_id_p != NULL, "sanity check");
+  assert(to_dealloc_jmeths_p != NULL, "sanity check");
+  assert(Threads::number_of_threads() == 0 ||
+         SafepointSynchronize::is_at_safepoint() ||
+         JmethodIdCreation_lock->owned_by_self(), "sanity check");
+
+  // reacquire the cache - we are locked, single threaded or at a safepoint
   jmethodID* jmeths = ik_h->methods_jmethod_ids_acquire();
   jmethodID id = NULL;
-  jmethodID to_dealloc_id = NULL;
-  jmethodID* to_dealloc_jmeths = NULL;
-  size_t length;
-
-  if (jmeths == NULL || (length = (size_t)jmeths[0]) <= idnum) {
+  size_t length = 0;
+
+  if (jmeths == NULL ||                         // no cache yet
+      (length = (size_t)jmeths[0]) <= idnum) {  // cache is too short
     if (jmeths != NULL) {
-      // We have grown the array: copy the existing entries, and delete the old array
+      // copy any existing entries from the old cache
       for (size_t index = 0; index < length; index++) {
         new_jmeths[index+1] = jmeths[index+1];
       }
-      to_dealloc_jmeths = jmeths; // using the new jmeths, deallocate the old one
+      *to_dealloc_jmeths_p = jmeths;  // save old cache for later delete
     }
     ik_h->release_set_methods_jmethod_ids(jmeths = new_jmeths);
   } else {
+    // fetch jmethodID (if any) from the existing cache
     id = jmeths[idnum+1];
-    to_dealloc_jmeths = new_jmeths; // using the old jmeths, deallocate the new one
+    *to_dealloc_jmeths_p = new_jmeths;  // save new cache for later delete
   }
   if (id == NULL) {
+    // No matching jmethodID in the existing cache or we have a new
+    // cache or we just grew the cache. This cache write is done here
+    // by the first thread to win the foot race because a jmethodID
+    // needs to be unique once it is generally available.
     id = new_id;
-    jmeths[idnum+1] = id;  // install the new method ID
+
+    // The jmethodID cache can be read while unlocked so we have to
+    // make sure the new jmethodID is complete before installing it
+    // in the cache.
+    OrderAccess::release_store_ptr(&jmeths[idnum+1], id);
   } else {
-    to_dealloc_id = new_id; // the new id wasn't used, mark it for deallocation
+    *to_dealloc_id_p = new_id;  // save new id for later delete
   }
-
-  // Free up unneeded or no longer needed resources
-  FreeHeap(to_dealloc_jmeths);
-  if (to_dealloc_id != NULL) {
-    JNIHandles::destroy_jmethod_id(to_dealloc_id);
-  }
   return id;
 }
 
 
+// Common code to get the jmethodID cache length and the jmethodID
+// value at index idnum if there is one.
+//
+void instanceKlass::get_jmethod_id_length_value(jmethodID* cache,
+       size_t idnum, size_t *length_p, jmethodID* id_p) {
+  assert(cache != NULL, "sanity check");
+  assert(length_p != NULL, "sanity check");
+  assert(id_p != NULL, "sanity check");
+
+  // cache size is stored in element[0], other elements offset by one
+  *length_p = (size_t)cache[0];
+  if (*length_p <= idnum) {  // cache is too short
+    *id_p = NULL;
+  } else {
+    *id_p = cache[idnum+1];  // fetch jmethodID (if any)
+  }
+}
+
+
 // Lookup a jmethodID, NULL if not found. Do no blocking, no allocations, no handles
 jmethodID instanceKlass::jmethod_id_or_null(methodOop method) {
   size_t idnum = (size_t)method->method_idnum();
   jmethodID* jmeths = methods_jmethod_ids_acquire();
   size_t length;                                // length assigned as debugging crumb
   jmethodID id = NULL;
-  if (jmeths != NULL &&                         // If there is a jmethodID array,
+  if (jmeths != NULL &&                         // If there is a cache
       (length = (size_t)jmeths[0]) > idnum) {   // and if it is long enough,
     id = jmeths[idnum+1];                       // Look up the id (may be NULL)
   }
   return id;
 }
 
 
 // Cache an itable index
 void instanceKlass::set_cached_itable_index(size_t idnum, int index) {
   int* indices = methods_cached_itable_indices_acquire();
-  if (indices == NULL ||                         // If there is no index array,
-      ((size_t)indices[0]) <= idnum) {           // or if it is too short
-    // Lock before we allocate the array so we don't leak
+  int* to_dealloc_indices = NULL;
+
+  // We use a double-check locking idiom here because this cache is
+  // performance sensitive. In the normal system, this cache only
+  // transitions from NULL to non-NULL which is safe because we use
+  // release_set_methods_cached_itable_indices() to advertise the
+  // new cache. A partially constructed cache should never be seen
+  // by a racing thread. Cache reads and writes proceed without a
+  // lock, but creation of the cache itself requires no leaks so a
+  // lock is generally acquired in that case.
+  //
+  // If the RedefineClasses() API has been used, then this cache can
+  // grow and we'll have transitions from non-NULL to bigger non-NULL.
+  // Cache creation requires no leaks and we require safety between all
+  // cache accesses and freeing of the old cache so a lock is generally
+  // acquired when the RedefineClasses() API has been used.
+
+  if (indices == NULL || idnum_can_increment()) {
+    // we need a cache or the cache can grow
     MutexLocker ml(JNICachedItableIndex_lock);
-    // Retry lookup after we got the lock
+    // reacquire the cache to see if another thread already did the work
    indices = methods_cached_itable_indices_acquire();
    size_t length = 0;
-    // array length stored in first element, other elements offset by one
+    // cache size is stored in element[0], other elements offset by one
    if (indices == NULL || (length = (size_t)indices[0]) <= idnum) {
      size_t size = MAX2(idnum+1, (size_t)idnum_allocated_count());
      int* new_indices = NEW_C_HEAP_ARRAY(int, size+1);
-      new_indices[0] =(int)size; // array size held in the first element
-      // Copy the existing entries, if any
+      new_indices[0] = (int)size;
+      // copy any existing entries
       size_t i;
       for (i = 0; i < length; i++) {
         new_indices[i+1] = indices[i+1];
       }
       // Set all the rest to -1
       for (i = length; i < size; i++) {
         new_indices[i+1] = -1;
       }
       if (indices != NULL) {
-        FreeHeap(indices); // delete any old indices
+        // We have an old cache to delete so save it for after we
+        // drop the lock.
+        to_dealloc_indices = indices;
       }
       release_set_methods_cached_itable_indices(indices = new_indices);
     }
+
+    if (idnum_can_increment()) {
+      // this cache can grow so we have to write to it safely
+      indices[idnum+1] = index;
+    }
   } else {
     CHECK_UNHANDLED_OOPS_ONLY(Thread::current()->clear_unhandled_oops());
   }
-  // This is a cache, if there is a race to set it, it doesn't matter
-  indices[idnum+1] = index;
+
+  if (!idnum_can_increment()) {
+    // The cache cannot grow and this JNI itable index value does not
+    // have to be unique like a jmethodID. If there is a race to set it,
+    // it doesn't matter.
+    indices[idnum+1] = index;
+  }
+
+  if (to_dealloc_indices != NULL) {
+    // we allocated a new cache so free the old one
+    FreeHeap(to_dealloc_indices);
+  }
 }
 
 
 // Retrieve a cached itable index
 int instanceKlass::cached_itable_index(size_t idnum) {
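The comment blocks added in this hunk describe the publication protocol in prose: build the cache (or the new jmethodID) completely first, then publish it with a release store so that lock-free readers, which load with acquire semantics, can never observe a partially constructed value. The sketch below is not HotSpot code; it restates that double-check locking idiom with standard C++ atomics standing in for methods_jmethod_ids_acquire(), release_set_methods_jmethod_ids() and OrderAccess::release_store_ptr(), and the class and member names are invented for illustration.

#include <atomic>
#include <cstddef>
#include <mutex>

// Illustrative only -- not HotSpot code.  A cache that, as in the normal
// (non-RedefineClasses) case above, only ever goes from null to non-null.
// The array is fully built before it is published with a release store and
// readers load it with acquire, so a racing lock-free reader can never see
// a partially constructed cache or entry.
class IdCache {
  std::atomic<std::atomic<void*>*> _cache{nullptr};  // element[0] holds the length
  std::mutex _lock;                                  // stand-in for JmethodIdCreation_lock

 public:
  void* get(size_t idnum) {                          // lock-free read path
    std::atomic<void*>* c = _cache.load(std::memory_order_acquire);
    if (c == nullptr || (size_t)c[0].load(std::memory_order_relaxed) <= idnum) {
      return nullptr;                                // no cache yet, or too short
    }
    return c[idnum + 1].load(std::memory_order_acquire);
  }

  // Double-check locking: try the lock-free read first, then re-check and
  // create/install under the lock.  Growing the cache (RedefineClasses) is
  // not modeled here.
  void* get_or_install(size_t idnum, size_t capacity, void* new_id) {
    if (void* id = get(idnum)) return id;            // fast path
    std::lock_guard<std::mutex> g(_lock);            // slow path
    std::atomic<void*>* c = _cache.load(std::memory_order_acquire);
    if (c == nullptr) {
      c = new std::atomic<void*>[capacity + 1];      // construct fully first...
      for (size_t i = 0; i <= capacity; i++) {
        c[i].store(nullptr, std::memory_order_relaxed);
      }
      c[0].store((void*)capacity, std::memory_order_relaxed);
      _cache.store(c, std::memory_order_release);    // ...then publish (cf. release_set_...)
    }
    if ((size_t)c[0].load(std::memory_order_relaxed) <= idnum) return nullptr;
    void* cur = c[idnum + 1].load(std::memory_order_relaxed);
    if (cur != nullptr) return cur;                  // another thread won the race
    c[idnum + 1].store(new_id, std::memory_order_release);  // cf. release_store_ptr()
    return new_id;
  }
};

As in the changeset, reads never take the lock; the lock only serializes cache creation and the first write of a given entry, and the re-check under the lock keeps a losing allocation from ever becoming visible.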
@@ -2298,10 +2424,15 @@
 
 // RedefineClasses() support for previous versions:
 
 // Add an information node that contains weak references to the
 // interesting parts of the previous version of the_class.
+// This is also where we clean out any unused weak references.
+// Note that while we delete nodes from the _previous_versions
+// array, we never delete the array itself until the klass is
+// unloaded. The has_been_redefined() query depends on that fact.
+//
 void instanceKlass::add_previous_version(instanceKlassHandle ikh,
        BitMap* emcp_methods, int emcp_method_count) {
   assert(Thread::current()->is_VM_thread(),
          "only VMThread can add previous versions");
 
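The other pattern the changeset introduces is the split between get_jmethod_id(), which performs every allocation that might block on a safepoint before taking JmethodIdCreation_lock, and get_jmethod_id_fetch_or_update(), which runs with the lock held (or at a safepoint) and never frees anything itself: whatever allocation lost the race is handed back through the to_dealloc_* out-parameters and released by the caller only after the lock has been dropped. A rough standalone illustration of that contract follows; the names are invented, plain new/delete stands in for the C heap and JNIHandles helpers, and cache growth is left out.

#include <mutex>

// Illustrative only -- not HotSpot code.  The fetch-or-update step runs with
// the lock held and never frees anything; whatever turned out to be unneeded
// is reported back through an out-parameter and released by the caller after
// the lock is gone.
struct Cache {
  std::mutex lock;
  int*       table = nullptr;     // current cache, nullptr until first use

  // Called with `lock` held.  Installs new_table if the cache is still empty,
  // otherwise reports it back for deallocation.
  int* fetch_or_update(int* new_table, int** to_dealloc_table_p) {
    if (table == nullptr) {
      table = new_table;                 // our allocation won the race
    } else {
      *to_dealloc_table_p = new_table;   // someone beat us to it
    }
    return table;
  }

  int* get_or_create(int capacity) {
    // Allocate before taking the lock: no blocking work while it is held.
    // (The real code first checks the cache lock-free; omitted for brevity.)
    int* new_table = new int[capacity]();
    int* to_dealloc_table = nullptr;
    int* result;
    {
      std::lock_guard<std::mutex> g(lock);
      result = fetch_or_update(new_table, &to_dealloc_table);
    }
    // The lock has been dropped, so it is now safe to free the loser.
    delete[] to_dealloc_table;    // deleting nullptr is a no-op
    return result;
  }
};

set_cached_itable_index() follows the same shape with to_dealloc_indices, except that an itable index does not have to be unique the way a jmethodID does, so racing plain stores into an already published cache are tolerated there.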
