108 bool instanceKlass::verify_code( |
108 bool instanceKlass::verify_code( |
109 instanceKlassHandle this_oop, bool throw_verifyerror, TRAPS) { |
109 instanceKlassHandle this_oop, bool throw_verifyerror, TRAPS) { |
110 // 1) Verify the bytecodes |
110 // 1) Verify the bytecodes |
111 Verifier::Mode mode = |
111 Verifier::Mode mode = |
112 throw_verifyerror ? Verifier::ThrowException : Verifier::NoException; |
112 throw_verifyerror ? Verifier::ThrowException : Verifier::NoException; |
113 return Verifier::verify(this_oop, mode, CHECK_false); |
113 return Verifier::verify(this_oop, mode, this_oop->should_verify_class(), CHECK_false); |
114 } |
114 } |
115 |
115 |
116 |
116 |
// Used exclusively by the shared spaces dump mechanism to prevent
// classes mapped into the shared regions in new VMs from appearing linked.
1083 size_t length = 0; |
1083 size_t length = 0; |
1084 // array length stored in first element, other elements offset by one |
1084 // array length stored in first element, other elements offset by one |
1085 if (indices == NULL || (length = (size_t)indices[0]) <= idnum) { |
1085 if (indices == NULL || (length = (size_t)indices[0]) <= idnum) { |
1086 size_t size = MAX2(idnum+1, (size_t)idnum_allocated_count()); |
1086 size_t size = MAX2(idnum+1, (size_t)idnum_allocated_count()); |
1087 int* new_indices = NEW_C_HEAP_ARRAY(int, size+1); |
1087 int* new_indices = NEW_C_HEAP_ARRAY(int, size+1); |
|
1088 new_indices[0] =(int)size; // array size held in the first element |
1088 // Copy the existing entries, if any |
1089 // Copy the existing entries, if any |
1089 size_t i; |
1090 size_t i; |
1090 for (i = 0; i < length; i++) { |
1091 for (i = 0; i < length; i++) { |
1091 new_indices[i+1] = indices[i+1]; |
1092 new_indices[i+1] = indices[i+1]; |
1092 } |
1093 } |
// Apply do_oop to every oop field of obj by walking the nonstatic oop map
// blocks forward.  Each OopMapBlock supplies the offset of the first oop
// field of a run (map->offset()) and the number of fields in that run
// (map->count()).  Field width is chosen by UseCompressedOops.
#define InstanceKlass_OOP_MAP_ITERATE(obj, do_oop, assert_fn)            \
{                                                                        \
  /* Compute oopmap block range. The common case                         \
     is nonstatic_oop_map_size == 1. */                                  \
  OopMapBlock*       map     = start_of_nonstatic_oop_maps();            \
  OopMapBlock* const end_map = map + nonstatic_oop_map_count();          \
  if (UseCompressedOops) {                                               \
    while (map < end_map) {                                              \
      InstanceKlass_SPECIALIZED_OOP_ITERATE(narrowOop,                   \
        obj->obj_field_addr<narrowOop>(map->offset()), map->count(),     \
        do_oop, assert_fn)                                               \
      ++map;                                                             \
    }                                                                    \
  } else {                                                               \
    while (map < end_map) {                                              \
      InstanceKlass_SPECIALIZED_OOP_ITERATE(oop,                         \
        obj->obj_field_addr<oop>(map->offset()), map->count(),           \
        do_oop, assert_fn)                                               \
      ++map;                                                             \
    }                                                                    \
  }                                                                      \
}
1416 |
1417 |
// Same traversal as the forward iterator above, but the oop map blocks are
// visited last-to-first: map starts one past the final block and is
// pre-decremented before each use, stopping when it reaches start_map.
#define InstanceKlass_OOP_MAP_REVERSE_ITERATE(obj, do_oop, assert_fn)    \
{                                                                        \
  OopMapBlock* const start_map = start_of_nonstatic_oop_maps();          \
  OopMapBlock*       map       = start_map + nonstatic_oop_map_count();  \
  if (UseCompressedOops) {                                               \
    while (start_map < map) {                                            \
      --map;                                                             \
      InstanceKlass_SPECIALIZED_OOP_REVERSE_ITERATE(narrowOop,           \
        obj->obj_field_addr<narrowOop>(map->offset()), map->count(),     \
        do_oop, assert_fn)                                               \
    }                                                                    \
  } else {                                                               \
    while (start_map < map) {                                            \
      --map;                                                             \
      InstanceKlass_SPECIALIZED_OOP_REVERSE_ITERATE(oop,                 \
        obj->obj_field_addr<oop>(map->offset()), map->count(),           \
        do_oop, assert_fn)                                               \
    }                                                                    \
  }                                                                      \
}
1437 |
1438 |
1441 /* Compute oopmap block range. The common case is \ |
1442 /* Compute oopmap block range. The common case is \ |
1442 nonstatic_oop_map_size == 1, so we accept the \ |
1443 nonstatic_oop_map_size == 1, so we accept the \ |
1443 usually non-existent extra overhead of examining \ |
1444 usually non-existent extra overhead of examining \ |
1444 all the maps. */ \ |
1445 all the maps. */ \ |
1445 OopMapBlock* map = start_of_nonstatic_oop_maps(); \ |
1446 OopMapBlock* map = start_of_nonstatic_oop_maps(); \ |
1446 OopMapBlock* const end_map = map + nonstatic_oop_map_size(); \ |
1447 OopMapBlock* const end_map = map + nonstatic_oop_map_count(); \ |
1447 if (UseCompressedOops) { \ |
1448 if (UseCompressedOops) { \ |
1448 while (map < end_map) { \ |
1449 while (map < end_map) { \ |
1449 InstanceKlass_SPECIALIZED_BOUNDED_OOP_ITERATE(narrowOop, \ |
1450 InstanceKlass_SPECIALIZED_BOUNDED_OOP_ITERATE(narrowOop, \ |
1450 obj->obj_field_addr<narrowOop>(map->offset()), map->length(), \ |
1451 obj->obj_field_addr<narrowOop>(map->offset()), map->count(), \ |
1451 low, high, \ |
1452 low, high, \ |
1452 do_oop, assert_fn) \ |
1453 do_oop, assert_fn) \ |
1453 ++map; \ |
1454 ++map; \ |
1454 } \ |
1455 } \ |
1455 } else { \ |
1456 } else { \ |
1456 while (map < end_map) { \ |
1457 while (map < end_map) { \ |
1457 InstanceKlass_SPECIALIZED_BOUNDED_OOP_ITERATE(oop, \ |
1458 InstanceKlass_SPECIALIZED_BOUNDED_OOP_ITERATE(oop, \ |
1458 obj->obj_field_addr<oop>(map->offset()), map->length(), \ |
1459 obj->obj_field_addr<oop>(map->offset()), map->count(), \ |
1459 low, high, \ |
1460 low, high, \ |
1460 do_oop, assert_fn) \ |
1461 do_oop, assert_fn) \ |
1461 ++map; \ |
1462 ++map; \ |
1462 } \ |
1463 } \ |
1463 } \ |
1464 } \ |
2214 static bool first_time = true; |
2215 static bool first_time = true; |
2215 guarantee(k == SystemDictionary::class_klass() && first_time, "Invalid verify of maps"); |
2216 guarantee(k == SystemDictionary::class_klass() && first_time, "Invalid verify of maps"); |
2216 first_time = false; |
2217 first_time = false; |
2217 const int extra = java_lang_Class::number_of_fake_oop_fields; |
2218 const int extra = java_lang_Class::number_of_fake_oop_fields; |
2218 guarantee(ik->nonstatic_field_size() == extra, "just checking"); |
2219 guarantee(ik->nonstatic_field_size() == extra, "just checking"); |
2219 guarantee(ik->nonstatic_oop_map_size() == 1, "just checking"); |
2220 guarantee(ik->nonstatic_oop_map_count() == 1, "just checking"); |
2220 guarantee(ik->size_helper() == align_object_size(instanceOopDesc::header_size() + extra), "just checking"); |
2221 guarantee(ik->size_helper() == align_object_size(instanceOopDesc::header_size() + extra), "just checking"); |
2221 |
2222 |
2222 // Check that the map is (2,extra) |
2223 // Check that the map is (2,extra) |
2223 int offset = java_lang_Class::klass_offset; |
2224 int offset = java_lang_Class::klass_offset; |
2224 |
2225 |
2225 OopMapBlock* map = ik->start_of_nonstatic_oop_maps(); |
2226 OopMapBlock* map = ik->start_of_nonstatic_oop_maps(); |
2226 guarantee(map->offset() == offset && map->length() == extra, "just checking"); |
2227 guarantee(map->offset() == offset && map->count() == (unsigned int) extra, |
|
2228 "sanity"); |
2227 } |
2229 } |
2228 } |
2230 } |
2229 |
2231 |
2230 #endif // ndef PRODUCT |
2232 #endif // ndef PRODUCT |
2231 |
2233 |