src/share/vm/code/nmethod.cpp

changeset 6992:2c6ef90f030a
parent    6991:882004b9e7e1
child     7333:b12a2a9b05ca
--- a/src/share/vm/code/nmethod.cpp
+++ b/src/share/vm/code/nmethod.cpp
@@ -47,10 +47,12 @@
 #include "shark/sharkCompiler.hpp"
 #endif
 
 PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
 
+unsigned char nmethod::_global_unloading_clock = 0;
+
 #ifdef DTRACE_ENABLED
 
 // Only bother with this argument setup if dtrace is available
 
 #ifndef USDT2
@@ -464,10 +466,11 @@
 }
 
 // Fill in default values for various flag fields
 void nmethod::init_defaults() {
   _state = in_use;
+  _unloading_clock = 0;
   _marked_for_reclamation = 0;
   _has_flushed_dependencies = 0;
   _has_unsafe_access = 0;
   _has_method_handle_invokes = 0;
   _lazy_critical_native = 0;
@@ -482,11 +485,15 @@
 #endif
 
   _oops_do_mark_link = NULL;
   _jmethod_id = NULL;
   _osr_link = NULL;
-  _scavenge_root_link = NULL;
+  if (UseG1GC) {
+    _unloading_next = NULL;
+  } else {
+    _scavenge_root_link = NULL;
+  }
   _scavenge_root_state = 0;
   _compiler = NULL;
 #if INCLUDE_RTM_OPT
   _rtm_state = NoRTM;
 #endif
@@ -1188,10 +1195,81 @@
       }
     }
   }
 }
 
+void nmethod::verify_clean_inline_caches() {
+  assert_locked_or_safepoint(CompiledIC_lock);
+
+  // If the method is not entrant or zombie then a JMP is plastered over the
+  // first few bytes. If an oop in the old code was there, that oop
+  // should not get GC'd. Skip the first few bytes of oops on
+  // not-entrant methods.
+  address low_boundary = verified_entry_point();
+  if (!is_in_use()) {
+    low_boundary += NativeJump::instruction_size;
+    // %%% Note: On SPARC we patch only a 4-byte trap, not a full NativeJump.
+    // This means that the low_boundary is going to be a little too high.
+    // This shouldn't matter, since oops of non-entrant methods are never used.
+    // In fact, why are we bothering to look at oops in a non-entrant method??
+  }
+
+  ResourceMark rm;
+  RelocIterator iter(this, low_boundary);
+  while(iter.next()) {
+    switch(iter.type()) {
+      case relocInfo::virtual_call_type:
+      case relocInfo::opt_virtual_call_type: {
+        CompiledIC *ic = CompiledIC_at(&iter);
+        // Ok, to lookup references to zombies here
+        CodeBlob *cb = CodeCache::find_blob_unsafe(ic->ic_destination());
+        if( cb != NULL && cb->is_nmethod() ) {
+          nmethod* nm = (nmethod*)cb;
+          // Verify that inline caches pointing to both zombie and not_entrant methods are clean
+          if (!nm->is_in_use() || (nm->method()->code() != nm)) {
+            assert(ic->is_clean(), "IC should be clean");
+          }
+        }
+        break;
+      }
+      case relocInfo::static_call_type: {
+        CompiledStaticCall *csc = compiledStaticCall_at(iter.reloc());
+        CodeBlob *cb = CodeCache::find_blob_unsafe(csc->destination());
+        if( cb != NULL && cb->is_nmethod() ) {
+          nmethod* nm = (nmethod*)cb;
+          // Verify that inline caches pointing to both zombie and not_entrant methods are clean
+          if (!nm->is_in_use() || (nm->method()->code() != nm)) {
+            assert(csc->is_clean(), "IC should be clean");
+          }
+        }
+        break;
+      }
+    }
+  }
+}
+
+int nmethod::verify_icholder_relocations() {
+  int count = 0;
+
+  RelocIterator iter(this);
+  while(iter.next()) {
+    if (iter.type() == relocInfo::virtual_call_type) {
+      if (CompiledIC::is_icholder_call_site(iter.virtual_call_reloc())) {
+        CompiledIC *ic = CompiledIC_at(&iter);
+        if (TraceCompiledIC) {
+          tty->print("noticed icholder " INTPTR_FORMAT " ", p2i(ic->cached_icholder()));
+          ic->print();
+        }
+        assert(ic->cached_icholder() != NULL, "must be non-NULL");
+        count++;
+      }
+    }
+  }
+
+  return count;
+}
+
 // This is a private interface with the sweeper.
 void nmethod::mark_as_seen_on_stack() {
   assert(is_alive(), "Must be an alive method");
   // Set the traversal mark to ensure that the sweeper does 2
   // cleaning passes before moving to zombie.
@@ -1220,10 +1298,27 @@
   if (mdo == NULL) return;
   // There is a benign race here. See comments in methodData.hpp.
   mdo->inc_decompile_count();
 }
 
+void nmethod::increase_unloading_clock() {
+  _global_unloading_clock++;
+  if (_global_unloading_clock == 0) {
+    // _nmethods are allocated with _unloading_clock == 0,
+    // so 0 is never used as a clock value.
+    _global_unloading_clock = 1;
+  }
+}
+
+void nmethod::set_unloading_clock(unsigned char unloading_clock) {
+  OrderAccess::release_store((volatile jubyte*)&_unloading_clock, unloading_clock);
+}
+
+unsigned char nmethod::unloading_clock() {
+  return (unsigned char)OrderAccess::load_acquire((volatile jubyte*)&_unloading_clock);
+}
+
 void nmethod::make_unloaded(BoolObjectClosure* is_alive, oop cause) {
 
   post_compiled_method_unload();
 
   // Since this nmethod is being unloaded, make sure that dependencies
@@ -1265,10 +1360,14 @@
     // Transitioning directly from live to unloaded -- so
     // we need to force a cache clean-up; remember this
     // for later on.
     CodeCache::set_needs_cache_clean(true);
   }
+
+  // Unregister must be done before the state change
+  Universe::heap()->unregister_nmethod(this);
+
   _state = unloaded;
 
   // Log the unloading.
   log_state_change();
 
@@ -1619,10 +1718,39 @@
   // attempt to report the event in the unlikely scenario where the
   // event is enabled at the time the nmethod is made a zombie.
   set_unload_reported();
 }
 
+void static clean_ic_if_metadata_is_dead(CompiledIC *ic, BoolObjectClosure *is_alive) {
+  if (ic->is_icholder_call()) {
+    // The only exception is compiledICHolder oops which may
+    // yet be marked below. (We check this further below).
+    CompiledICHolder* cichk_oop = ic->cached_icholder();
+    if (cichk_oop->holder_method()->method_holder()->is_loader_alive(is_alive) &&
+        cichk_oop->holder_klass()->is_loader_alive(is_alive)) {
+      return;
+    }
+  } else {
+    Metadata* ic_oop = ic->cached_metadata();
+    if (ic_oop != NULL) {
+      if (ic_oop->is_klass()) {
+        if (((Klass*)ic_oop)->is_loader_alive(is_alive)) {
+          return;
+        }
+      } else if (ic_oop->is_method()) {
+        if (((Method*)ic_oop)->method_holder()->is_loader_alive(is_alive)) {
+          return;
+        }
+      } else {
+        ShouldNotReachHere();
+      }
+    }
+  }
+
+  ic->set_to_clean();
+}
+
 // This is called at the end of the strong tracing/marking phase of a
 // GC to unload an nmethod if it contains otherwise unreachable
 // oops.
 
 void nmethod::do_unloading(BoolObjectClosure* is_alive, bool unloading_occurred) {
@@ -1662,35 +1790,11 @@
   if (unloading_occurred) {
     RelocIterator iter(this, low_boundary);
     while(iter.next()) {
       if (iter.type() == relocInfo::virtual_call_type) {
         CompiledIC *ic = CompiledIC_at(&iter);
-        if (ic->is_icholder_call()) {
-          // The only exception is compiledICHolder oops which may
-          // yet be marked below. (We check this further below).
-          CompiledICHolder* cichk_oop = ic->cached_icholder();
-          if (cichk_oop->holder_method()->method_holder()->is_loader_alive(is_alive) &&
-              cichk_oop->holder_klass()->is_loader_alive(is_alive)) {
-            continue;
-          }
-        } else {
-          Metadata* ic_oop = ic->cached_metadata();
-          if (ic_oop != NULL) {
-            if (ic_oop->is_klass()) {
-              if (((Klass*)ic_oop)->is_loader_alive(is_alive)) {
-                continue;
-              }
-            } else if (ic_oop->is_method()) {
-              if (((Method*)ic_oop)->method_holder()->is_loader_alive(is_alive)) {
-                continue;
-              }
-            } else {
-              ShouldNotReachHere();
-            }
-          }
-        }
-        ic->set_to_clean();
+        clean_ic_if_metadata_is_dead(ic, is_alive);
       }
     }
   }
 
   // Compiled code
@@ -1722,10 +1826,179 @@
     }
   }
 
   // Ensure that all metadata is still alive
   verify_metadata_loaders(low_boundary, is_alive);
+}
+
+template <class CompiledICorStaticCall>
+static bool clean_if_nmethod_is_unloaded(CompiledICorStaticCall *ic, address addr, BoolObjectClosure *is_alive, nmethod* from) {
+  // Ok, to lookup references to zombies here
+  CodeBlob *cb = CodeCache::find_blob_unsafe(addr);
+  if (cb != NULL && cb->is_nmethod()) {
+    nmethod* nm = (nmethod*)cb;
+
+    if (nm->unloading_clock() != nmethod::global_unloading_clock()) {
+      // The nmethod has not been processed yet.
+      return true;
+    }
+
+    // Clean inline caches pointing to both zombie and not_entrant methods
+    if (!nm->is_in_use() || (nm->method()->code() != nm)) {
+      ic->set_to_clean();
+      assert(ic->is_clean(), err_msg("nmethod " PTR_FORMAT "not clean %s", from, from->method()->name_and_sig_as_C_string()));
+    }
+  }
+
+  return false;
+}
+
+static bool clean_if_nmethod_is_unloaded(CompiledIC *ic, BoolObjectClosure *is_alive, nmethod* from) {
+  return clean_if_nmethod_is_unloaded(ic, ic->ic_destination(), is_alive, from);
+}
+
+static bool clean_if_nmethod_is_unloaded(CompiledStaticCall *csc, BoolObjectClosure *is_alive, nmethod* from) {
+  return clean_if_nmethod_is_unloaded(csc, csc->destination(), is_alive, from);
+}
+
+bool nmethod::do_unloading_parallel(BoolObjectClosure* is_alive, bool unloading_occurred) {
+  ResourceMark rm;
+
+  // Make sure the oop's ready to receive visitors
+  assert(!is_zombie() && !is_unloaded(),
+         "should not call follow on zombie or unloaded nmethod");
+
+  // If the method is not entrant then a JMP is plastered over the
+  // first few bytes. If an oop in the old code was there, that oop
+  // should not get GC'd. Skip the first few bytes of oops on
+  // not-entrant methods.
+  address low_boundary = verified_entry_point();
+  if (is_not_entrant()) {
+    low_boundary += NativeJump::instruction_size;
+    // %%% Note: On SPARC we patch only a 4-byte trap, not a full NativeJump.
+    // (See comment above.)
+  }
+
+  // The RedefineClasses() API can cause the class unloading invariant
+  // to no longer be true. See jvmtiExport.hpp for details.
+  // Also, leave a debugging breadcrumb in local flag.
+  bool a_class_was_redefined = JvmtiExport::has_redefined_a_class();
+  if (a_class_was_redefined) {
+    // This set of the unloading_occurred flag is done before the
+    // call to post_compiled_method_unload() so that the unloading
+    // of this nmethod is reported.
+    unloading_occurred = true;
+  }
+
+  // Exception cache
+  clean_exception_cache(is_alive);
+
+  bool is_unloaded = false;
+  bool postponed = false;
+
+  RelocIterator iter(this, low_boundary);
+  while(iter.next()) {
+
+    switch (iter.type()) {
+
+    case relocInfo::virtual_call_type:
+      if (unloading_occurred) {
+        // If class unloading occurred we first iterate over all inline caches and
+        // clear ICs where the cached oop is referring to an unloaded klass or method.
+        clean_ic_if_metadata_is_dead(CompiledIC_at(&iter), is_alive);
+      }
+
+      postponed |= clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), is_alive, this);
+      break;
+
+    case relocInfo::opt_virtual_call_type:
+      postponed |= clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), is_alive, this);
+      break;
+
+    case relocInfo::static_call_type:
+      postponed |= clean_if_nmethod_is_unloaded(compiledStaticCall_at(iter.reloc()), is_alive, this);
+      break;
+
+    case relocInfo::oop_type:
+      if (!is_unloaded) {
+        // Unload check
+        oop_Relocation* r = iter.oop_reloc();
+        // Traverse those oops directly embedded in the code.
+        // Other oops (oop_index>0) are seen as part of scopes_oops.
+        assert(1 == (r->oop_is_immediate()) +
+               (r->oop_addr() >= oops_begin() && r->oop_addr() < oops_end()),
+               "oop must be found in exactly one place");
+        if (r->oop_is_immediate() && r->oop_value() != NULL) {
+          if (can_unload(is_alive, r->oop_addr(), unloading_occurred)) {
+            is_unloaded = true;
+          }
+        }
+      }
+      break;
+
+    }
+  }
+
+  if (is_unloaded) {
+    return postponed;
+  }
+
+  // Scopes
+  for (oop* p = oops_begin(); p < oops_end(); p++) {
+    if (*p == Universe::non_oop_word()) continue; // skip non-oops
+    if (can_unload(is_alive, p, unloading_occurred)) {
+      is_unloaded = true;
+      break;
+    }
+  }
+
+  if (is_unloaded) {
+    return postponed;
+  }
+
+  // Ensure that all metadata is still alive
+  verify_metadata_loaders(low_boundary, is_alive);
+
+  return postponed;
+}
+
+void nmethod::do_unloading_parallel_postponed(BoolObjectClosure* is_alive, bool unloading_occurred) {
+  ResourceMark rm;
+
+  // Make sure the oop's ready to receive visitors
+  assert(!is_zombie(),
+         "should not call follow on zombie nmethod");
+
+  // If the method is not entrant then a JMP is plastered over the
+  // first few bytes. If an oop in the old code was there, that oop
+  // should not get GC'd. Skip the first few bytes of oops on
+  // not-entrant methods.
+  address low_boundary = verified_entry_point();
+  if (is_not_entrant()) {
+    low_boundary += NativeJump::instruction_size;
+    // %%% Note: On SPARC we patch only a 4-byte trap, not a full NativeJump.
+    // (See comment above.)
+  }
+
+  RelocIterator iter(this, low_boundary);
+  while(iter.next()) {
+
+    switch (iter.type()) {
+
+    case relocInfo::virtual_call_type:
+      clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), is_alive, this);
+      break;
+
+    case relocInfo::opt_virtual_call_type:
+      clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), is_alive, this);
+      break;
+
+    case relocInfo::static_call_type:
+      clean_if_nmethod_is_unloaded(compiledStaticCall_at(iter.reloc()), is_alive, this);
+      break;
+    }
+  }
 }
 
 #ifdef ASSERT
 
 class CheckClass : AllStatic {
@@ -1940,11 +2213,11 @@
   nmethod* cur = _oops_do_mark_nmethods;
   while (cur != NMETHOD_SENTINEL) {
     assert(cur != NULL, "not NULL-terminated");
     nmethod* next = cur->_oops_do_mark_link;
     cur->_oops_do_mark_link = NULL;
-    cur->fix_oop_relocations();
+    cur->verify_oop_relocations();
     NOT_PRODUCT(if (TraceScavenge) cur->print_on(tty, "oops_do, unmark"));
     cur = next;
   }
   void* required = _oops_do_mark_nmethods;
   void* observed = Atomic::cmpxchg_ptr(NULL, &_oops_do_mark_nmethods, required);
@@ -2482,10 +2755,14 @@
   }
   virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); }
 };
 
 void nmethod::verify_scavenge_root_oops() {
+  if (UseG1GC) {
+    return;
+  }
+
   if (!on_scavenge_root_list()) {
     // Actually look inside, to verify the claim that it's clean.
     DebugScavengeRoot debug_scavenge_root(this);
     oops_do(&debug_scavenge_root);
     if (!debug_scavenge_root.ok())
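
The unloading clock added in this changeset (_global_unloading_clock, the per-nmethod _unloading_clock, and increase_unloading_clock()) lets the parallel unloading code decide whether a target nmethod has already been visited in the current pass: a stamp that differs from the global clock means "not yet processed", and clean_if_nmethod_is_unloaded() postpones the cleaning in that case. The standalone sketch below illustrates only that protocol; it is not part of the changeset, and global_clock/DummyNMethod are hypothetical stand-ins for the real fields.

// Illustrative sketch only -- not from this changeset. Stand-ins for
// nmethod::_global_unloading_clock and nmethod::_unloading_clock.
#include <cstdio>

static unsigned char global_clock = 0;

static void increase_clock() {
  global_clock++;
  if (global_clock == 0) {
    // Objects start life with a stamp of 0, so 0 must never be handed out
    // as a valid clock value once the 8-bit counter wraps around.
    global_clock = 1;
  }
}

struct DummyNMethod {
  unsigned char unloading_clock;   // starts at 0, like a freshly allocated nmethod
  DummyNMethod() : unloading_clock(0) {}
  bool processed_in_current_pass() const { return unloading_clock == global_clock; }
  void mark_processed()                  { unloading_clock = global_clock; }
};

int main() {
  DummyNMethod nm;
  increase_clock();                                       // a new unloading pass begins
  std::printf("%d\n", nm.processed_in_current_pass());    // 0: stamp is stale, work must be postponed
  nm.mark_processed();                                    // a worker visits the nmethod
  std::printf("%d\n", nm.processed_in_current_pass());    // 1: visited in this pass
  return 0;
}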

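do_unloading_parallel() returns true when it had to postpone cleaning a call site because the target nmethod had not yet reached the current unloading clock, and do_unloading_parallel_postponed() exists for a second pass over exactly those nmethods. A driver loop under that contract might look roughly like the sketch below; this is an assumed outline for illustration only, not code from this changeset, and IsAliveClosure/CompiledMethod/unload_code are invented names.

// Hypothetical driver sketch -- not part of the changeset. Only the
// "postponed" return-value contract comes from do_unloading_parallel() above.
#include <vector>

struct IsAliveClosure {};                    // stands in for BoolObjectClosure

struct CompiledMethod {                      // stands in for nmethod
  bool do_unloading_parallel(IsAliveClosure*, bool /*unloading_occurred*/) {
    return false;                            // would return true if cleaning was postponed
  }
  void do_unloading_parallel_postponed(IsAliveClosure*, bool) {}
};

static void unload_code(std::vector<CompiledMethod*>& methods,
                        IsAliveClosure* is_alive, bool unloading_occurred) {
  // Phase 1: visit every method once; calls whose targets have not yet been
  // stamped with the current unloading clock are collected for a second pass.
  std::vector<CompiledMethod*> postponed;
  for (CompiledMethod* m : methods) {
    if (m->do_unloading_parallel(is_alive, unloading_occurred)) {
      postponed.push_back(m);
    }
  }
  // Phase 2: every method has now been visited, so the postponed inline
  // caches can be cleaned without racing with the first pass.
  for (CompiledMethod* m : postponed) {
    m->do_unloading_parallel_postponed(is_alive, unloading_occurred);
  }
}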