src/share/vm/code/nmethod.cpp

changeset:   8073:682119c4c32e
author:      thartmann
date:        Thu, 08 Oct 2015 09:37:23 +0200
parent:      7333:b12a2a9b05ca
child:       8074:c1950f51ed60
permissions: -rw-r--r--

8058737: CodeCache::find_blob fails with 'unsafe access to zombie method'
Summary: Remove active ICStubs from zombie nmethods
Reviewed-by: kvn, iveresov
/*
 * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#include "precompiled.hpp"
#include "code/codeCache.hpp"
#include "code/compiledIC.hpp"
#include "code/dependencies.hpp"
#include "code/nmethod.hpp"
#include "code/scopeDesc.hpp"
#include "compiler/abstractCompiler.hpp"
#include "compiler/compileBroker.hpp"
#include "compiler/compileLog.hpp"
#include "compiler/compilerOracle.hpp"
#include "compiler/disassembler.hpp"
#include "interpreter/bytecode.hpp"
#include "oops/methodData.hpp"
#include "prims/jvmtiRedefineClassesTrace.hpp"
#include "prims/jvmtiImpl.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/sweeper.hpp"
#include "utilities/dtrace.hpp"
#include "utilities/events.hpp"
#include "utilities/xmlstream.hpp"
#ifdef SHARK
#include "shark/sharkCompiler.hpp"
#endif
PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC

unsigned char nmethod::_global_unloading_clock = 0;

#ifdef DTRACE_ENABLED

// Only bother with this argument setup if dtrace is available

#ifndef USDT2
HS_DTRACE_PROBE_DECL8(hotspot, compiled__method__load,
  const char*, int, const char*, int, const char*, int, void*, size_t);

HS_DTRACE_PROBE_DECL6(hotspot, compiled__method__unload,
  char*, int, char*, int, char*, int);

#define DTRACE_METHOD_UNLOAD_PROBE(method)                                \
  {                                                                       \
    Method* m = (method);                                                 \
    if (m != NULL) {                                                      \
      Symbol* klass_name = m->klass_name();                               \
      Symbol* name = m->name();                                           \
      Symbol* signature = m->signature();                                 \
      HS_DTRACE_PROBE6(hotspot, compiled__method__unload,                 \
        klass_name->bytes(), klass_name->utf8_length(),                   \
        name->bytes(), name->utf8_length(),                               \
        signature->bytes(), signature->utf8_length());                    \
    }                                                                     \
  }
#else /* USDT2 */
#define DTRACE_METHOD_UNLOAD_PROBE(method)                                \
  {                                                                       \
    Method* m = (method);                                                 \
    if (m != NULL) {                                                      \
      Symbol* klass_name = m->klass_name();                               \
      Symbol* name = m->name();                                           \
      Symbol* signature = m->signature();                                 \
      HOTSPOT_COMPILED_METHOD_UNLOAD(                                     \
        (char *) klass_name->bytes(), klass_name->utf8_length(),          \
        (char *) name->bytes(), name->utf8_length(),                      \
        (char *) signature->bytes(), signature->utf8_length());           \
    }                                                                     \
  }
#endif /* USDT2 */

#else //  ndef DTRACE_ENABLED

#define DTRACE_METHOD_UNLOAD_PROBE(method)

#endif
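
// Note (editor): DTRACE_METHOD_UNLOAD_PROBE is invoked further down in this
// file when an nmethod is unloaded (see the post_compiled_method_unload()
// call in make_unloaded()), e.g.:
//
//   DTRACE_METHOD_UNLOAD_PROBE(method());
//
// The probe fires with the holder class name, method name, and signature as
// (bytes, length) pairs, so dtrace consumers can reconstruct the UTF-8
// strings without relying on NUL termination.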
bool nmethod::is_compiled_by_c1() const {
  if (compiler() == NULL) {
    return false;
  }
  return compiler()->is_c1();
}

bool nmethod::is_compiled_by_c2() const {
  if (compiler() == NULL) {
    return false;
  }
  return compiler()->is_c2();
}

bool nmethod::is_compiled_by_shark() const {
  if (compiler() == NULL) {
    return false;
  }
  return compiler()->is_shark();
}
//---------------------------------------------------------------------------------
// NMethod statistics
// They are printed under various flags, including:
//   PrintC1Statistics, PrintOptoStatistics, LogVMOutput, and LogCompilation.
// (In the latter two cases, they, like other stats, are printed to the log only.)

#ifndef PRODUCT
// These variables are put into one block to reduce relocations
// and make it simpler to print from the debugger.
static
struct nmethod_stats_struct {
  int nmethod_count;
  int total_size;
  int relocation_size;
  int consts_size;
  int insts_size;
  int stub_size;
  int scopes_data_size;
  int scopes_pcs_size;
  int dependencies_size;
  int handler_table_size;
  int nul_chk_table_size;
  int oops_size;

  void note_nmethod(nmethod* nm) {
    nmethod_count += 1;
    total_size          += nm->size();
    relocation_size     += nm->relocation_size();
    consts_size         += nm->consts_size();
    insts_size          += nm->insts_size();
    stub_size           += nm->stub_size();
    oops_size           += nm->oops_size();
    scopes_data_size    += nm->scopes_data_size();
    scopes_pcs_size     += nm->scopes_pcs_size();
    dependencies_size   += nm->dependencies_size();
    handler_table_size  += nm->handler_table_size();
    nul_chk_table_size  += nm->nul_chk_table_size();
  }
  void print_nmethod_stats() {
    if (nmethod_count == 0)  return;
    tty->print_cr("Statistics for %d bytecoded nmethods:", nmethod_count);
    if (total_size != 0)          tty->print_cr(" total in heap  = %d", total_size);
    if (relocation_size != 0)     tty->print_cr(" relocation     = %d", relocation_size);
    if (consts_size != 0)         tty->print_cr(" constants      = %d", consts_size);
    if (insts_size != 0)          tty->print_cr(" main code      = %d", insts_size);
    if (stub_size != 0)           tty->print_cr(" stub code      = %d", stub_size);
    if (oops_size != 0)           tty->print_cr(" oops           = %d", oops_size);
    if (scopes_data_size != 0)    tty->print_cr(" scopes data    = %d", scopes_data_size);
    if (scopes_pcs_size != 0)     tty->print_cr(" scopes pcs     = %d", scopes_pcs_size);
    if (dependencies_size != 0)   tty->print_cr(" dependencies   = %d", dependencies_size);
    if (handler_table_size != 0)  tty->print_cr(" handler table  = %d", handler_table_size);
    if (nul_chk_table_size != 0)  tty->print_cr(" nul chk table  = %d", nul_chk_table_size);
  }

  int native_nmethod_count;
  int native_total_size;
  int native_relocation_size;
  int native_insts_size;
  int native_oops_size;
  void note_native_nmethod(nmethod* nm) {
    native_nmethod_count += 1;
    native_total_size       += nm->size();
    native_relocation_size  += nm->relocation_size();
    native_insts_size       += nm->insts_size();
    native_oops_size        += nm->oops_size();
  }
  void print_native_nmethod_stats() {
    if (native_nmethod_count == 0)  return;
    tty->print_cr("Statistics for %d native nmethods:", native_nmethod_count);
    if (native_total_size != 0)       tty->print_cr(" N. total size  = %d", native_total_size);
    if (native_relocation_size != 0)  tty->print_cr(" N. relocation  = %d", native_relocation_size);
    if (native_insts_size != 0)       tty->print_cr(" N. main code   = %d", native_insts_size);
    if (native_oops_size != 0)        tty->print_cr(" N. oops        = %d", native_oops_size);
  }

  int pc_desc_resets;   // number of resets (= number of caches)
  int pc_desc_queries;  // queries to nmethod::find_pc_desc
  int pc_desc_approx;   // number of those which have approximate true
  int pc_desc_repeats;  // number of _pc_descs[0] hits
  int pc_desc_hits;     // number of LRU cache hits
  int pc_desc_tests;    // total number of PcDesc examinations
  int pc_desc_searches; // total number of quasi-binary search steps
  int pc_desc_adds;     // number of LRU cache insertions

  void print_pc_stats() {
    tty->print_cr("PcDesc Statistics:  %d queries, %.2f comparisons per query",
                  pc_desc_queries,
                  (double)(pc_desc_tests + pc_desc_searches)
                  / pc_desc_queries);
    tty->print_cr("  caches=%d queries=%d/%d, hits=%d+%d, tests=%d+%d, adds=%d",
                  pc_desc_resets,
                  pc_desc_queries, pc_desc_approx,
                  pc_desc_repeats, pc_desc_hits,
                  pc_desc_tests, pc_desc_searches, pc_desc_adds);
  }
} nmethod_stats;
#endif //PRODUCT
//---------------------------------------------------------------------------------


ExceptionCache::ExceptionCache(Handle exception, address pc, address handler) {
  assert(pc != NULL, "Must be non null");
  assert(exception.not_null(), "Must be non null");
  assert(handler != NULL, "Must be non null");

  _count = 0;
  _exception_type = exception->klass();
  _next = NULL;

  add_address_and_handler(pc,handler);
}
address ExceptionCache::match(Handle exception, address pc) {
  assert(pc != NULL,"Must be non null");
  assert(exception.not_null(),"Must be non null");
  if (exception->klass() == exception_type()) {
    return (test_address(pc));
  }

  return NULL;
}


bool ExceptionCache::match_exception_with_space(Handle exception) {
  assert(exception.not_null(),"Must be non null");
  if (exception->klass() == exception_type() && count() < cache_size) {
    return true;
  }
  return false;
}
address ExceptionCache::test_address(address addr) {
  for (int i=0; i<count(); i++) {
    if (pc_at(i) == addr) {
      return handler_at(i);
    }
  }
  return NULL;
}


bool ExceptionCache::add_address_and_handler(address addr, address handler) {
  if (test_address(addr) == handler) return true;
  if (count() < cache_size) {
    set_pc_at(count(),addr);
    set_handler_at(count(), handler);
    increment_count();
    return true;
  }
  return false;
}
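
// Note (editor): callers serialize insertions via ExceptionCache_lock, but
// lookups are lock-free, so two threads can both miss and then attempt to add
// the same (pc, handler) pair. The leading test_address(addr) == handler
// check makes the insertion idempotent: the losing racer just confirms the
// entry the first thread added instead of consuming another cache slot.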
// These methods are private, and are used to manipulate the exception cache
// directly.
ExceptionCache* nmethod::exception_cache_entry_for_exception(Handle exception) {
  ExceptionCache* ec = exception_cache();
  while (ec != NULL) {
    if (ec->match_exception_with_space(exception)) {
      return ec;
    }
    ec = ec->next();
  }
  return NULL;
}
//-----------------------------------------------------------------------------


// Helper used by both find_pc_desc methods.
static inline bool match_desc(PcDesc* pc, int pc_offset, bool approximate) {
  NOT_PRODUCT(++nmethod_stats.pc_desc_tests);
  if (!approximate)
    return pc->pc_offset() == pc_offset;
  else
    return (pc-1)->pc_offset() < pc_offset && pc_offset <= pc->pc_offset();
}
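
// Example (editor; illustrative offsets): given adjacent PcDescs with
// pc_offsets ... 8, 16 ..., an approximate query for pc_offset 10 matches the
// descriptor at 16, because 8 < 10 && 10 <= 16, while an exact query for 10
// fails. Approximate matching is what lets a caller map a pc that falls
// strictly between recorded safepoints to the following descriptor.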
void PcDescCache::reset_to(PcDesc* initial_pc_desc) {
  if (initial_pc_desc == NULL) {
    _pc_descs[0] = NULL; // native method; no PcDescs at all
    return;
  }
  NOT_PRODUCT(++nmethod_stats.pc_desc_resets);
  // reset the cache by filling it with benign (non-null) values
  assert(initial_pc_desc->pc_offset() < 0, "must be sentinel");
  for (int i = 0; i < cache_size; i++)
    _pc_descs[i] = initial_pc_desc;
}
PcDesc* PcDescCache::find_pc_desc(int pc_offset, bool approximate) {
  NOT_PRODUCT(++nmethod_stats.pc_desc_queries);
  NOT_PRODUCT(if (approximate) ++nmethod_stats.pc_desc_approx);

  // Note: one might think that caching the most recently
  // read value separately would be a win, but one would be
  // wrong.  When many threads are updating it, the cache
  // line it's in would bounce between caches, negating
  // any benefit.

  // In order to prevent race conditions do not load cache elements
  // repeatedly, but use a local copy:
  PcDesc* res;

  // Step one:  Check the most recently added value.
  res = _pc_descs[0];
  if (res == NULL) return NULL;  // native method; no PcDescs at all
  if (match_desc(res, pc_offset, approximate)) {
    NOT_PRODUCT(++nmethod_stats.pc_desc_repeats);
    return res;
  }

  // Step two:  Check the rest of the LRU cache.
  for (int i = 1; i < cache_size; ++i) {
    res = _pc_descs[i];
    if (res->pc_offset() < 0) break;  // optimization: skip empty cache
    if (match_desc(res, pc_offset, approximate)) {
      NOT_PRODUCT(++nmethod_stats.pc_desc_hits);
      return res;
    }
  }

  // Report failure.
  return NULL;
}
void PcDescCache::add_pc_desc(PcDesc* pc_desc) {
  NOT_PRODUCT(++nmethod_stats.pc_desc_adds);
  // Update the LRU cache by shifting pc_desc forward.
  for (int i = 0; i < cache_size; i++)  {
    PcDesc* next = _pc_descs[i];
    _pc_descs[i] = pc_desc;
    pc_desc = next;
  }
}
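
// Illustration of the shift above (editor; assuming cache_size == 4, with
// hypothetical entries A-E):
//   before: [A, B, C, D]; add_pc_desc(E) => after: [E, A, B, C]
// The newest entry always lands in _pc_descs[0], which is why find_pc_desc
// checks slot 0 first, and the oldest entry (D) falls off the end.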
// adjust pcs_size so that it is a multiple of both oopSize and
// sizeof(PcDesc) (assumes that if sizeof(PcDesc) is not a multiple
// of oopSize, then 2*sizeof(PcDesc) is)
static int adjust_pcs_size(int pcs_size) {
  int nsize = round_to(pcs_size,   oopSize);
  if ((nsize % sizeof(PcDesc)) != 0) {
    nsize = pcs_size + sizeof(PcDesc);
  }
  assert((nsize % oopSize) == 0, "correct alignment");
  return nsize;
}
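
// Worked example (editor; hypothetical sizes): suppose oopSize == 8 and
// sizeof(PcDesc) == 12, so pcs_size is a multiple of 12, say 36. Then
// round_to(36, 8) == 40 and 40 % 12 != 0, so nsize becomes 36 + 12 == 48,
// a multiple of both 8 and 12. This relies on the assumption stated above
// that 2*sizeof(PcDesc) is a multiple of oopSize whenever sizeof(PcDesc)
// itself is not.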
//-----------------------------------------------------------------------------


void nmethod::add_exception_cache_entry(ExceptionCache* new_entry) {
  assert(ExceptionCache_lock->owned_by_self(),"Must hold the ExceptionCache_lock");
  assert(new_entry != NULL,"Must be non null");
  assert(new_entry->next() == NULL, "Must be null");

  if (exception_cache() != NULL) {
    new_entry->set_next(exception_cache());
  }
  set_exception_cache(new_entry);
}
void nmethod::clean_exception_cache(BoolObjectClosure* is_alive) {
  ExceptionCache* prev = NULL;
  ExceptionCache* curr = exception_cache();

  while (curr != NULL) {
    ExceptionCache* next = curr->next();

    Klass* ex_klass = curr->exception_type();
    if (ex_klass != NULL && !ex_klass->is_loader_alive(is_alive)) {
      if (prev == NULL) {
        set_exception_cache(next);
      } else {
        prev->set_next(next);
      }
      delete curr;
      // prev stays the same.
    } else {
      prev = curr;
    }

    curr = next;
  }
}
// These are the public access methods for the exception cache.
address nmethod::handler_for_exception_and_pc(Handle exception, address pc) {
  // We never grab a lock to read the exception cache, so we may
  // have false negatives. This is okay, as it can only happen during
  // the first few exception lookups for a given nmethod.
  ExceptionCache* ec = exception_cache();
  while (ec != NULL) {
    address ret_val;
    if ((ret_val = ec->match(exception,pc)) != NULL) {
      return ret_val;
    }
    ec = ec->next();
  }
  return NULL;
}
void nmethod::add_handler_for_exception_and_pc(Handle exception, address pc, address handler) {
  // There are potential race conditions during exception cache updates, so we
  // must own the ExceptionCache_lock before doing ANY modifications. Because
  // we don't lock during reads, it is possible to have several threads attempt
  // to update the cache with the same data. We need to check for already inserted
  // copies of the current data before adding it.

  MutexLocker ml(ExceptionCache_lock);
  ExceptionCache* target_entry = exception_cache_entry_for_exception(exception);

  if (target_entry == NULL || !target_entry->add_address_and_handler(pc,handler)) {
    target_entry = new ExceptionCache(exception,pc,handler);
    add_exception_cache_entry(target_entry);
  }
}
//-------------end of code for ExceptionCache--------------


int nmethod::total_size() const {
  return
    consts_size()        +
    insts_size()         +
    stub_size()          +
    scopes_data_size()   +
    scopes_pcs_size()    +
    handler_table_size() +
    nul_chk_table_size();
}

const char* nmethod::compile_kind() const {
  if (is_osr_method())     return "osr";
  if (method() != NULL && is_native_method())  return "c2n";
  return NULL;
}
// Fill in default values for various flag fields
void nmethod::init_defaults() {
  _state                      = in_use;
  _unloading_clock            = 0;
  _marked_for_reclamation     = 0;
  _has_flushed_dependencies   = 0;
  _has_unsafe_access          = 0;
  _has_method_handle_invokes  = 0;
  _lazy_critical_native       = 0;
  _has_wide_vectors           = 0;
  _marked_for_deoptimization  = 0;
  _lock_count                 = 0;
  _stack_traversal_mark       = 0;
  _unload_reported            = false;           // jvmti state

#ifdef ASSERT
  _oops_are_stale             = false;
#endif

  _oops_do_mark_link       = NULL;
  _jmethod_id              = NULL;
  _osr_link                = NULL;
  if (UseG1GC) {
    _unloading_next        = NULL;
  } else {
    _scavenge_root_link    = NULL;
  }
  _scavenge_root_state     = 0;
  _compiler                = NULL;
#if INCLUDE_RTM_OPT
  _rtm_state               = NoRTM;
#endif
#ifdef HAVE_DTRACE_H
  _trap_offset             = 0;
#endif // def HAVE_DTRACE_H
}
nmethod* nmethod::new_native_nmethod(methodHandle method,
  int compile_id,
  CodeBuffer *code_buffer,
  int vep_offset,
  int frame_complete,
  int frame_size,
  ByteSize basic_lock_owner_sp_offset,
  ByteSize basic_lock_sp_offset,
  OopMapSet* oop_maps) {
  code_buffer->finalize_oop_references(method);
  // create nmethod
  nmethod* nm = NULL;
  {
    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    int native_nmethod_size = allocation_size(code_buffer, sizeof(nmethod));
    CodeOffsets offsets;
    offsets.set_value(CodeOffsets::Verified_Entry, vep_offset);
    offsets.set_value(CodeOffsets::Frame_Complete, frame_complete);
    nm = new (native_nmethod_size) nmethod(method(), native_nmethod_size,
                                            compile_id, &offsets,
                                            code_buffer, frame_size,
                                            basic_lock_owner_sp_offset,
                                            basic_lock_sp_offset, oop_maps);
    NOT_PRODUCT(if (nm != NULL)  nmethod_stats.note_native_nmethod(nm));
    if (PrintAssembly && nm != NULL) {
      Disassembler::decode(nm);
    }
  }
  // verify nmethod
  debug_only(if (nm) nm->verify();) // might block

  if (nm != NULL) {
    nm->log_new_nmethod();
  }

  return nm;
}
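
// Note (editor): this factory builds JNI/native wrappers (compile_kind "c2n"
// above); in HotSpot the typical caller is the shared runtime's native
// wrapper generator. Allocation can fail when the code cache is full, so
// callers must tolerate a NULL result.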
#ifdef HAVE_DTRACE_H
nmethod* nmethod::new_dtrace_nmethod(methodHandle method,
                                     CodeBuffer *code_buffer,
                                     int vep_offset,
                                     int trap_offset,
                                     int frame_complete,
                                     int frame_size) {
  code_buffer->finalize_oop_references(method);
  // create nmethod
  nmethod* nm = NULL;
  {
    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    int nmethod_size = allocation_size(code_buffer, sizeof(nmethod));
    CodeOffsets offsets;
    offsets.set_value(CodeOffsets::Verified_Entry, vep_offset);
    offsets.set_value(CodeOffsets::Dtrace_trap, trap_offset);
    offsets.set_value(CodeOffsets::Frame_Complete, frame_complete);

    nm = new (nmethod_size) nmethod(method(), nmethod_size,
                                    &offsets, code_buffer, frame_size);

    NOT_PRODUCT(if (nm != NULL)  nmethod_stats.note_nmethod(nm));
    if (PrintAssembly && nm != NULL) {
      Disassembler::decode(nm);
    }
  }
  // verify nmethod
  debug_only(if (nm) nm->verify();) // might block

  if (nm != NULL) {
    nm->log_new_nmethod();
  }

  return nm;
}

#endif // def HAVE_DTRACE_H
nmethod* nmethod::new_nmethod(methodHandle method,
  int compile_id,
  int entry_bci,
  CodeOffsets* offsets,
  int orig_pc_offset,
  DebugInformationRecorder* debug_info,
  Dependencies* dependencies,
  CodeBuffer* code_buffer, int frame_size,
  OopMapSet* oop_maps,
  ExceptionHandlerTable* handler_table,
  ImplicitExceptionTable* nul_chk_table,
  AbstractCompiler* compiler,
  int comp_level
)
{
  assert(debug_info->oop_recorder() == code_buffer->oop_recorder(), "shared OR");
  code_buffer->finalize_oop_references(method);
  // create nmethod
  nmethod* nm = NULL;
  { MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    int nmethod_size =
      allocation_size(code_buffer, sizeof(nmethod))
      + adjust_pcs_size(debug_info->pcs_size())
      + round_to(dependencies->size_in_bytes() , oopSize)
      + round_to(handler_table->size_in_bytes(), oopSize)
      + round_to(nul_chk_table->size_in_bytes(), oopSize)
      + round_to(debug_info->data_size()       , oopSize);

    nm = new (nmethod_size)
    nmethod(method(), nmethod_size, compile_id, entry_bci, offsets,
            orig_pc_offset, debug_info, dependencies, code_buffer, frame_size,
            oop_maps,
            handler_table,
            nul_chk_table,
            compiler,
            comp_level);

    if (nm != NULL) {
      // To make dependency checking during class loading fast, record
      // the nmethod dependencies in the classes it is dependent on.
      // This allows the dependency checking code to simply walk the
      // class hierarchy above the loaded class, checking only nmethods
      // which are dependent on those classes.  The slow way is to
      // check every nmethod for dependencies, which makes it linear in
      // the number of methods compiled.  For applications with a lot of
      // classes the slow way is too slow.
      for (Dependencies::DepStream deps(nm); deps.next(); ) {
        Klass* klass = deps.context_type();
        if (klass == NULL) {
          continue;  // ignore things like evol_method
        }

        // record this nmethod as dependent on this klass
        InstanceKlass::cast(klass)->add_dependent_nmethod(nm);
      }
      NOT_PRODUCT(nmethod_stats.note_nmethod(nm));
      if (PrintAssembly || CompilerOracle::has_option_string(method, "PrintAssembly")) {
        Disassembler::decode(nm);
      }
    }
  }
  // Do verification and logging outside CodeCache_lock.
  if (nm != NULL) {
    // Safepoints in nmethod::verify aren't allowed because nm hasn't been installed yet.
    DEBUG_ONLY(nm->verify();)
    nm->log_new_nmethod();
  }
  return nm;
}
// For native wrappers
nmethod::nmethod(
  Method* method,
  int nmethod_size,
  int compile_id,
  CodeOffsets* offsets,
  CodeBuffer* code_buffer,
  int frame_size,
  ByteSize basic_lock_owner_sp_offset,
  ByteSize basic_lock_sp_offset,
  OopMapSet* oop_maps )
  : CodeBlob("native nmethod", code_buffer, sizeof(nmethod),
             nmethod_size, offsets->value(CodeOffsets::Frame_Complete), frame_size, oop_maps),
  _native_receiver_sp_offset(basic_lock_owner_sp_offset),
  _native_basic_lock_sp_offset(basic_lock_sp_offset)
{
  {
    debug_only(No_Safepoint_Verifier nsv;)
    assert_locked_or_safepoint(CodeCache_lock);

    init_defaults();
    _method                  = method;
    _entry_bci               = InvocationEntryBci;
    // We have no exception handler or deopt handler; make the
    // values something that will never match a pc, like the nmethod vtable entry
    _exception_offset        = 0;
    _deoptimize_offset       = 0;
    _deoptimize_mh_offset    = 0;
    _orig_pc_offset          = 0;

    _consts_offset           = data_offset();
    _stub_offset             = data_offset();
    _oops_offset             = data_offset();
    _metadata_offset         = _oops_offset         + round_to(code_buffer->total_oop_size(), oopSize);
    _scopes_data_offset      = _metadata_offset     + round_to(code_buffer->total_metadata_size(), wordSize);
    _scopes_pcs_offset       = _scopes_data_offset;
    _dependencies_offset     = _scopes_pcs_offset;
    _handler_table_offset    = _dependencies_offset;
    _nul_chk_table_offset    = _handler_table_offset;
    _nmethod_end_offset      = _nul_chk_table_offset;
    _compile_id              = compile_id;
    _comp_level              = CompLevel_none;
    _entry_point             = code_begin()          + offsets->value(CodeOffsets::Entry);
    _verified_entry_point    = code_begin()          + offsets->value(CodeOffsets::Verified_Entry);
    _osr_entry_point         = NULL;
    _exception_cache         = NULL;
    _pc_desc_cache.reset_to(NULL);
    _hotness_counter         = NMethodSweeper::hotness_counter_reset_val();

    code_buffer->copy_values_to(this);
    if (ScavengeRootsInCode) {
      if (detect_scavenge_root_oops()) {
        CodeCache::add_scavenge_root_nmethod(this);
      }
      Universe::heap()->register_nmethod(this);
    }
    debug_only(verify_scavenge_root_oops());
    CodeCache::commit(this);
  }

  if (PrintNativeNMethods || PrintDebugInfo || PrintRelocations || PrintDependencies) {
    ttyLocker ttyl;  // keep the following output all in one block
    // This output goes directly to the tty, not the compiler log.
    // To enable tools to match it up with the compilation activity,
    // be sure to tag this tty output with the compile ID.
    if (xtty != NULL) {
      xtty->begin_head("print_native_nmethod");
      xtty->method(_method);
      xtty->stamp();
      xtty->end_head(" address='" INTPTR_FORMAT "'", (intptr_t) this);
    }
    // print the header part first
    print();
    // then print the requested information
    if (PrintNativeNMethods) {
      print_code();
      if (oop_maps != NULL) {
        oop_maps->print();
      }
    }
    if (PrintRelocations) {
      print_relocations();
    }
    if (xtty != NULL) {
      xtty->tail("print_native_nmethod");
    }
  }
}
// For dtrace wrappers
#ifdef HAVE_DTRACE_H
nmethod::nmethod(
  Method* method,
  int nmethod_size,
  CodeOffsets* offsets,
  CodeBuffer* code_buffer,
  int frame_size)
  : CodeBlob("dtrace nmethod", code_buffer, sizeof(nmethod),
             nmethod_size, offsets->value(CodeOffsets::Frame_Complete), frame_size, NULL),
  _native_receiver_sp_offset(in_ByteSize(-1)),
  _native_basic_lock_sp_offset(in_ByteSize(-1))
{
  {
    debug_only(No_Safepoint_Verifier nsv;)
    assert_locked_or_safepoint(CodeCache_lock);

    init_defaults();
    _method                  = method;
    _entry_bci               = InvocationEntryBci;
    // We have no exception handler or deopt handler; make the
    // values something that will never match a pc, like the nmethod vtable entry
    _exception_offset        = 0;
    _deoptimize_offset       = 0;
    _deoptimize_mh_offset    = 0;
    _unwind_handler_offset   = -1;
    _trap_offset             = offsets->value(CodeOffsets::Dtrace_trap);
    _orig_pc_offset          = 0;
    _consts_offset           = data_offset();
    _stub_offset             = data_offset();
    _oops_offset             = data_offset();
    _metadata_offset         = _oops_offset         + round_to(code_buffer->total_oop_size(), oopSize);
    _scopes_data_offset      = _metadata_offset     + round_to(code_buffer->total_metadata_size(), wordSize);
    _scopes_pcs_offset       = _scopes_data_offset;
    _dependencies_offset     = _scopes_pcs_offset;
    _handler_table_offset    = _dependencies_offset;
    _nul_chk_table_offset    = _handler_table_offset;
    _nmethod_end_offset      = _nul_chk_table_offset;
    _compile_id              = 0;  // default
    _comp_level              = CompLevel_none;
    _entry_point             = code_begin()          + offsets->value(CodeOffsets::Entry);
    _verified_entry_point    = code_begin()          + offsets->value(CodeOffsets::Verified_Entry);
    _osr_entry_point         = NULL;
    _exception_cache         = NULL;
    _pc_desc_cache.reset_to(NULL);
    _hotness_counter         = NMethodSweeper::hotness_counter_reset_val();

    code_buffer->copy_values_to(this);
    if (ScavengeRootsInCode) {
      if (detect_scavenge_root_oops()) {
        CodeCache::add_scavenge_root_nmethod(this);
      }
      Universe::heap()->register_nmethod(this);
    }
    DEBUG_ONLY(verify_scavenge_root_oops();)
    CodeCache::commit(this);
  }

  if (PrintNMethods || PrintDebugInfo || PrintRelocations || PrintDependencies) {
    ttyLocker ttyl;  // keep the following output all in one block
    // This output goes directly to the tty, not the compiler log.
    // To enable tools to match it up with the compilation activity,
    // be sure to tag this tty output with the compile ID.
    if (xtty != NULL) {
      xtty->begin_head("print_dtrace_nmethod");
      xtty->method(_method);
      xtty->stamp();
      xtty->end_head(" address='" INTPTR_FORMAT "'", (intptr_t) this);
    }
    // print the header part first
    print();
    // then print the requested information
    if (PrintNMethods) {
      print_code();
    }
    if (PrintRelocations) {
      print_relocations();
    }
    if (xtty != NULL) {
      xtty->tail("print_dtrace_nmethod");
    }
  }
}
#endif // def HAVE_DTRACE_H
void* nmethod::operator new(size_t size, int nmethod_size) throw() {
  // Not critical, may return NULL if there is too little contiguous memory
  return CodeCache::allocate(nmethod_size);
}
nmethod::nmethod(
  Method* method,
  int nmethod_size,
  int compile_id,
  int entry_bci,
  CodeOffsets* offsets,
  int orig_pc_offset,
  DebugInformationRecorder* debug_info,
  Dependencies* dependencies,
  CodeBuffer *code_buffer,
  int frame_size,
  OopMapSet* oop_maps,
  ExceptionHandlerTable* handler_table,
  ImplicitExceptionTable* nul_chk_table,
  AbstractCompiler* compiler,
  int comp_level
  )
  : CodeBlob("nmethod", code_buffer, sizeof(nmethod),
             nmethod_size, offsets->value(CodeOffsets::Frame_Complete), frame_size, oop_maps),
  _native_receiver_sp_offset(in_ByteSize(-1)),
  _native_basic_lock_sp_offset(in_ByteSize(-1))
{
  assert(debug_info->oop_recorder() == code_buffer->oop_recorder(), "shared OR");
  {
    debug_only(No_Safepoint_Verifier nsv;)
    assert_locked_or_safepoint(CodeCache_lock);

    init_defaults();
    _method                  = method;
    _entry_bci               = entry_bci;
    _compile_id              = compile_id;
    _comp_level              = comp_level;
    _compiler                = compiler;
    _orig_pc_offset          = orig_pc_offset;
    _hotness_counter         = NMethodSweeper::hotness_counter_reset_val();

    // Section offsets
    _consts_offset           = content_offset()      + code_buffer->total_offset_of(code_buffer->consts());
    _stub_offset             = content_offset()      + code_buffer->total_offset_of(code_buffer->stubs());

    // Exception handler and deopt handler are in the stub section
    assert(offsets->value(CodeOffsets::Exceptions) != -1, "must be set");
    assert(offsets->value(CodeOffsets::Deopt     ) != -1, "must be set");
    _exception_offset        = _stub_offset          + offsets->value(CodeOffsets::Exceptions);
    _deoptimize_offset       = _stub_offset          + offsets->value(CodeOffsets::Deopt);
    if (offsets->value(CodeOffsets::DeoptMH) != -1) {
      _deoptimize_mh_offset  = _stub_offset          + offsets->value(CodeOffsets::DeoptMH);
    } else {
      _deoptimize_mh_offset  = -1;
    }
    if (offsets->value(CodeOffsets::UnwindHandler) != -1) {
      _unwind_handler_offset = code_offset()         + offsets->value(CodeOffsets::UnwindHandler);
    } else {
      _unwind_handler_offset = -1;
    }

    _oops_offset             = data_offset();
    _metadata_offset         = _oops_offset          + round_to(code_buffer->total_oop_size(), oopSize);
    _scopes_data_offset      = _metadata_offset      + round_to(code_buffer->total_metadata_size(), wordSize);

    _scopes_pcs_offset       = _scopes_data_offset   + round_to(debug_info->data_size       (), oopSize);
    _dependencies_offset     = _scopes_pcs_offset    + adjust_pcs_size(debug_info->pcs_size());
    _handler_table_offset    = _dependencies_offset  + round_to(dependencies->size_in_bytes (), oopSize);
    _nul_chk_table_offset    = _handler_table_offset + round_to(handler_table->size_in_bytes(), oopSize);
    _nmethod_end_offset      = _nul_chk_table_offset + round_to(nul_chk_table->size_in_bytes(), oopSize);

    _entry_point             = code_begin()          + offsets->value(CodeOffsets::Entry);
    _verified_entry_point    = code_begin()          + offsets->value(CodeOffsets::Verified_Entry);
    _osr_entry_point         = code_begin()          + offsets->value(CodeOffsets::OSR_Entry);
    _exception_cache         = NULL;
    _pc_desc_cache.reset_to(scopes_pcs_begin());

    // Copy contents of ScopeDescRecorder to nmethod
    code_buffer->copy_values_to(this);
    debug_info->copy_to(this);
    dependencies->copy_to(this);
    if (ScavengeRootsInCode) {
      if (detect_scavenge_root_oops()) {
        CodeCache::add_scavenge_root_nmethod(this);
      }
      Universe::heap()->register_nmethod(this);
    }
    debug_only(verify_scavenge_root_oops());

    CodeCache::commit(this);

    // Copy contents of ExceptionHandlerTable to nmethod
    handler_table->copy_to(this);
    nul_chk_table->copy_to(this);

    // we use the information of entry points to find out if a method is
    // static or non static
    assert(compiler->is_c2() ||
           _method->is_static() == (entry_point() == _verified_entry_point),
           " entry points must be same for static methods and vice versa");
  }

  bool printnmethods = PrintNMethods
    || CompilerOracle::should_print(_method)
    || CompilerOracle::has_option_string(_method, "PrintNMethods");
  if (printnmethods || PrintDebugInfo || PrintRelocations || PrintDependencies || PrintExceptionHandlers) {
    print_nmethod(printnmethods);
  }
}
// Print a short set of xml attributes to identify this nmethod.  The
// output should be embedded in some other element.
void nmethod::log_identity(xmlStream* log) const {
  log->print(" compile_id='%d'", compile_id());
  const char* nm_kind = compile_kind();
  if (nm_kind != NULL)  log->print(" compile_kind='%s'", nm_kind);
  if (compiler() != NULL) {
    log->print(" compiler='%s'", compiler()->name());
  }
  if (TieredCompilation) {
    log->print(" level='%d'", comp_level());
  }
}
#define LOG_OFFSET(log, name)                    \
  if ((intptr_t)name##_end() - (intptr_t)name##_begin()) \
    log->print(" " XSTR(name) "_offset='%d'"    , \
               (intptr_t)name##_begin() - (intptr_t)this)


void nmethod::log_new_nmethod() const {
  if (LogCompilation && xtty != NULL) {
    ttyLocker ttyl;
    HandleMark hm;
    xtty->begin_elem("nmethod");
    log_identity(xtty);
    xtty->print(" entry='" INTPTR_FORMAT "' size='%d'", code_begin(), size());
    xtty->print(" address='" INTPTR_FORMAT "'", (intptr_t) this);

    LOG_OFFSET(xtty, relocation);
    LOG_OFFSET(xtty, consts);
    LOG_OFFSET(xtty, insts);
    LOG_OFFSET(xtty, stub);
    LOG_OFFSET(xtty, scopes_data);
    LOG_OFFSET(xtty, scopes_pcs);
    LOG_OFFSET(xtty, dependencies);
    LOG_OFFSET(xtty, handler_table);
    LOG_OFFSET(xtty, nul_chk_table);
    LOG_OFFSET(xtty, oops);

    xtty->method(method());
    xtty->stamp();
    xtty->end_elem();
  }
}
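
// Illustration (editor; hypothetical values) of the <nmethod> element this
// emits in the LogCompilation output:
//
//   <nmethod compile_id='42' compiler='C2' level='4'
//            entry='0x00007f...' size='2496' address='0x00007f...'
//            relocation_offset='296' insts_offset='352' stub_offset='1120'
//            scopes_data_offset='1376' scopes_pcs_offset='1680'
//            method='java/lang/String indexOf (I)I' stamp='...'/>
//
// Offsets whose section is empty are omitted by LOG_OFFSET's length check.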
#undef LOG_OFFSET


// Print out more verbose output usually for a newly created nmethod.
void nmethod::print_on(outputStream* st, const char* msg) const {
  if (st != NULL) {
    ttyLocker ttyl;
    if (WizardMode) {
      CompileTask::print_compilation(st, this, msg, /*short_form:*/ true);
      st->print_cr(" (" INTPTR_FORMAT ")", this);
    } else {
      CompileTask::print_compilation(st, this, msg, /*short_form:*/ false);
    }
  }
}
void nmethod::print_nmethod(bool printmethod) {
  ttyLocker ttyl;  // keep the following output all in one block
  if (xtty != NULL) {
    xtty->begin_head("print_nmethod");
    xtty->stamp();
    xtty->end_head();
  }
  // print the header part first
  print();
  // then print the requested information
  if (printmethod) {
    print_code();
    print_pcs();
    if (oop_maps()) {
      oop_maps()->print();
    }
  }
  if (PrintDebugInfo) {
    print_scopes();
  }
  if (PrintRelocations) {
    print_relocations();
  }
  if (PrintDependencies) {
    print_dependencies();
  }
  if (PrintExceptionHandlers) {
    print_handler_table();
    print_nul_chk_table();
  }
  if (xtty != NULL) {
    xtty->tail("print_nmethod");
  }
}
// Promote one word from an assembly-time handle to a live embedded oop.
inline void nmethod::initialize_immediate_oop(oop* dest, jobject handle) {
  if (handle == NULL ||
      // As a special case, IC oops are initialized to 1 or -1.
      handle == (jobject) Universe::non_oop_word()) {
    (*dest) = (oop) handle;
  } else {
    (*dest) = JNIHandles::resolve_non_null(handle);
  }
}
// Have to have the same name because it's called by a template
void nmethod::copy_values(GrowableArray<jobject>* array) {
  int length = array->length();
  assert((address)(oops_begin() + length) <= (address)oops_end(), "oops big enough");
  oop* dest = oops_begin();
  for (int index = 0 ; index < length; index++) {
    initialize_immediate_oop(&dest[index], array->at(index));
  }

  // Now we can fix up all the oops in the code.  We need to do this
  // in the code because the assembler uses jobjects as placeholders.
  // The code and relocations have already been initialized by the
  // CodeBlob constructor, so it is valid even at this early point to
  // iterate over relocations and patch the code.
  fix_oop_relocations(NULL, NULL, /*initialize_immediates=*/ true);
}

void nmethod::copy_values(GrowableArray<Metadata*>* array) {
  int length = array->length();
  assert((address)(metadata_begin() + length) <= (address)metadata_end(), "big enough");
  Metadata** dest = metadata_begin();
  for (int index = 0 ; index < length; index++) {
    dest[index] = array->at(index);
  }
}
bool nmethod::is_at_poll_return(address pc) {
  RelocIterator iter(this, pc, pc+1);
  while (iter.next()) {
    if (iter.type() == relocInfo::poll_return_type)
      return true;
  }
  return false;
}


bool nmethod::is_at_poll_or_poll_return(address pc) {
  RelocIterator iter(this, pc, pc+1);
  while (iter.next()) {
    relocInfo::relocType t = iter.type();
    if (t == relocInfo::poll_return_type || t == relocInfo::poll_type)
      return true;
  }
  return false;
}
void nmethod::fix_oop_relocations(address begin, address end, bool initialize_immediates) {
  // re-patch all oop-bearing instructions, just in case some oops moved
  RelocIterator iter(this, begin, end);
  while (iter.next()) {
    if (iter.type() == relocInfo::oop_type) {
      oop_Relocation* reloc = iter.oop_reloc();
      if (initialize_immediates && reloc->oop_is_immediate()) {
        oop* dest = reloc->oop_addr();
        initialize_immediate_oop(dest, (jobject) *dest);
      }
      // Refresh the oop-related bits of this instruction.
      reloc->fix_oop_relocation();
    } else if (iter.type() == relocInfo::metadata_type) {
      metadata_Relocation* reloc = iter.metadata_reloc();
      reloc->fix_metadata_relocation();
    }
  }
}


void nmethod::verify_oop_relocations() {
  // Ensure that the code matches the current oop values
  RelocIterator iter(this, NULL, NULL);
  while (iter.next()) {
    if (iter.type() == relocInfo::oop_type) {
      oop_Relocation* reloc = iter.oop_reloc();
      if (!reloc->oop_is_immediate()) {
        reloc->verify_oop_relocation();
      }
    }
  }
}
ScopeDesc* nmethod::scope_desc_at(address pc) {
  PcDesc* pd = pc_desc_at(pc);
  guarantee(pd != NULL, "scope must be present");
  return new ScopeDesc(this, pd->scope_decode_offset(),
                       pd->obj_decode_offset(), pd->should_reexecute(),
                       pd->return_oop());
}


void nmethod::clear_inline_caches() {
  assert(SafepointSynchronize::is_at_safepoint(), "cleaning of IC's only allowed at safepoint");
  if (is_zombie()) {
    return;
  }

  RelocIterator iter(this);
  while (iter.next()) {
    iter.reloc()->clear_inline_cache();
  }
}
// Clear ICStubs of all compiled ICs
void nmethod::clear_ic_stubs() {
  assert_locked_or_safepoint(CompiledIC_lock);
  RelocIterator iter(this);
  while(iter.next()) {
    if (iter.type() == relocInfo::virtual_call_type) {
      CompiledIC* ic = CompiledIC_at(&iter);
      ic->clear_ic_stub();
    }
  }
}
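
// Note (editor): clear_ic_stubs() is the heart of this changeset (8058737).
// An active ICStub holds a pointer back into the nmethod it was created for;
// if that nmethod becomes a zombie while the stub is still pending, back
// patching the stub can touch dead code (the "unsafe access to zombie
// method" failure in CodeCache::find_blob). Clearing the ICStubs of all
// compiled ICs before the transition breaks that link; per the summary
// above, the sweeper is expected to do this before an nmethod is converted
// to a zombie.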
void nmethod::cleanup_inline_caches() {

  assert_locked_or_safepoint(CompiledIC_lock);

  // If the method is not entrant or zombie then a JMP is plastered over the
  // first few bytes.  If an oop in the old code was there, that oop
  // should not get GC'd.  Skip the first few bytes of oops on
  // not-entrant methods.
  address low_boundary = verified_entry_point();
  if (!is_in_use()) {
    low_boundary += NativeJump::instruction_size;
    // %%% Note:  On SPARC we patch only a 4-byte trap, not a full NativeJump.
    // This means that the low_boundary is going to be a little too high.
    // This shouldn't matter, since oops of non-entrant methods are never used.
    // In fact, why are we bothering to look at oops in a non-entrant method??
  }

  // Find all calls in the nmethod and clear the ones that point to zombie methods.
  ResourceMark rm;
  RelocIterator iter(this, low_boundary);
  while(iter.next()) {
    switch(iter.type()) {
      case relocInfo::virtual_call_type:
      case relocInfo::opt_virtual_call_type: {
        CompiledIC *ic = CompiledIC_at(&iter);
        // OK to look up references to zombies here
        CodeBlob *cb = CodeCache::find_blob_unsafe(ic->ic_destination());
        if( cb != NULL && cb->is_nmethod() ) {
          nmethod* nm = (nmethod*)cb;
          // Clean inline caches pointing to both zombie and not_entrant methods
          if (!nm->is_in_use() || (nm->method()->code() != nm)) ic->set_to_clean();
        }
        break;
      }
      case relocInfo::static_call_type: {
        CompiledStaticCall *csc = compiledStaticCall_at(iter.reloc());
        CodeBlob *cb = CodeCache::find_blob_unsafe(csc->destination());
        if( cb != NULL && cb->is_nmethod() ) {
          nmethod* nm = (nmethod*)cb;
          // Clean inline caches pointing to both zombie and not_entrant methods
          if (!nm->is_in_use() || (nm->method()->code() != nm)) csc->set_to_clean();
        }
        break;
      }
    }
  }
}
void nmethod::verify_clean_inline_caches() {
  assert_locked_or_safepoint(CompiledIC_lock);

  // If the method is not entrant or zombie then a JMP is plastered over the
  // first few bytes.  If an oop in the old code was there, that oop
  // should not get GC'd.  Skip the first few bytes of oops on
  // not-entrant methods.
  address low_boundary = verified_entry_point();
  if (!is_in_use()) {
    low_boundary += NativeJump::instruction_size;
    // %%% Note:  On SPARC we patch only a 4-byte trap, not a full NativeJump.
    // This means that the low_boundary is going to be a little too high.
    // This shouldn't matter, since oops of non-entrant methods are never used.
    // In fact, why are we bothering to look at oops in a non-entrant method??
  }

  ResourceMark rm;
  RelocIterator iter(this, low_boundary);
  while(iter.next()) {
    switch(iter.type()) {
      case relocInfo::virtual_call_type:
      case relocInfo::opt_virtual_call_type: {
        CompiledIC *ic = CompiledIC_at(&iter);
        // OK to look up references to zombies here
        CodeBlob *cb = CodeCache::find_blob_unsafe(ic->ic_destination());
        if( cb != NULL && cb->is_nmethod() ) {
          nmethod* nm = (nmethod*)cb;
          // Verify that inline caches pointing to both zombie and not_entrant methods are clean
          if (!nm->is_in_use() || (nm->method()->code() != nm)) {
            assert(ic->is_clean(), "IC should be clean");
          }
        }
        break;
      }
      case relocInfo::static_call_type: {
        CompiledStaticCall *csc = compiledStaticCall_at(iter.reloc());
        CodeBlob *cb = CodeCache::find_blob_unsafe(csc->destination());
        if( cb != NULL && cb->is_nmethod() ) {
          nmethod* nm = (nmethod*)cb;
          // Verify that inline caches pointing to both zombie and not_entrant methods are clean
          if (!nm->is_in_use() || (nm->method()->code() != nm)) {
            assert(csc->is_clean(), "IC should be clean");
          }
        }
        break;
      }
    }
  }
}
int nmethod::verify_icholder_relocations() {
  int count = 0;

  RelocIterator iter(this);
  while(iter.next()) {
    if (iter.type() == relocInfo::virtual_call_type) {
      if (CompiledIC::is_icholder_call_site(iter.virtual_call_reloc())) {
        CompiledIC *ic = CompiledIC_at(&iter);
        if (TraceCompiledIC) {
          tty->print("noticed icholder " INTPTR_FORMAT " ", p2i(ic->cached_icholder()));
          ic->print();
        }
        assert(ic->cached_icholder() != NULL, "must be non-NULL");
        count++;
      }
    }
  }

  return count;
}
// This is a private interface with the sweeper.
void nmethod::mark_as_seen_on_stack() {
  assert(is_alive(), "Must be an alive method");
  // Set the traversal mark to ensure that the sweeper does 2
  // cleaning passes before moving to zombie.
  set_stack_traversal_mark(NMethodSweeper::traversal_count());
}

// Tell if a non-entrant method can be converted to a zombie (i.e.,
// there are no activations on the stack, not in use by the VM,
// and not in use by the ServiceThread)
bool nmethod::can_not_entrant_be_converted() {
  assert(is_not_entrant(), "must be a non-entrant method");

  // Since the nmethod sweeper only does a partial sweep, the sweeper's traversal
  // count can be greater than the stack traversal count before it hits the
  // nmethod for the second time.
  return stack_traversal_mark()+1 < NMethodSweeper::traversal_count() &&
         !is_locked_by_vm();
}
void nmethod::inc_decompile_count() {
  if (!is_compiled_by_c2()) return;
  // Could be gated by ProfileTraps, but do not bother...
  Method* m = method();
  if (m == NULL)  return;
  MethodData* mdo = m->method_data();
  if (mdo == NULL)  return;
  // There is a benign race here.  See comments in methodData.hpp.
  mdo->inc_decompile_count();
}
void nmethod::increase_unloading_clock() {
  _global_unloading_clock++;
  if (_global_unloading_clock == 0) {
    // _nmethods are allocated with _unloading_clock == 0,
    // so 0 is never used as a clock value.
    _global_unloading_clock = 1;
  }
}

void nmethod::set_unloading_clock(unsigned char unloading_clock) {
  OrderAccess::release_store((volatile jubyte*)&_unloading_clock, unloading_clock);
}

unsigned char nmethod::unloading_clock() {
  return (unsigned char)OrderAccess::load_acquire((volatile jubyte*)&_unloading_clock);
}
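
// Note (editor): the release_store / load_acquire pair above ensures that any
// writes a GC worker performs to an nmethod before publishing its unloading
// clock are visible to a thread that subsequently reads the clock. This is
// what lets the parallel unloading code compare an nmethod's clock against
// _global_unloading_clock without taking an additional lock.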
void nmethod::make_unloaded(BoolObjectClosure* is_alive, oop cause) {

  post_compiled_method_unload();

  // Since this nmethod is being unloaded, make sure that dependencies
  // recorded in instanceKlasses get flushed and pass non-NULL closure to
  // indicate that this work is being done during a GC.
  assert(Universe::heap()->is_gc_active(), "should only be called during gc");
  assert(is_alive != NULL, "Should be non-NULL");
  // A non-NULL is_alive closure indicates that this is being called during GC.
  flush_dependencies(is_alive);

  // Break cycle between nmethod & method
  if (TraceClassUnloading && WizardMode) {
    tty->print_cr("[Class unloading: Making nmethod " INTPTR_FORMAT
                  " unloadable], Method*(" INTPTR_FORMAT
                  "), cause(" INTPTR_FORMAT ")",
                  this, (address)_method, (address)cause);
    if (!Universe::heap()->is_gc_active())
      cause->klass()->print();
  }
  // Unlink the osr method, so we do not look this up again
  if (is_osr_method()) {
    invalidate_osr_method();
  }
  // If _method is already NULL the Method* is about to be unloaded,
  // so we don't have to break the cycle. Note that it is possible to
  // have the Method* live here, in case we unload the nmethod because
  // it is pointing to some oop (other than the Method*) being unloaded.
  if (_method != NULL) {
    // OSR methods point to the Method*, but the Method* does not
    // point back!
    if (_method->code() == this) {
      _method->clear_code(); // Break a cycle
    }
    _method = NULL;            // Clear the method of this dead nmethod
  }
  // Make the class unloaded - i.e., change state and notify sweeper
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
  if (is_in_use()) {
    // Transitioning directly from live to unloaded -- so
    // we need to force a cache clean-up; remember this
    // for later on.
    CodeCache::set_needs_cache_clean(true);
  }

  // Unregister must be done before the state change
  Universe::heap()->unregister_nmethod(this);

  _state = unloaded;

  // Log the unloading.
  log_state_change();

  // The Method* is gone at this point
  assert(_method == NULL, "Tautology");

  set_osr_link(NULL);
  //set_scavenge_root_link(NULL); // done by prune_scavenge_root_nmethods
  NMethodSweeper::report_state_change(this);
}
  1394 void nmethod::invalidate_osr_method() {
  1395   assert(_entry_bci != InvocationEntryBci, "wrong kind of nmethod");
  1396   // Remove from list of active nmethods
  1397   if (method() != NULL)
  1398     method()->method_holder()->remove_osr_nmethod(this);
  1399   // Set entry as invalid
  1400   _entry_bci = InvalidOSREntryBci;
  1403 void nmethod::log_state_change() const {
  1404   if (LogCompilation) {
  1405     if (xtty != NULL) {
  1406       ttyLocker ttyl;  // keep the following output all in one block
  1407       if (_state == unloaded) {
  1408         xtty->begin_elem("make_unloaded thread='" UINTX_FORMAT "'",
  1409                          os::current_thread_id());
  1410       } else {
  1411         xtty->begin_elem("make_not_entrant thread='" UINTX_FORMAT "'%s",
  1412                          os::current_thread_id(),
  1413                          (_state == zombie ? " zombie='1'" : ""));
  1415       log_identity(xtty);
  1416       xtty->stamp();
  1417       xtty->end_elem();
  1420   if (PrintCompilation && _state != unloaded) {
  1421     print_on(tty, _state == zombie ? "made zombie" : "made not entrant");
  1425 /**
  1426  * Common functionality for both make_not_entrant and make_zombie
  1427  */
  1428 bool nmethod::make_not_entrant_or_zombie(unsigned int state) {
  1429   assert(state == zombie || state == not_entrant, "must be zombie or not_entrant");
  1430   assert(!is_zombie(), "should not already be a zombie");
  1432   // Make sure neither the nmethod nor the method is flushed in case of a safepoint in code below.
  1433   nmethodLocker nml(this);
  1434   methodHandle the_method(method());
  1435   No_Safepoint_Verifier nsv;
  1437   // During patching, depending on the nmethod state, we must notify the GC that
  1438   // code has been unloaded by unregistering it. We cannot do that while
  1439   // holding the Patching_lock because it requires the CodeCache_lock, which
  1440   // would be prone to deadlock.
  1441   // This flag is used to remember whether we need to later lock and unregister.
  1442   bool nmethod_needs_unregister = false;
  1445     // Invalidate the osr nmethod before acquiring the Patching_lock, since
  1446     // both operations acquire leaf locks and we don't want a deadlock.
  1447     // This logic is equivalent to the logic below for patching the
  1448     // verified entry point of regular methods.
  1449     if (is_osr_method()) {
  1450       // this effectively makes the osr nmethod not entrant
  1451       invalidate_osr_method();
  1454     // Enter critical section.  Does not block for safepoint.
  1455     MutexLockerEx pl(Patching_lock, Mutex::_no_safepoint_check_flag);
  1457     if (_state == state) {
  1458       // another thread already performed this transition so nothing
  1459       // to do, but return false to indicate this.
  1460       return false;
  1463     // The caller can be calling the method statically or through an inline
  1464     // cache call.
  1465     if (!is_osr_method() && !is_not_entrant()) {
  1466       NativeJump::patch_verified_entry(entry_point(), verified_entry_point(),
  1467                   SharedRuntime::get_handle_wrong_method_stub());
  1470     if (is_in_use()) {
  1471       // It's a true state change, so mark the method as decompiled.
  1472       // Do it only for transition from alive.
  1473       inc_decompile_count();
  1476     // If the state is becoming a zombie, signal to unregister the nmethod with
  1477     // the heap.
  1478     // This nmethod may have already been unloaded during a full GC.
  1479     if ((state == zombie) && !is_unloaded()) {
  1480       nmethod_needs_unregister = true;
  1483     // Must happen before state change. Otherwise we have a race condition in
  1484     // nmethod::can_not_entrant_be_converted(). I.e., a method can immediately
  1485     // transition its state from 'not_entrant' to 'zombie' without having to wait
  1486     // for stack scanning.
  1487     if (state == not_entrant) {
  1488       mark_as_seen_on_stack();
  1489       OrderAccess::storestore();
  1492     // Change state
  1493     _state = state;
  1495     // Log the transition once
  1496     log_state_change();
  1498     // Remove nmethod from method.
  1499     // We need to check if both the _code and _from_compiled_code_entry_point
  1500     // refer to this nmethod because there is a race in setting these two fields
  1501     // in Method* as seen in bugid 4947125.
  1502     // If the vep() points to the zombie nmethod, the memory for the nmethod
  1503     // could be flushed and the compiler and vtable stubs could still call
  1504     // through it.
  1505     if (method() != NULL && (method()->code() == this ||
  1506                              method()->from_compiled_entry() == verified_entry_point())) {
  1507       HandleMark hm;
  1508       method()->clear_code();
  1510   } // leave critical region under Patching_lock
  1512   // When the nmethod becomes zombie it is no longer alive so the
  1513   // dependencies must be flushed.  nmethods in the not_entrant
  1514   // state will be flushed later when the transition to zombie
  1515   // happens or they get unloaded.
  1516   if (state == zombie) {
  1518       // Flushing dependencies must be done before any possible
  1519       // safepoint can sneak in, otherwise the oops used by the
  1520       // dependency logic could have become stale.
  1521       MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  1522       if (nmethod_needs_unregister) {
  1523         Universe::heap()->unregister_nmethod(this);
  1525       flush_dependencies(NULL);
  1528     // zombie only - if a JVMTI agent has enabled the CompiledMethodUnload
  1529     // event and it hasn't already been reported for this nmethod then
  1530     // report it now. The event may have been reported earlier if the GC
  1531     // marked it for unloading. JvmtiDeferredEventQueue support means
  1532     // we no longer go to a safepoint here.
  1533     post_compiled_method_unload();
  1535 #ifdef ASSERT
  1536     // It's no longer safe to access the oops section since zombie
  1537     // nmethods aren't scanned for GC.
  1538     _oops_are_stale = true;
  1539 #endif
  1540     // The Method may be reclaimed by class unloading now that the
  1541     // nmethod is in zombie state.
  1542     set_method(NULL);
  1543   } else {
  1544     assert(state == not_entrant, "other cases may need to be handled differently");
  1547   if (TraceCreateZombies) {
  1548     tty->print_cr("nmethod <" INTPTR_FORMAT "> code made %s", this, (state == not_entrant) ? "not entrant" : "zombie");
  1551   NMethodSweeper::report_state_change(this);
  1552   return true;
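       // --- Annotation (not part of the original source) ---
       // The storestore barrier above implements a publish-before-state-change
       // protocol; a sketch of the shape, paraphrasing mark_as_seen_on_stack():
       //
       //   _stack_traversal_mark = NMethodSweeper::traversal_count(); // publish
       //   OrderAccess::storestore(); // mark visible no later than...
       //   _state = not_entrant;      // ...the state that readers key off
       //
       // A reader that observes _state == not_entrant is then guaranteed to
       // also observe a current traversal mark, which is what
       // can_not_entrant_be_converted() relies on.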
  1555 void nmethod::flush() {
  1556   // Note that there are no valid oops in the nmethod anymore.
  1557   assert(is_zombie() || (is_osr_method() && is_unloaded()), "must be a zombie method");
  1558   assert(is_marked_for_reclamation() || (is_osr_method() && is_unloaded()), "must be marked for reclamation");
  1560   assert (!is_locked_by_vm(), "locked methods shouldn't be flushed");
  1561   assert_locked_or_safepoint(CodeCache_lock);
  1563   // completely deallocate this method
  1564   Events::log(JavaThread::current(), "flushing nmethod " INTPTR_FORMAT, this);
  1565   if (PrintMethodFlushing) {
  1566     tty->print_cr("*flushing nmethod %3d/" INTPTR_FORMAT ". Live blobs:" UINT32_FORMAT "/Free CodeCache:" SIZE_FORMAT "Kb",
  1567         _compile_id, this, CodeCache::nof_blobs(), CodeCache::unallocated_capacity()/1024);
  1570   // We need to deallocate any ExceptionCache data.
  1571   // Note that we do not need to grab the nmethod lock for this, it
  1572   // better be thread safe if we're disposing of it!
  1573   ExceptionCache* ec = exception_cache();
  1574   set_exception_cache(NULL);
  1575   while(ec != NULL) {
  1576     ExceptionCache* next = ec->next();
  1577     delete ec;
  1578     ec = next;
  1581   if (on_scavenge_root_list()) {
  1582     CodeCache::drop_scavenge_root_nmethod(this);
  1585 #ifdef SHARK
  1586   ((SharkCompiler *) compiler())->free_compiled_method(insts_begin());
  1587 #endif // SHARK
  1589   ((CodeBlob*)(this))->flush();
  1591   CodeCache::free(this);
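       // --- Annotation (not part of the original source) ---
       // The exception cache teardown above is a detach-then-walk pattern: the
       // head is cleared first so the detached chain is private to this thread,
       // and only then is each node deleted. The same shape for a generic
       // singly-linked list:
       //
       //   Node* head = list_head;
       //   list_head = NULL;          // detach; nobody else can reach it now
       //   while (head != NULL) {
       //     Node* next = head->next;
       //     delete head;
       //     head = next;
       //   }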
  1595 //
  1596 // Notify all classes this nmethod is dependent on that it is no
  1597 // longer dependent. This should only be called in two situations.
  1598 // First, when a nmethod transitions to a zombie all dependents need
  1599 // to be cleared.  Since zombification happens at a safepoint there are no
  1600 // synchronization issues.  The second place is a little more tricky.
  1601 // During phase 1 of mark sweep class unloading may happen and as a
  1602 // result some nmethods may get unloaded.  In this case the flushing
  1603 // of dependencies must happen during phase 1 since after GC any
  1604 // dependencies in the unloaded nmethod won't be updated, so
  1605 // traversing the dependency information is unsafe.  In that case this
  1606 // function is called with a non-NULL argument and this function only
  1607 // notifies instanceKlasses that are reachable.
  1609 void nmethod::flush_dependencies(BoolObjectClosure* is_alive) {
  1610   assert_locked_or_safepoint(CodeCache_lock);
  1611   assert(Universe::heap()->is_gc_active() == (is_alive != NULL),
  1612   "is_alive is non-NULL if and only if we are called during GC");
  1613   if (!has_flushed_dependencies()) {
  1614     set_has_flushed_dependencies();
  1615     for (Dependencies::DepStream deps(this); deps.next(); ) {
  1616       Klass* klass = deps.context_type();
  1617       if (klass == NULL)  continue;  // ignore things like evol_method
  1619       // During GC the is_alive closure is non-NULL, and is used to
  1620       // determine liveness of dependees that need to be updated.
  1621       if (is_alive == NULL || klass->is_loader_alive(is_alive)) {
  1622         InstanceKlass::cast(klass)->remove_dependent_nmethod(this);
  1629 // If this oop is not live, the nmethod can be unloaded.
  1630 bool nmethod::can_unload(BoolObjectClosure* is_alive, oop* root, bool unloading_occurred) {
  1631   assert(root != NULL, "just checking");
  1632   oop obj = *root;
  1633   if (obj == NULL || is_alive->do_object_b(obj)) {
  1634       return false;
  1637   // If ScavengeRootsInCode is true, an nmethod might be unloaded
  1638   // simply because one of its constant oops has gone dead.
  1639   // No actual classes need to be unloaded in order for this to occur.
  1640   assert(unloading_occurred || ScavengeRootsInCode, "Inconsistency in unloading");
  1641   make_unloaded(is_alive, obj);
  1642   return true;
  1645 // ------------------------------------------------------------------
  1646 // post_compiled_method_load_event
  1647 // new method for install_code() path
  1648 // Transfer information from compilation to jvmti
  1649 void nmethod::post_compiled_method_load_event() {
  1651   Method* moop = method();
  1652 #ifndef USDT2
  1653   HS_DTRACE_PROBE8(hotspot, compiled__method__load,
  1654       moop->klass_name()->bytes(),
  1655       moop->klass_name()->utf8_length(),
  1656       moop->name()->bytes(),
  1657       moop->name()->utf8_length(),
  1658       moop->signature()->bytes(),
  1659       moop->signature()->utf8_length(),
  1660       insts_begin(), insts_size());
  1661 #else /* USDT2 */
  1662   HOTSPOT_COMPILED_METHOD_LOAD(
  1663       (char *) moop->klass_name()->bytes(),
  1664       moop->klass_name()->utf8_length(),
  1665       (char *) moop->name()->bytes(),
  1666       moop->name()->utf8_length(),
  1667       (char *) moop->signature()->bytes(),
  1668       moop->signature()->utf8_length(),
  1669       insts_begin(), insts_size());
  1670 #endif /* USDT2 */
  1672   if (JvmtiExport::should_post_compiled_method_load() ||
  1673       JvmtiExport::should_post_compiled_method_unload()) {
  1674     get_and_cache_jmethod_id();
  1677   if (JvmtiExport::should_post_compiled_method_load()) {
  1678     // Let the Service thread (which is a real Java thread) post the event
  1679     MutexLockerEx ml(Service_lock, Mutex::_no_safepoint_check_flag);
  1680     JvmtiDeferredEventQueue::enqueue(
  1681       JvmtiDeferredEvent::compiled_method_load_event(this));
  1685 jmethodID nmethod::get_and_cache_jmethod_id() {
  1686   if (_jmethod_id == NULL) {
  1687     // Cache the jmethod_id since it can no longer be looked up once the
  1688     // method itself has been marked for unloading.
  1689     _jmethod_id = method()->jmethod_id();
  1691   return _jmethod_id;
  1694 void nmethod::post_compiled_method_unload() {
  1695   if (unload_reported()) {
  1696     // During unloading we transition to unloaded and then to zombie
  1697     // and the unloading is reported during the first transition.
  1698     return;
  1701   assert(_method != NULL && !is_unloaded(), "just checking");
  1702   DTRACE_METHOD_UNLOAD_PROBE(method());
  1704   // If a JVMTI agent has enabled the CompiledMethodUnload event then
  1705   // post the event. Sometime later this nmethod will be made a zombie
  1706   // by the sweeper but the Method* will not be valid at that point.
  1707   // If the _jmethod_id is null then no load event was ever requested
  1708   // so don't bother posting the unload.  The main reason for this is
  1709   // that the jmethodID is a weak reference to the Method* so if
  1710   // it's being unloaded there's no way to look it up since the weak
  1711   // ref will have been cleared.
  1712   if (_jmethod_id != NULL && JvmtiExport::should_post_compiled_method_unload()) {
  1713     assert(!unload_reported(), "already unloaded");
  1714     JvmtiDeferredEvent event =
  1715       JvmtiDeferredEvent::compiled_method_unload_event(this,
  1716           _jmethod_id, insts_begin());
  1717     if (SafepointSynchronize::is_at_safepoint()) {
  1718       // Don't want to take the queueing lock. Add it as pending and
  1719       // it will get enqueued later.
  1720       JvmtiDeferredEventQueue::add_pending_event(event);
  1721     } else {
  1722       MutexLockerEx ml(Service_lock, Mutex::_no_safepoint_check_flag);
  1723       JvmtiDeferredEventQueue::enqueue(event);
  1727   // The JVMTI CompiledMethodUnload event can be enabled or disabled at
  1728   // any time. As the nmethod is being unloaded now we mark it as
  1729   // having the unload event reported - this will ensure that we don't
  1730   // attempt to report the event in the unlikely scenario where the
  1731   // event is enabled at the time the nmethod is made a zombie.
  1732   set_unload_reported();
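       // --- Annotation (not part of the original source) ---
       // The enqueue above is safepoint-aware: at a safepoint no thread may
       // block on a Mutex, so the event is parked on a pending list that the
       // service thread picks up later; outside a safepoint it is enqueued
       // directly under Service_lock with the no-safepoint-check flag.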
  1735 static void clean_ic_if_metadata_is_dead(CompiledIC *ic, BoolObjectClosure *is_alive, bool mark_on_stack) {
  1736   if (ic->is_icholder_call()) {
  1737     // CompiledICHolder metadata is the one exception: its holder method
  1738     // and holder klass may yet be marked below, so check their liveness first.
  1739     CompiledICHolder* cichk_oop = ic->cached_icholder();
  1741     if (mark_on_stack) {
  1742       Metadata::mark_on_stack(cichk_oop->holder_method());
  1743       Metadata::mark_on_stack(cichk_oop->holder_klass());
  1746     if (cichk_oop->holder_method()->method_holder()->is_loader_alive(is_alive) &&
  1747         cichk_oop->holder_klass()->is_loader_alive(is_alive)) {
  1748       return;
  1750   } else {
  1751     Metadata* ic_oop = ic->cached_metadata();
  1752     if (ic_oop != NULL) {
  1753       if (mark_on_stack) {
  1754         Metadata::mark_on_stack(ic_oop);
  1757       if (ic_oop->is_klass()) {
  1758         if (((Klass*)ic_oop)->is_loader_alive(is_alive)) {
  1759           return;
  1761       } else if (ic_oop->is_method()) {
  1762         if (((Method*)ic_oop)->method_holder()->is_loader_alive(is_alive)) {
  1763           return;
  1765       } else {
  1766         ShouldNotReachHere();
  1771   ic->set_to_clean();
  1774 // This is called at the end of the strong tracing/marking phase of a
  1775 // GC to unload an nmethod if it contains otherwise unreachable
  1776 // oops.
  1778 void nmethod::do_unloading(BoolObjectClosure* is_alive, bool unloading_occurred) {
  1779   // Make sure the oops are ready to receive visitors
  1780   assert(!is_zombie() && !is_unloaded(),
  1781          "should not call follow on zombie or unloaded nmethod");
  1783   // If the method is not entrant then a JMP is plastered over the
  1784   // first few bytes.  If an oop in the old code was there, that oop
  1785   // should not get GC'd.  Skip the first few bytes of oops on
  1786   // not-entrant methods.
  1787   address low_boundary = verified_entry_point();
  1788   if (is_not_entrant()) {
  1789     low_boundary += NativeJump::instruction_size;
  1790     // %%% Note:  On SPARC we patch only a 4-byte trap, not a full NativeJump.
  1791     // (See comment above.)
  1794   // The RedefineClasses() API can cause the class unloading invariant
  1795   // to no longer be true. See jvmtiExport.hpp for details.
  1796   // Also, leave a debugging breadcrumb in local flag.
  1797   bool a_class_was_redefined = JvmtiExport::has_redefined_a_class();
  1798   if (a_class_was_redefined) {
  1799     // This set of the unloading_occurred flag is done before the
  1800     // call to post_compiled_method_unload() so that the unloading
  1801     // of this nmethod is reported.
  1802     unloading_occurred = true;
  1805   // Exception cache
  1806   clean_exception_cache(is_alive);
  1808   // If class unloading occurred we first iterate over all inline caches and
  1809   // clear ICs where the cached oop is referring to an unloaded klass or method.
  1810   // The remaining live cached oops will be traversed in the relocInfo::oop_type
  1811   // iteration below.
  1812   if (unloading_occurred) {
  1813     RelocIterator iter(this, low_boundary);
  1814     while(iter.next()) {
  1815       if (iter.type() == relocInfo::virtual_call_type) {
  1816         CompiledIC *ic = CompiledIC_at(&iter);
  1817         clean_ic_if_metadata_is_dead(ic, is_alive, false);
  1822   // Compiled code
  1824   RelocIterator iter(this, low_boundary);
  1825   while (iter.next()) {
  1826     if (iter.type() == relocInfo::oop_type) {
  1827       oop_Relocation* r = iter.oop_reloc();
  1828       // In this loop, we must only traverse those oops directly embedded in
  1829       // the code.  Other oops (oop_index>0) are seen as part of scopes_oops.
  1830       assert(1 == (r->oop_is_immediate()) +
  1831                   (r->oop_addr() >= oops_begin() && r->oop_addr() < oops_end()),
  1832              "oop must be found in exactly one place");
  1833       if (r->oop_is_immediate() && r->oop_value() != NULL) {
  1834         if (can_unload(is_alive, r->oop_addr(), unloading_occurred)) {
  1835           return;
  1843   // Scopes
  1844   for (oop* p = oops_begin(); p < oops_end(); p++) {
  1845     if (*p == Universe::non_oop_word())  continue;  // skip non-oops
  1846     if (can_unload(is_alive, p, unloading_occurred)) {
  1847       return;
  1851   // Ensure that all metadata is still alive
  1852   verify_metadata_loaders(low_boundary, is_alive);
  1855 template <class CompiledICorStaticCall>
  1856 static bool clean_if_nmethod_is_unloaded(CompiledICorStaticCall *ic, address addr, BoolObjectClosure *is_alive, nmethod* from) {
  1857   // OK to look up references to zombies here
  1858   CodeBlob *cb = CodeCache::find_blob_unsafe(addr);
  1859   if (cb != NULL && cb->is_nmethod()) {
  1860     nmethod* nm = (nmethod*)cb;
  1862     if (nm->unloading_clock() != nmethod::global_unloading_clock()) {
  1863       // The nmethod has not been processed yet.
  1864       return true;
  1867     // Clean inline caches pointing to both zombie and not_entrant methods
  1868     if (!nm->is_in_use() || (nm->method()->code() != nm)) {
  1869       ic->set_to_clean();
  1870       assert(ic->is_clean(), err_msg("nmethod " PTR_FORMAT " not clean %s", from, from->method()->name_and_sig_as_C_string()));
  1874   return false;
  1877 static bool clean_if_nmethod_is_unloaded(CompiledIC *ic, BoolObjectClosure *is_alive, nmethod* from) {
  1878   return clean_if_nmethod_is_unloaded(ic, ic->ic_destination(), is_alive, from);
  1881 static bool clean_if_nmethod_is_unloaded(CompiledStaticCall *csc, BoolObjectClosure *is_alive, nmethod* from) {
  1882   return clean_if_nmethod_is_unloaded(csc, csc->destination(), is_alive, from);
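       // --- Annotation (not part of the original source) ---
       // Together with unloading_clock(), the helpers above form a two-phase
       // parallel cleaning protocol: phase one (do_unloading_parallel) cannot
       // clean an inline cache whose target nmethod has not been stamped with
       // the current global clock yet -- its fate is still unknown -- so it
       // reports the IC as postponed; phase two
       // (do_unloading_parallel_postponed) runs once every nmethod has been
       // processed and cleans the leftovers.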
  1885 bool nmethod::unload_if_dead_at(RelocIterator* iter_at_oop, BoolObjectClosure *is_alive, bool unloading_occurred) {
  1886   assert(iter_at_oop->type() == relocInfo::oop_type, "Wrong relocation type");
  1888   oop_Relocation* r = iter_at_oop->oop_reloc();
  1889   // Traverse those oops directly embedded in the code.
  1890   // Other oops (oop_index>0) are seen as part of scopes_oops.
  1891   assert(1 == (r->oop_is_immediate()) +
  1892          (r->oop_addr() >= oops_begin() && r->oop_addr() < oops_end()),
  1893          "oop must be found in exactly one place");
  1894   if (r->oop_is_immediate() && r->oop_value() != NULL) {
  1895     // Unload this nmethod if the oop is dead.
  1896     if (can_unload(is_alive, r->oop_addr(), unloading_occurred)) {
  1897       return true;
  1901   return false;
  1904 void nmethod::mark_metadata_on_stack_at(RelocIterator* iter_at_metadata) {
  1905   assert(iter_at_metadata->type() == relocInfo::metadata_type, "Wrong relocation type");
  1907   metadata_Relocation* r = iter_at_metadata->metadata_reloc();
  1908   // Here we must only follow metadata directly embedded in the code.
  1909   // Other metadata (oop_index>0) is visited as part of the metadata
  1910   // section (see mark_metadata_on_stack_non_relocs).
  1911   assert(1 == (r->metadata_is_immediate()) +
  1912          (r->metadata_addr() >= metadata_begin() && r->metadata_addr() < metadata_end()),
  1913          "metadata must be found in exactly one place");
  1914   if (r->metadata_is_immediate() && r->metadata_value() != NULL) {
  1915     Metadata* md = r->metadata_value();
  1916     if (md != _method) Metadata::mark_on_stack(md);
  1920 void nmethod::mark_metadata_on_stack_non_relocs() {
  1921     // Visit the metadata section
  1922     for (Metadata** p = metadata_begin(); p < metadata_end(); p++) {
  1923       if (*p == Universe::non_oop_word() || *p == NULL)  continue;  // skip non-metadata words
  1924       Metadata* md = *p;
  1925       Metadata::mark_on_stack(md);
  1928     // Visit metadata not embedded in the other places.
  1929     if (_method != NULL) Metadata::mark_on_stack(_method);
  1932 bool nmethod::do_unloading_parallel(BoolObjectClosure* is_alive, bool unloading_occurred) {
  1933   ResourceMark rm;
  1935   // Make sure the oops are ready to receive visitors
  1936   assert(!is_zombie() && !is_unloaded(),
  1937          "should not call follow on zombie or unloaded nmethod");
  1939   // If the method is not entrant then a JMP is plastered over the
  1940   // first few bytes.  If an oop in the old code was there, that oop
  1941   // should not get GC'd.  Skip the first few bytes of oops on
  1942   // not-entrant methods.
  1943   address low_boundary = verified_entry_point();
  1944   if (is_not_entrant()) {
  1945     low_boundary += NativeJump::instruction_size;
  1946     // %%% Note:  On SPARC we patch only a 4-byte trap, not a full NativeJump.
  1947     // (See comment above.)
  1950   // The RedefineClasses() API can cause the class unloading invariant
  1951   // to no longer be true. See jvmtiExport.hpp for details.
  1952   // Also, leave a debugging breadcrumb in local flag.
  1953   bool a_class_was_redefined = JvmtiExport::has_redefined_a_class();
  1954   if (a_class_was_redefined) {
  1955     // This set of the unloading_occurred flag is done before the
  1956     // call to post_compiled_method_unload() so that the unloading
  1957     // of this nmethod is reported.
  1958     unloading_occurred = true;
  1961   // When class redefinition is used all metadata in the CodeCache has to be recorded,
  1962   // so that unused "previous versions" can be purged. Since walking the CodeCache can
  1963   // be expensive, the "mark on stack" is piggy-backed on this parallel unloading code.
  1964   bool mark_metadata_on_stack = a_class_was_redefined;
  1966   // Exception cache
  1967   clean_exception_cache(is_alive);
  1969   bool is_unloaded = false;
  1970   bool postponed = false;
  1972   RelocIterator iter(this, low_boundary);
  1973   while(iter.next()) {
  1975     switch (iter.type()) {
  1977     case relocInfo::virtual_call_type:
  1978       if (unloading_occurred) {
  1979         // If class unloading occurred we first iterate over all inline caches and
  1980         // clear ICs where the cached oop is referring to an unloaded klass or method.
  1981         clean_ic_if_metadata_is_dead(CompiledIC_at(&iter), is_alive, mark_metadata_on_stack);
  1984       postponed |= clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), is_alive, this);
  1985       break;
  1987     case relocInfo::opt_virtual_call_type:
  1988       postponed |= clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), is_alive, this);
  1989       break;
  1991     case relocInfo::static_call_type:
  1992       postponed |= clean_if_nmethod_is_unloaded(compiledStaticCall_at(iter.reloc()), is_alive, this);
  1993       break;
  1995     case relocInfo::oop_type:
  1996       if (!is_unloaded) {
  1997         is_unloaded = unload_if_dead_at(&iter, is_alive, unloading_occurred);
  1999       break;
  2001     case relocInfo::metadata_type:
  2002       if (mark_metadata_on_stack) {
  2003         mark_metadata_on_stack_at(&iter);
  2008   if (mark_metadata_on_stack) {
  2009     mark_metadata_on_stack_non_relocs();
  2012   if (is_unloaded) {
  2013     return postponed;
  2016   // Scopes
  2017   for (oop* p = oops_begin(); p < oops_end(); p++) {
  2018     if (*p == Universe::non_oop_word())  continue;  // skip non-oops
  2019     if (can_unload(is_alive, p, unloading_occurred)) {
  2020       is_unloaded = true;
  2021       break;
  2025   if (is_unloaded) {
  2026     return postponed;
  2029   // Ensure that all metadata is still alive
  2030   verify_metadata_loaders(low_boundary, is_alive);
  2032   return postponed;
  2035 void nmethod::do_unloading_parallel_postponed(BoolObjectClosure* is_alive, bool unloading_occurred) {
  2036   ResourceMark rm;
  2038   // Make sure the oops are ready to receive visitors
  2039   assert(!is_zombie(),
  2040          "should not call follow on zombie nmethod");
  2042   // If the method is not entrant then a JMP is plastered over the
  2043   // first few bytes.  If an oop in the old code was there, that oop
  2044   // should not get GC'd.  Skip the first few bytes of oops on
  2045   // not-entrant methods.
  2046   address low_boundary = verified_entry_point();
  2047   if (is_not_entrant()) {
  2048     low_boundary += NativeJump::instruction_size;
  2049     // %%% Note:  On SPARC we patch only a 4-byte trap, not a full NativeJump.
  2050     // (See comment above.)
  2053   RelocIterator iter(this, low_boundary);
  2054   while(iter.next()) {
  2056     switch (iter.type()) {
  2058     case relocInfo::virtual_call_type:
  2059       clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), is_alive, this);
  2060       break;
  2062     case relocInfo::opt_virtual_call_type:
  2063       clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), is_alive, this);
  2064       break;
  2066     case relocInfo::static_call_type:
  2067       clean_if_nmethod_is_unloaded(compiledStaticCall_at(iter.reloc()), is_alive, this);
  2068       break;
  2073 #ifdef ASSERT
  2075 class CheckClass : AllStatic {
  2076   static BoolObjectClosure* _is_alive;
  2078   // Check class_loader is alive for this bit of metadata.
  2079   static void check_class(Metadata* md) {
  2080     Klass* klass = NULL;
  2081     if (md->is_klass()) {
  2082       klass = ((Klass*)md);
  2083     } else if (md->is_method()) {
  2084       klass = ((Method*)md)->method_holder();
  2085     } else if (md->is_methodData()) {
  2086       klass = ((MethodData*)md)->method()->method_holder();
  2087     } else {
  2088       md->print();
  2089       ShouldNotReachHere();
  2091     assert(klass->is_loader_alive(_is_alive), "must be alive");
  2093  public:
  2094   static void do_check_class(BoolObjectClosure* is_alive, nmethod* nm) {
  2095     assert(SafepointSynchronize::is_at_safepoint(), "this is only ok at safepoint");
  2096     _is_alive = is_alive;
  2097     nm->metadata_do(check_class);
  2099 };
  2101 // This is called during a safepoint so can use static data
  2102 BoolObjectClosure* CheckClass::_is_alive = NULL;
  2103 #endif // ASSERT
  2106 // Processing of oop references should have been sufficient to keep
  2107 // all strong references alive.  Any weak references should have been
  2108 // cleared as well.  Visit all the metadata and ensure that it's
  2109 // really alive.
  2110 void nmethod::verify_metadata_loaders(address low_boundary, BoolObjectClosure* is_alive) {
  2111 #ifdef ASSERT
  2112     RelocIterator iter(this, low_boundary);
  2113     while (iter.next()) {
  2114     // static_stub_Relocations may have dangling references to
  2115     // Method*s so trim them out here.  Otherwise it looks like
  2116     // compiled code is maintaining a link to dead metadata.
  2117     address static_call_addr = NULL;
  2118     if (iter.type() == relocInfo::opt_virtual_call_type) {
  2119       CompiledIC* cic = CompiledIC_at(&iter);
  2120       if (!cic->is_call_to_interpreted()) {
  2121         static_call_addr = iter.addr();
  2123     } else if (iter.type() == relocInfo::static_call_type) {
  2124       CompiledStaticCall* csc = compiledStaticCall_at(iter.reloc());
  2125       if (!csc->is_call_to_interpreted()) {
  2126         static_call_addr = iter.addr();
  2129     if (static_call_addr != NULL) {
  2130       RelocIterator sciter(this, low_boundary);
  2131       while (sciter.next()) {
  2132         if (sciter.type() == relocInfo::static_stub_type &&
  2133             sciter.static_stub_reloc()->static_call() == static_call_addr) {
  2134           sciter.static_stub_reloc()->clear_inline_cache();
  2139   // Check that the metadata embedded in the nmethod is alive
  2140   CheckClass::do_check_class(is_alive, this);
  2141 #endif
  2145 // Iterate over metadata calling this function.   Used by RedefineClasses
  2146 void nmethod::metadata_do(void f(Metadata*)) {
  2147   address low_boundary = verified_entry_point();
  2148   if (is_not_entrant()) {
  2149     low_boundary += NativeJump::instruction_size;
  2150     // %%% Note:  On SPARC we patch only a 4-byte trap, not a full NativeJump.
  2151     // (See comment above.)
  2154     // Visit all immediate references that are embedded in the instruction stream.
  2155     RelocIterator iter(this, low_boundary);
  2156     while (iter.next()) {
  2157       if (iter.type() == relocInfo::metadata_type ) {
  2158         metadata_Relocation* r = iter.metadata_reloc();
  2159         // Here we must only follow metadata directly embedded in
  2160         // the code.  Other metadata (oop_index>0) is visited as part of
  2161         // the metadata section below.
  2162         assert(1 == (r->metadata_is_immediate()) +
  2163                (r->metadata_addr() >= metadata_begin() && r->metadata_addr() < metadata_end()),
  2164                "metadata must be found in exactly one place");
  2165         if (r->metadata_is_immediate() && r->metadata_value() != NULL) {
  2166           Metadata* md = r->metadata_value();
  2167           f(md);
  2169       } else if (iter.type() == relocInfo::virtual_call_type) {
  2170         // Check compiledIC holders associated with this nmethod
  2171         CompiledIC *ic = CompiledIC_at(&iter);
  2172         if (ic->is_icholder_call()) {
  2173           CompiledICHolder* cichk = ic->cached_icholder();
  2174           f(cichk->holder_method());
  2175           f(cichk->holder_klass());
  2176         } else {
  2177           Metadata* ic_oop = ic->cached_metadata();
  2178           if (ic_oop != NULL) {
  2179             f(ic_oop);
  2186   // Visit the metadata section
  2187   for (Metadata** p = metadata_begin(); p < metadata_end(); p++) {
  2188     if (*p == Universe::non_oop_word() || *p == NULL)  continue;  // skip non-metadata words
  2189     Metadata* md = *p;
  2190     f(md);
  2193   // Visit metadata not embedded in the other places.
  2194   if (_method != NULL) f(_method);
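       // --- Annotation (not part of the original source) ---
       // metadata_do() takes a plain function pointer, so a caller can walk
       // every embedded Metadata* roughly like this (hypothetical callback):
       //
       //   static void mark_metadata(Metadata* md) {
       //     Metadata::mark_on_stack(md);
       //   }
       //   ...
       //   nm->metadata_do(mark_metadata);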
  2197 void nmethod::oops_do(OopClosure* f, bool allow_zombie) {
  2198   // Make sure the oops are ready to receive visitors
  2199   assert(allow_zombie || !is_zombie(), "should not call follow on zombie nmethod");
  2200   assert(!is_unloaded(), "should not call follow on unloaded nmethod");
  2202   // If the method is not entrant or zombie then a JMP is plastered over the
  2203   // first few bytes.  If an oop in the old code was there, that oop
  2204   // should not get GC'd.  Skip the first few bytes of oops on
  2205   // not-entrant methods.
  2206   address low_boundary = verified_entry_point();
  2207   if (is_not_entrant()) {
  2208     low_boundary += NativeJump::instruction_size;
  2209     // %%% Note:  On SPARC we patch only a 4-byte trap, not a full NativeJump.
  2210     // (See comment above.)
  2213   RelocIterator iter(this, low_boundary);
  2215   while (iter.next()) {
  2216     if (iter.type() == relocInfo::oop_type ) {
  2217       oop_Relocation* r = iter.oop_reloc();
  2218       // In this loop, we must only follow those oops directly embedded in
  2219       // the code.  Other oops (oop_index>0) are seen as part of scopes_oops.
  2220       assert(1 == (r->oop_is_immediate()) +
  2221                    (r->oop_addr() >= oops_begin() && r->oop_addr() < oops_end()),
  2222              "oop must be found in exactly one place");
  2223       if (r->oop_is_immediate() && r->oop_value() != NULL) {
  2224         f->do_oop(r->oop_addr());
  2229   // Scopes
  2230   // This includes oop constants not inlined in the code stream.
  2231   for (oop* p = oops_begin(); p < oops_end(); p++) {
  2232     if (*p == Universe::non_oop_word())  continue;  // skip non-oops
  2233     f->do_oop(p);
  2237 #define NMETHOD_SENTINEL ((nmethod*)badAddress)
  2239 nmethod* volatile nmethod::_oops_do_mark_nmethods;
  2241 // An nmethod is "marked" if its _oops_do_mark_link is set non-null.
  2242 // Even if it is the end of the linked list, it will have a non-null link value,
  2243 // as long as it is on the list.
  2244 // This code must be MP safe, because it is used from parallel GC passes.
  2245 bool nmethod::test_set_oops_do_mark() {
  2246   assert(nmethod::oops_do_marking_is_active(), "oops_do_marking_prologue must be called");
  2247   nmethod* observed_mark_link = _oops_do_mark_link;
  2248   if (observed_mark_link == NULL) {
  2249     // Claim this nmethod for this thread to mark.
  2250     observed_mark_link = (nmethod*)
  2251       Atomic::cmpxchg_ptr(NMETHOD_SENTINEL, &_oops_do_mark_link, NULL);
  2252     if (observed_mark_link == NULL) {
  2254       // Atomically append this nmethod (now claimed) to the head of the list:
  2255       nmethod* observed_mark_nmethods = _oops_do_mark_nmethods;
  2256       for (;;) {
  2257         nmethod* required_mark_nmethods = observed_mark_nmethods;
  2258         _oops_do_mark_link = required_mark_nmethods;
  2259         observed_mark_nmethods = (nmethod*)
  2260           Atomic::cmpxchg_ptr(this, &_oops_do_mark_nmethods, required_mark_nmethods);
  2261         if (observed_mark_nmethods == required_mark_nmethods)
  2262           break;
  2264       // Mark was clear when we first saw this guy.
  2265       NOT_PRODUCT(if (TraceScavenge)  print_on(tty, "oops_do, mark"));
  2266       return false;
  2269   // On fall through, another racing thread marked this nmethod before we did.
  2270   return true;
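       // --- Annotation (not part of the original source) ---
       // The claim-then-push above is a classic lock-free list insert: a CAS on
       // the per-nmethod link swings NULL -> sentinel so exactly one thread
       // claims the node, then a CAS loop pushes it onto the global list head.
       // The push loop in generic form:
       //
       //   Node* observed_head = head;
       //   for (;;) {
       //     node->next = observed_head;  // speculative link
       //     Node* prev = (Node*) Atomic::cmpxchg_ptr(node, &head, observed_head);
       //     if (prev == observed_head) break; // push succeeded
       //     observed_head = prev;             // lost the race; retry
       //   }
       //
       // The sentinel doubles as a non-NULL list terminator, so membership can
       // be tested with a single NULL check of the link field.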
  2273 void nmethod::oops_do_marking_prologue() {
  2274   NOT_PRODUCT(if (TraceScavenge)  tty->print_cr("[oops_do_marking_prologue"));
  2275   assert(_oops_do_mark_nmethods == NULL, "must not call oops_do_marking_prologue twice in a row");
  2276   // We use cmpxchg_ptr instead of regular assignment here because the user
  2277   // may fork a bunch of threads, and we need them all to see the same state.
  2278   void* observed = Atomic::cmpxchg_ptr(NMETHOD_SENTINEL, &_oops_do_mark_nmethods, NULL);
  2279   guarantee(observed == NULL, "no races in this sequential code");
  2282 void nmethod::oops_do_marking_epilogue() {
  2283   assert(_oops_do_mark_nmethods != NULL, "must not call oops_do_marking_epilogue twice in a row");
  2284   nmethod* cur = _oops_do_mark_nmethods;
  2285   while (cur != NMETHOD_SENTINEL) {
  2286     assert(cur != NULL, "not NULL-terminated");
  2287     nmethod* next = cur->_oops_do_mark_link;
  2288     cur->_oops_do_mark_link = NULL;
  2289     cur->verify_oop_relocations();
  2290     NOT_PRODUCT(if (TraceScavenge)  cur->print_on(tty, "oops_do, unmark"));
  2291     cur = next;
  2293   void* required = _oops_do_mark_nmethods;
  2294   void* observed = Atomic::cmpxchg_ptr(NULL, &_oops_do_mark_nmethods, required);
  2295   guarantee(observed == required, "no races in this sequential code");
  2296   NOT_PRODUCT(if (TraceScavenge)  tty->print_cr("oops_do_marking_epilogue]"));
  2299 class DetectScavengeRoot: public OopClosure {
  2300   bool     _detected_scavenge_root;
  2301 public:
  2302   DetectScavengeRoot() : _detected_scavenge_root(false)
  2303   { NOT_PRODUCT(_print_nm = NULL); }
  2304   bool detected_scavenge_root() { return _detected_scavenge_root; }
  2305   virtual void do_oop(oop* p) {
  2306     if ((*p) != NULL && (*p)->is_scavengable()) {
  2307       NOT_PRODUCT(maybe_print(p));
  2308       _detected_scavenge_root = true;
  2311   virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); }
  2313 #ifndef PRODUCT
  2314   nmethod* _print_nm;
  2315   void maybe_print(oop* p) {
  2316     if (_print_nm == NULL)  return;
  2317     if (!_detected_scavenge_root)  _print_nm->print_on(tty, "new scavenge root");
  2318     tty->print_cr(""PTR_FORMAT"[offset=%d] detected scavengable oop "PTR_FORMAT" (found at "PTR_FORMAT")",
  2319                   _print_nm, (int)((intptr_t)p - (intptr_t)_print_nm),
  2320                   (void *)(*p), (intptr_t)p);
  2321     (*p)->print();
  2323 #endif //PRODUCT
  2324 };
  2326 bool nmethod::detect_scavenge_root_oops() {
  2327   DetectScavengeRoot detect_scavenge_root;
  2328   NOT_PRODUCT(if (TraceScavenge)  detect_scavenge_root._print_nm = this);
  2329   oops_do(&detect_scavenge_root);
  2330   return detect_scavenge_root.detected_scavenge_root();
  2333 // Method that knows how to preserve outgoing arguments at call. This method must be
  2334 // called with a frame corresponding to a Java invoke
  2335 void nmethod::preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map, OopClosure* f) {
  2336 #ifndef SHARK
  2337   if (!method()->is_native()) {
  2338     SimpleScopeDesc ssd(this, fr.pc());
  2339     Bytecode_invoke call(ssd.method(), ssd.bci());
  2340     bool has_receiver = call.has_receiver();
  2341     bool has_appendix = call.has_appendix();
  2342     Symbol* signature = call.signature();
  2343     fr.oops_compiled_arguments_do(signature, has_receiver, has_appendix, reg_map, f);
  2345 #endif // !SHARK
  2349 oop nmethod::embeddedOop_at(u_char* p) {
  2350   RelocIterator iter(this, p, p + 1);
  2351   while (iter.next())
  2352     if (iter.type() == relocInfo::oop_type) {
  2353       return iter.oop_reloc()->oop_value();
  2355   return NULL;
  2359 inline bool includes(void* p, void* from, void* to) {
  2360   return from <= p && p < to;
  2364 void nmethod::copy_scopes_pcs(PcDesc* pcs, int count) {
  2365   assert(count >= 2, "must be sentinel values, at least");
  2367 #ifdef ASSERT
  2368   // must be sorted and unique; we do a binary search in find_pc_desc()
  2369   int prev_offset = pcs[0].pc_offset();
  2370   assert(prev_offset == PcDesc::lower_offset_limit,
  2371          "must start with a sentinel");
  2372   for (int i = 1; i < count; i++) {
  2373     int this_offset = pcs[i].pc_offset();
  2374     assert(this_offset > prev_offset, "offsets must be sorted");
  2375     prev_offset = this_offset;
  2377   assert(prev_offset == PcDesc::upper_offset_limit,
  2378          "must end with a sentinel");
  2379 #endif //ASSERT
  2381   // Search for MethodHandle invokes and tag the nmethod.
  2382   for (int i = 0; i < count; i++) {
  2383     if (pcs[i].is_method_handle_invoke()) {
  2384       set_has_method_handle_invokes(true);
  2385       break;
  2388   assert(has_method_handle_invokes() == (_deoptimize_mh_offset != -1), "must have deopt mh handler");
  2390   int size = count * sizeof(PcDesc);
  2391   assert(scopes_pcs_size() >= size, "oob");
  2392   memcpy(scopes_pcs_begin(), pcs, size);
  2394   // Adjust the final sentinel downward.
  2395   PcDesc* last_pc = &scopes_pcs_begin()[count-1];
  2396   assert(last_pc->pc_offset() == PcDesc::upper_offset_limit, "sanity");
  2397   last_pc->set_pc_offset(content_size() + 1);
  2398   for (; last_pc + 1 < scopes_pcs_end(); last_pc += 1) {
  2399     // Fill any rounding gaps with copies of the last record.
  2400     last_pc[1] = last_pc[0];
  2402   // The following assert could fail if sizeof(PcDesc) is not
  2403   // an integral multiple of oopSize (the rounding term).
  2404   // If it fails, change the logic to always allocate a multiple
  2405   // of sizeof(PcDesc), and fill unused words with copies of *last_pc.
  2406   assert(last_pc + 1 == scopes_pcs_end(), "must match exactly");
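       // --- Annotation (not part of the original source) ---
       // The two sentinels (PcDesc::lower_offset_limit in front, an offset just
       // past the code at the back) let find_pc_desc_internal() run its search
       // without bounds checks: any in-range pc_offset is strictly greater than
       // the first entry and no greater than the last, so lower and upper
       // always bracket a valid answer.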
  2409 void nmethod::copy_scopes_data(u_char* buffer, int size) {
  2410   assert(scopes_data_size() >= size, "oob");
  2411   memcpy(scopes_data_begin(), buffer, size);
  2415 #ifdef ASSERT
  2416 static PcDesc* linear_search(nmethod* nm, int pc_offset, bool approximate) {
  2417   PcDesc* lower = nm->scopes_pcs_begin();
  2418   PcDesc* upper = nm->scopes_pcs_end();
  2419   lower += 1; // exclude initial sentinel
  2420   PcDesc* res = NULL;
  2421   for (PcDesc* p = lower; p < upper; p++) {
  2422     NOT_PRODUCT(--nmethod_stats.pc_desc_tests);  // don't count this call to match_desc
  2423     if (match_desc(p, pc_offset, approximate)) {
  2424       if (res == NULL)
  2425         res = p;
  2426       else
  2427         res = (PcDesc*) badAddress;
  2430   return res;
  2432 #endif
  2435 // Finds a PcDesc with real-pc equal to "pc"
  2436 PcDesc* nmethod::find_pc_desc_internal(address pc, bool approximate) {
  2437   address base_address = code_begin();
  2438   if ((pc < base_address) ||
  2439       (pc - base_address) >= (ptrdiff_t) PcDesc::upper_offset_limit) {
  2440     return NULL;  // PC is wildly out of range
  2442   int pc_offset = (int) (pc - base_address);
  2444   // Check whether the PcDesc cache contains the desired PcDesc
  2445   // (This has an almost 100% hit rate.)
  2446   PcDesc* res = _pc_desc_cache.find_pc_desc(pc_offset, approximate);
  2447   if (res != NULL) {
  2448     assert(res == linear_search(this, pc_offset, approximate), "cache ok");
  2449     return res;
  2452   // Fallback algorithm: quasi-linear search for the PcDesc
  2453   // Find the last pc_offset less than the given offset.
  2454   // The successor must be the required match, if there is a match at all.
  2455   // (Use a fixed radix to avoid expensive affine pointer arithmetic.)
  2456   PcDesc* lower = scopes_pcs_begin();
  2457   PcDesc* upper = scopes_pcs_end();
  2458   upper -= 1; // exclude final sentinel
  2459   if (lower >= upper)  return NULL;  // native method; no PcDescs at all
  2461 #define assert_LU_OK \
  2462   /* invariant on lower..upper during the following search: */ \
  2463   assert(lower->pc_offset() <  pc_offset, "sanity"); \
  2464   assert(upper->pc_offset() >= pc_offset, "sanity")
  2465   assert_LU_OK;
  2467   // Use the last successful return as a split point.
  2468   PcDesc* mid = _pc_desc_cache.last_pc_desc();
  2469   NOT_PRODUCT(++nmethod_stats.pc_desc_searches);
  2470   if (mid->pc_offset() < pc_offset) {
  2471     lower = mid;
  2472   } else {
  2473     upper = mid;
  2476   // Take giant steps at first (4096, then 256, then 16, then 1)
  2477   const int LOG2_RADIX = 4 /*smaller steps in debug mode:*/ debug_only(-1);
  2478   const int RADIX = (1 << LOG2_RADIX);
  2479   for (int step = (1 << (LOG2_RADIX*3)); step > 1; step >>= LOG2_RADIX) {
  2480     while ((mid = lower + step) < upper) {
  2481       assert_LU_OK;
  2482       NOT_PRODUCT(++nmethod_stats.pc_desc_searches);
  2483       if (mid->pc_offset() < pc_offset) {
  2484         lower = mid;
  2485       } else {
  2486         upper = mid;
  2487         break;
  2490     assert_LU_OK;
  2493   // Sneak up on the value with a linear search of length ~16.
  2494   while (true) {
  2495     assert_LU_OK;
  2496     mid = lower + 1;
  2497     NOT_PRODUCT(++nmethod_stats.pc_desc_searches);
  2498     if (mid->pc_offset() < pc_offset) {
  2499       lower = mid;
  2500     } else {
  2501       upper = mid;
  2502       break;
  2505 #undef assert_LU_OK
  2507   if (match_desc(upper, pc_offset, approximate)) {
  2508     assert(upper == linear_search(this, pc_offset, approximate), "search ok");
  2509     _pc_desc_cache.add_pc_desc(upper);
  2510     return upper;
  2511   } else {
  2512     assert(NULL == linear_search(this, pc_offset, approximate), "search ok");
  2513     return NULL;
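       // --- Annotation (not part of the original source) ---
       // The search above is a radix-stepped binary search. A self-contained
       // sketch of the same idea on a sorted int array, using step sizes of
       // 4096/256/16 before the final linear sweep:
       //
       //   // Find the last index i with a[i] < key, given that
       //   // a[lower] < key <= a[upper] holds on entry.
       //   int radix_search(const int* a, int lower, int upper, int key) {
       //     const int LOG2_RADIX = 4;
       //     for (int step = (1 << (LOG2_RADIX * 3)); step > 1; step >>= LOG2_RADIX) {
       //       int mid;
       //       while ((mid = lower + step) < upper) {
       //         if (a[mid] < key) lower = mid;
       //         else { upper = mid; break; }
       //       }
       //     }
       //     while (lower + 1 < upper) {
       //       if (a[lower + 1] < key) lower++;
       //       else break;
       //     }
       //     return lower;
       //   }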
  2518 bool nmethod::check_all_dependencies() {
  2519   bool found_check = false;
  2520   // wholesale check of all dependencies
  2521   for (Dependencies::DepStream deps(this); deps.next(); ) {
  2522     if (deps.check_dependency() != NULL) {
  2523       found_check = true;
  2524       NOT_DEBUG(break);
  2527   return found_check;  // tell caller if we found anything
  2530 bool nmethod::check_dependency_on(DepChange& changes) {
  2531   // What has happened:
  2532   // 1) a new class dependee has been added
  2533   // 2) dependee and all its super classes have been marked
  2534   bool found_check = false;  // set true if we are upset
  2535   for (Dependencies::DepStream deps(this); deps.next(); ) {
  2536     // Evaluate only relevant dependencies.
  2537     if (deps.spot_check_dependency_at(changes) != NULL) {
  2538       found_check = true;
  2539       NOT_DEBUG(break);
  2542   return found_check;
  2545 bool nmethod::is_evol_dependent_on(Klass* dependee) {
  2546   InstanceKlass *dependee_ik = InstanceKlass::cast(dependee);
  2547   Array<Method*>* dependee_methods = dependee_ik->methods();
  2548   for (Dependencies::DepStream deps(this); deps.next(); ) {
  2549     if (deps.type() == Dependencies::evol_method) {
  2550       Method* method = deps.method_argument(0);
  2551       for (int j = 0; j < dependee_methods->length(); j++) {
  2552         if (dependee_methods->at(j) == method) {
  2553           // RC_TRACE macro has an embedded ResourceMark
  2554           RC_TRACE(0x01000000,
  2555             ("Found evol dependency of nmethod %s.%s(%s) compile_id=%d on method %s.%s(%s)",
  2556             _method->method_holder()->external_name(),
  2557             _method->name()->as_C_string(),
  2558             _method->signature()->as_C_string(), compile_id(),
  2559             method->method_holder()->external_name(),
  2560             method->name()->as_C_string(),
  2561             method->signature()->as_C_string()));
  2562           if (TraceDependencies || LogCompilation)
  2563             deps.log_dependency(dependee);
  2564           return true;
  2569   return false;
  2572 // Called from mark_for_deoptimization, when dependee is invalidated.
  2573 bool nmethod::is_dependent_on_method(Method* dependee) {
  2574   for (Dependencies::DepStream deps(this); deps.next(); ) {
  2575     if (deps.type() != Dependencies::evol_method)
  2576       continue;
  2577     Method* method = deps.method_argument(0);
  2578     if (method == dependee) return true;
  2580   return false;
  2584 bool nmethod::is_patchable_at(address instr_addr) {
  2585   assert(insts_contains(instr_addr), "wrong nmethod used");
  2586   if (is_zombie()) {
  2587     // a zombie may never be patched
  2588     return false;
  2590   return true;
  2594 address nmethod::continuation_for_implicit_exception(address pc) {
  2595   // Exception happened outside inline-cache check code => we are inside
  2596   // an active nmethod => use cpc to determine a return address
  2597   int exception_offset = pc - code_begin();
  2598   int cont_offset = ImplicitExceptionTable(this).at( exception_offset );
  2599 #ifdef ASSERT
  2600   if (cont_offset == 0) {
  2601     Thread* thread = ThreadLocalStorage::get_thread_slow();
  2602     ResetNoHandleMark rnm; // Might be called from LEAF/QUICK ENTRY
  2603     HandleMark hm(thread);
  2604     ResourceMark rm(thread);
  2605     CodeBlob* cb = CodeCache::find_blob(pc);
  2606     assert(cb != NULL && cb == this, "");
  2607     tty->print_cr("implicit exception happened at " INTPTR_FORMAT, pc);
  2608     print();
  2609     method()->print_codes();
  2610     print_code();
  2611     print_pcs();
  2613 #endif
  2614   if (cont_offset == 0) {
  2615     // Let the normal error handling report the exception
  2616     return NULL;
  2618   return code_begin() + cont_offset;
  2623 void nmethod_init() {
  2624   // make sure you didn't forget to adjust the filler fields
  2625   assert(sizeof(nmethod) % oopSize == 0, "nmethod size must be multiple of a word");
  2629 //-------------------------------------------------------------------------------------------
  2632 // QQQ might we make this work from a frame??
  2633 nmethodLocker::nmethodLocker(address pc) {
  2634   CodeBlob* cb = CodeCache::find_blob(pc);
  2635   guarantee(cb != NULL && cb->is_nmethod(), "bad pc for a nmethod found");
  2636   _nm = (nmethod*)cb;
  2637   lock_nmethod(_nm);
  2640 // Only JvmtiDeferredEvent::compiled_method_unload_event()
  2641 // should pass zombie_ok == true.
  2642 void nmethodLocker::lock_nmethod(nmethod* nm, bool zombie_ok) {
  2643   if (nm == NULL)  return;
  2644   Atomic::inc(&nm->_lock_count);
  2645   guarantee(zombie_ok || !nm->is_zombie(), "cannot lock a zombie method");
  2648 void nmethodLocker::unlock_nmethod(nmethod* nm) {
  2649   if (nm == NULL)  return;
  2650   Atomic::dec(&nm->_lock_count);
  2651   guarantee(nm->_lock_count >= 0, "unmatched nmethod lock/unlock");
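       // --- Annotation (not part of the original source) ---
       // nmethodLocker is used as an RAII guard: the constructor bumps
       // _lock_count and the destructor releases it, so the sweeper cannot
       // flush the nmethod for the lifetime of the scope. Typical use:
       //
       //   {
       //     nmethodLocker nml(nm); // pins nm against flushing
       //     // ... use nm, even across a safepoint ...
       //   }                        // destructor unlocks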
  2655 // -----------------------------------------------------------------------------
  2656 // nmethod::get_deopt_original_pc
  2657 //
  2658 // Return the original PC for the given PC if:
  2659 // (a) the given PC belongs to a nmethod and
  2660 // (b) it is a deopt PC
  2661 address nmethod::get_deopt_original_pc(const frame* fr) {
  2662   if (fr->cb() == NULL)  return NULL;
  2664   nmethod* nm = fr->cb()->as_nmethod_or_null();
  2665   if (nm != NULL && nm->is_deopt_pc(fr->pc()))
  2666     return nm->get_original_pc(fr);
  2668   return NULL;
  2672 // -----------------------------------------------------------------------------
  2673 // MethodHandle
  2675 bool nmethod::is_method_handle_return(address return_pc) {
  2676   if (!has_method_handle_invokes())  return false;
  2677   PcDesc* pd = pc_desc_at(return_pc);
  2678   if (pd == NULL)
  2679     return false;
  2680   return pd->is_method_handle_invoke();
  2684 // -----------------------------------------------------------------------------
  2685 // Verification
  2687 class VerifyOopsClosure: public OopClosure {
  2688   nmethod* _nm;
  2689   bool     _ok;
  2690 public:
  2691   VerifyOopsClosure(nmethod* nm) : _nm(nm), _ok(true) { }
  2692   bool ok() { return _ok; }
  2693   virtual void do_oop(oop* p) {
  2694     if ((*p) == NULL || (*p)->is_oop())  return;
  2695     if (_ok) {
  2696       _nm->print_nmethod(true);
  2697       _ok = false;
  2699     tty->print_cr("*** non-oop "PTR_FORMAT" found at "PTR_FORMAT" (offset %d)",
  2700                   (void *)(*p), (intptr_t)p, (int)((intptr_t)p - (intptr_t)_nm));
  2702   virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); }
  2703 };
  2705 void nmethod::verify() {
  2707   // Hmm. OSR methods can be deopted but not marked as zombie or not_entrant,
  2708   // which seems odd.
  2710   if (is_zombie() || is_not_entrant())
  2711     return;
  2713   // Make sure all the entry points are correctly aligned for patching.
  2714   NativeJump::check_verified_entry_alignment(entry_point(), verified_entry_point());
  2716   // assert(method()->is_oop(), "must be valid");
  2718   ResourceMark rm;
  2720   if (!CodeCache::contains(this)) {
  2721     fatal(err_msg("nmethod at " INTPTR_FORMAT " not in zone", this));
  2724   if (is_native_method())
  2725     return;
  2727   nmethod* nm = CodeCache::find_nmethod(verified_entry_point());
  2728   if (nm != this) {
  2729     fatal(err_msg("findNMethod did not find this nmethod (" INTPTR_FORMAT ")",
  2730                   this));
  2733   for (PcDesc* p = scopes_pcs_begin(); p < scopes_pcs_end(); p++) {
  2734     if (!p->verify(this)) {
  2735       tty->print_cr("\t\tin nmethod at " INTPTR_FORMAT " (pcs)", this);
  2739   VerifyOopsClosure voc(this);
  2740   oops_do(&voc);
  2741   assert(voc.ok(), "embedded oops must be OK");
  2742   verify_scavenge_root_oops();
  2744   verify_scopes();
  2748 void nmethod::verify_interrupt_point(address call_site) {
  2749   // Verify IC only when nmethod installation is finished.
  2750   bool is_installed = (method()->code() == this) // nmethod is in state 'in_use' and installed
  2751                       || !this->is_in_use();     // nmethod is installed, but not in 'in_use' state
  2752   if (is_installed) {
  2753     Thread *cur = Thread::current();
  2754     if (CompiledIC_lock->owner() == cur ||
  2755         ((cur->is_VM_thread() || cur->is_ConcurrentGC_thread()) &&
  2756          SafepointSynchronize::is_at_safepoint())) {
  2757       CompiledIC_at(this, call_site);
  2758       CHECK_UNHANDLED_OOPS_ONLY(Thread::current()->clear_unhandled_oops());
  2759     } else {
  2760       MutexLocker ml_verify (CompiledIC_lock);
  2761       CompiledIC_at(this, call_site);
  2765   PcDesc* pd = pc_desc_at(nativeCall_at(call_site)->return_address());
  2766   assert(pd != NULL, "PcDesc must exist");
  2767   for (ScopeDesc* sd = new ScopeDesc(this, pd->scope_decode_offset(),
  2768                                      pd->obj_decode_offset(), pd->should_reexecute(),
  2769                                      pd->return_oop());
  2770        !sd->is_top(); sd = sd->sender()) {
  2771     sd->verify();
  2775 void nmethod::verify_scopes() {
  2776   if (!method()) return;       // Runtime stubs have no scope
  2777   if (method()->is_native()) return; // Ignore stub methods.
  2778   // Iterate through all interrupt points
  2779   // and verify that the debug information is valid.
  2780   RelocIterator iter((nmethod*)this);
  2781   while (iter.next()) {
  2782     address stub = NULL;
  2783     switch (iter.type()) {
  2784       case relocInfo::virtual_call_type:
  2785         verify_interrupt_point(iter.addr());
  2786         break;
  2787       case relocInfo::opt_virtual_call_type:
  2788         stub = iter.opt_virtual_call_reloc()->static_stub();
  2789         verify_interrupt_point(iter.addr());
  2790         break;
  2791       case relocInfo::static_call_type:
  2792         stub = iter.static_call_reloc()->static_stub();
  2793         //verify_interrupt_point(iter.addr());
  2794         break;
  2795       case relocInfo::runtime_call_type:
  2796         address destination = iter.reloc()->value();
  2797         // Right now there is no way to find out which entries support
  2798         // an interrupt point.  It would be nice if we had this
  2799         // information in a table.
  2800         break;
  2802     assert(stub == NULL || stub_contains(stub), "static call stub outside stub section");
// -----------------------------------------------------------------------------
// Non-product code
#ifndef PRODUCT

class DebugScavengeRoot: public OopClosure {
  nmethod* _nm;
  bool     _ok;
public:
  DebugScavengeRoot(nmethod* nm) : _nm(nm), _ok(true) { }
  bool ok() { return _ok; }
  virtual void do_oop(oop* p) {
    if ((*p) == NULL || !(*p)->is_scavengable())  return;
    if (_ok) {
      _nm->print_nmethod(true);
      _ok = false;
    }
    tty->print_cr("*** scavengable oop " PTR_FORMAT " found at " PTR_FORMAT " (offset %d)",
                  (void *)(*p), (intptr_t)p, (int)((intptr_t)p - (intptr_t)_nm));
    (*p)->print();
  }
  virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); }
};
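
// Cross-check this nmethod's scavenge-root bookkeeping against its actual
// embedded oops. Under G1 the scavenge-root nmethod list is not used (G1
// tracks nmethod roots separately, via its per-region code-root sets), which
// is why the check below returns early when UseG1GC is set.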
void nmethod::verify_scavenge_root_oops() {
  if (UseG1GC) {
    return;
  }

  if (!on_scavenge_root_list()) {
    // Actually look inside, to verify the claim that it's clean.
    DebugScavengeRoot debug_scavenge_root(this);
    oops_do(&debug_scavenge_root);
    if (!debug_scavenge_root.ok())
      fatal("found an unadvertised bad scavengable oop in the code cache");
  }
  assert(scavenge_root_not_marked(), "");
}

#endif // PRODUCT
// Printing operations

void nmethod::print() const {
  ResourceMark rm;
  ttyLocker ttyl;   // keep the following output all in one block

  tty->print("Compiled method ");

  if (is_compiled_by_c1()) {
    tty->print("(c1) ");
  } else if (is_compiled_by_c2()) {
    tty->print("(c2) ");
  } else if (is_compiled_by_shark()) {
    tty->print("(shark) ");
  } else {
    tty->print("(nm) ");
  }

  print_on(tty, NULL);

  if (WizardMode) {
    tty->print("((nmethod*) " INTPTR_FORMAT ") ", this);
    tty->print(" for method " INTPTR_FORMAT, (address)method());
    tty->print(" { ");
    if (is_in_use())      tty->print("in_use ");
    if (is_not_entrant()) tty->print("not_entrant ");
    if (is_zombie())      tty->print("zombie ");
    if (is_unloaded())    tty->print("unloaded ");
    if (on_scavenge_root_list())  tty->print("scavenge_root ");
    tty->print_cr("}:");
  }
  if (size              () > 0) tty->print_cr(" total in heap  [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
                                              (address)this,
                                              (address)this + size(),
                                              size());
  if (relocation_size   () > 0) tty->print_cr(" relocation     [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
                                              relocation_begin(),
                                              relocation_end(),
                                              relocation_size());
  if (consts_size       () > 0) tty->print_cr(" constants      [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
                                              consts_begin(),
                                              consts_end(),
                                              consts_size());
  if (insts_size        () > 0) tty->print_cr(" main code      [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
                                              insts_begin(),
                                              insts_end(),
                                              insts_size());
  if (stub_size         () > 0) tty->print_cr(" stub code      [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
                                              stub_begin(),
                                              stub_end(),
                                              stub_size());
  if (oops_size         () > 0) tty->print_cr(" oops           [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
                                              oops_begin(),
                                              oops_end(),
                                              oops_size());
  if (metadata_size     () > 0) tty->print_cr(" metadata       [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
                                              metadata_begin(),
                                              metadata_end(),
                                              metadata_size());
  if (scopes_data_size  () > 0) tty->print_cr(" scopes data    [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
                                              scopes_data_begin(),
                                              scopes_data_end(),
                                              scopes_data_size());
  if (scopes_pcs_size   () > 0) tty->print_cr(" scopes pcs     [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
                                              scopes_pcs_begin(),
                                              scopes_pcs_end(),
                                              scopes_pcs_size());
  if (dependencies_size () > 0) tty->print_cr(" dependencies   [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
                                              dependencies_begin(),
                                              dependencies_end(),
                                              dependencies_size());
  if (handler_table_size() > 0) tty->print_cr(" handler table  [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
                                              handler_table_begin(),
                                              handler_table_end(),
                                              handler_table_size());
  if (nul_chk_table_size() > 0) tty->print_cr(" nul chk table  [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
                                              nul_chk_table_begin(),
                                              nul_chk_table_end(),
                                              nul_chk_table_size());
}
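
// For reference, the layout dump above yields output of roughly this shape;
// the addresses and sizes here are made up purely for illustration:
//
//  total in heap  [0x00007f5ca9c69a10,0x00007f5ca9c69ce8] = 728
//  relocation     [0x00007f5ca9c69b58,0x00007f5ca9c69b80] = 40
//  main code      [0x00007f5ca9c69b80,0x00007f5ca9c69c40] = 192
//  stub code      [0x00007f5ca9c69c40,0x00007f5ca9c69c58] = 24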
void nmethod::print_code() {
  HandleMark hm;
  ResourceMark m;
  Disassembler::decode(this);
}
#ifndef PRODUCT

void nmethod::print_scopes() {
  // Decode and print the scope for every PcDesc that has debug information.
  ResourceMark rm;
  for (PcDesc* p = scopes_pcs_begin(); p < scopes_pcs_end(); p++) {
    if (p->scope_decode_offset() == DebugInformationRecorder::serialized_null)
      continue;

    ScopeDesc* sd = scope_desc_at(p->real_pc(this));
    sd->print_on(tty, p);
  }
}
void nmethod::print_dependencies() {
  ResourceMark rm;
  ttyLocker ttyl;   // keep the following output all in one block
  tty->print_cr("Dependencies:");
  for (Dependencies::DepStream deps(this); deps.next(); ) {
    deps.print_dependency();
    Klass* ctxk = deps.context_type();
    if (ctxk != NULL) {
      if (ctxk->oop_is_instance() && ((InstanceKlass*)ctxk)->is_dependent_nmethod(this)) {
        tty->print_cr("   [nmethod<=klass]%s", ctxk->external_name());
      }
    }
    deps.log_dependency();  // put it into the xml log also
  }
}
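
// As print_relocations() below reads it, the optional relocation index
// (enabled by UseRelocIndex) sits at the tail of the relocation area: the
// last jint holds the index size in bytes, and it is preceded by pairs of
// jints mapping a code offset (relative to header_end()) to the offset of
// its relocation record (relative to relocation_begin() - 1).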
void nmethod::print_relocations() {
  ResourceMark m;       // in case methods get printed via the debugger
  tty->print_cr("relocations:");
  RelocIterator iter(this);
  iter.print();
  if (UseRelocIndex) {
    jint* index_end   = (jint*)relocation_end() - 1;
    jint  index_size  = *index_end;
    jint* index_start = (jint*)( (address)index_end - index_size );
    tty->print_cr("    index @" INTPTR_FORMAT ": index_size=%d", index_start, index_size);
    if (index_size > 0) {
      jint* ip;
      for (ip = index_start; ip+2 <= index_end; ip += 2)
        tty->print_cr("  (%d %d) addr=" INTPTR_FORMAT " @" INTPTR_FORMAT,
                      ip[0],
                      ip[1],
                      header_end()+ip[0],
                      relocation_begin()-1+ip[1]);
      for (; ip < index_end; ip++)
        tty->print_cr("  (%d ?)", ip[0]);
      tty->print_cr("          @" INTPTR_FORMAT ": index_size=%d", ip, *ip);
      ip++;
      tty->print_cr("reloc_end @" INTPTR_FORMAT ":", ip);
    }
  }
}
void nmethod::print_pcs() {
  ResourceMark m;       // in case methods get printed via debugger
  tty->print_cr("pc-bytecode offsets:");
  for (PcDesc* p = scopes_pcs_begin(); p < scopes_pcs_end(); p++) {
    p->print(this);
  }
}

#endif // PRODUCT
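
// Produce a human-readable tag for the relocation(s) found between begin and
// end, used by the code-comment printer below. Note that the oop/metadata
// strings come from stringStream::as_string(), which allocates in the
// current ResourceArea, so callers need a ResourceMark in scope.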
const char* nmethod::reloc_string_for(u_char* begin, u_char* end) {
  RelocIterator iter(this, begin, end);
  bool have_one = false;
  while (iter.next()) {
    have_one = true;
    switch (iter.type()) {
        case relocInfo::none:                  return "no_reloc";
        case relocInfo::oop_type: {
          stringStream st;
          oop_Relocation* r = iter.oop_reloc();
          oop obj = r->oop_value();
          st.print("oop(");
          if (obj == NULL) st.print("NULL");
          else obj->print_value_on(&st);
          st.print(")");
          return st.as_string();
        }
        case relocInfo::metadata_type: {
          stringStream st;
          metadata_Relocation* r = iter.metadata_reloc();
          Metadata* obj = r->metadata_value();
          st.print("metadata(");
          if (obj == NULL) st.print("NULL");
          else obj->print_value_on(&st);
          st.print(")");
          return st.as_string();
        }
        case relocInfo::virtual_call_type:     return "virtual_call";
        case relocInfo::opt_virtual_call_type: return "optimized virtual_call";
        case relocInfo::static_call_type:      return "static_call";
        case relocInfo::static_stub_type:      return "static_stub";
        case relocInfo::runtime_call_type:     return "runtime_call";
        case relocInfo::external_word_type:    return "external_word";
        case relocInfo::internal_word_type:    return "internal_word";
        case relocInfo::section_word_type:     return "section_word";
        case relocInfo::poll_type:             return "poll";
        case relocInfo::poll_return_type:      return "poll_return";
        case relocInfo::type_mask:             return "type_bit_mask";
    }
  }
  return have_one ? "other" : NULL;
}
// Return the last scope in (begin..end]
ScopeDesc* nmethod::scope_desc_in(address begin, address end) {
  PcDesc* p = pc_desc_near(begin+1);
  if (p != NULL && p->real_pc(this) <= end) {
    return new ScopeDesc(this, p->scope_decode_offset(),
                         p->obj_decode_offset(), p->should_reexecute(),
                         p->return_oop());
  }
  return NULL;
}
void nmethod::print_nmethod_labels(outputStream* stream, address block_begin) const {
  if (block_begin == entry_point())             stream->print_cr("[Entry Point]");
  if (block_begin == verified_entry_point())    stream->print_cr("[Verified Entry Point]");
  if (block_begin == exception_begin())         stream->print_cr("[Exception Handler]");
  if (block_begin == stub_begin())              stream->print_cr("[Stub Code]");
  if (block_begin == deopt_handler_begin())     stream->print_cr("[Deopt Handler Code]");

  if (has_method_handle_invokes())
    if (block_begin == deopt_mh_handler_begin())  stream->print_cr("[Deopt MH Handler Code]");

  if (block_begin == consts_begin())            stream->print_cr("[Constants]");

  if (block_begin == entry_point()) {
    methodHandle m = method();
    if (m.not_null()) {
      stream->print("  # ");
      m->print_value_on(stream);
      stream->cr();
    }
    if (m.not_null() && !is_osr_method()) {
      ResourceMark rm;
      int sizeargs = m->size_of_parameters();
      BasicType* sig_bt = NEW_RESOURCE_ARRAY(BasicType, sizeargs);
      VMRegPair* regs   = NEW_RESOURCE_ARRAY(VMRegPair, sizeargs);
      {
        int sig_index = 0;
        if (!m->is_static())
          sig_bt[sig_index++] = T_OBJECT; // 'this'
        for (SignatureStream ss(m->signature()); !ss.at_return_type(); ss.next()) {
          BasicType t = ss.type();
          sig_bt[sig_index++] = t;
          if (type2size[t] == 2) {
            sig_bt[sig_index++] = T_VOID;
          } else {
            assert(type2size[t] == 1, "size is 1 or 2");
          }
        }
        assert(sig_index == sizeargs, "");
      }
      const char* spname = "sp"; // make arch-specific?
      intptr_t out_preserve = SharedRuntime::java_calling_convention(sig_bt, regs, sizeargs, false);
      int stack_slot_offset = this->frame_size() * wordSize;
      int tab1 = 14, tab2 = 24;
      int sig_index = 0;
      int arg_index = (m->is_static() ? 0 : -1);
      bool did_old_sp = false;
      for (SignatureStream ss(m->signature()); !ss.at_return_type(); ) {
        bool at_this = (arg_index == -1);
        bool at_old_sp = false;
        BasicType t = (at_this ? T_OBJECT : ss.type());
        assert(t == sig_bt[sig_index], "sigs in sync");
        if (at_this)
          stream->print("  # this: ");
        else
          stream->print("  # parm%d: ", arg_index);
        stream->move_to(tab1);
        VMReg fst = regs[sig_index].first();
        VMReg snd = regs[sig_index].second();
        if (fst->is_reg()) {
          stream->print("%s", fst->name());
          if (snd->is_valid())  {
            stream->print(":%s", snd->name());
          }
        } else if (fst->is_stack()) {
          int offset = fst->reg2stack() * VMRegImpl::stack_slot_size + stack_slot_offset;
          if (offset == stack_slot_offset)  at_old_sp = true;
          stream->print("[%s+0x%x]", spname, offset);
        } else {
          stream->print("reg%d:%d??", (int)(intptr_t)fst, (int)(intptr_t)snd);
        }
        stream->print(" ");
        stream->move_to(tab2);
        stream->print("= ");
        if (at_this) {
          m->method_holder()->print_value_on(stream);
        } else {
          bool did_name = false;
          if (!at_this && ss.is_object()) {
            Symbol* name = ss.as_symbol_or_null();
            if (name != NULL) {
              name->print_value_on(stream);
              did_name = true;
            }
          }
          if (!did_name)
            stream->print("%s", type2name(t));
        }
        if (at_old_sp) {
          stream->print("  (%s of caller)", spname);
          did_old_sp = true;
        }
        stream->cr();
        sig_index += type2size[t];
        arg_index += 1;
        if (!at_this)  ss.next();
      }
      if (!did_old_sp) {
        stream->print("  # ");
        stream->move_to(tab1);
        stream->print("[%s+0x%x]", spname, stack_slot_offset);
        stream->print("  (%s of caller)", spname);
        stream->cr();
      }
    }
  }
}
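
// The per-argument annotations emitted above look roughly as follows for an
// instance method taking (int, String); the register names and the stack
// offset are made up for illustration:
//
//   # this:     rsi:rsi   = 'java/lang/Object'
//   # parm0:    rdx       = int
//   # parm1:    rcx:rcx   = 'java/lang/String'
//   #           [sp+0x60]  (sp of caller)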
void nmethod::print_code_comment_on(outputStream* st, int column, u_char* begin, u_char* end) {
  // First, find an oopmap in (begin, end].
  // We use the odd half-closed interval so that oop maps and scope descs
  // which are tied to the byte after a call are printed with the call itself.
  address base = code_begin();
  OopMapSet* oms = oop_maps();
  if (oms != NULL) {
    for (int i = 0, imax = oms->size(); i < imax; i++) {
      OopMap* om = oms->at(i);
      address pc = base + om->offset();
      if (pc > begin) {
        if (pc <= end) {
          st->move_to(column);
          st->print("; ");
          om->print_on(st);
        }
        break;
      }
    }
  }

  // Print any debug info present at this pc.
  ScopeDesc* sd  = scope_desc_in(begin, end);
  if (sd != NULL) {
    st->move_to(column);
    if (sd->bci() == SynchronizationEntryBCI) {
      st->print(";*synchronization entry");
    } else {
      if (sd->method() == NULL) {
        st->print("method is NULL");
      } else if (sd->method()->is_native()) {
        st->print("method is native");
      } else {
        Bytecodes::Code bc = sd->method()->java_code_at(sd->bci());
        st->print(";*%s", Bytecodes::name(bc));
        switch (bc) {
        case Bytecodes::_invokevirtual:
        case Bytecodes::_invokespecial:
        case Bytecodes::_invokestatic:
        case Bytecodes::_invokeinterface:
          {
            Bytecode_invoke invoke(sd->method(), sd->bci());
            st->print(" ");
            if (invoke.name() != NULL)
              invoke.name()->print_symbol_on(st);
            else
              st->print("<UNKNOWN>");
            break;
          }
        case Bytecodes::_getfield:
        case Bytecodes::_putfield:
        case Bytecodes::_getstatic:
        case Bytecodes::_putstatic:
          {
            Bytecode_field field(sd->method(), sd->bci());
            st->print(" ");
            if (field.name() != NULL)
              field.name()->print_symbol_on(st);
            else
              st->print("<UNKNOWN>");
          }
        }
      }
    }
    // Print all scopes
    for (;sd != NULL; sd = sd->sender()) {
      st->move_to(column);
      st->print("; -");
      if (sd->method() == NULL) {
        st->print("method is NULL");
      } else {
        sd->method()->print_short_name(st);
      }
      // Guard against a NULL method (reported as "method is NULL" above)
      // before asking it for a line number.
      int lineno = (sd->method() == NULL) ? -1 : sd->method()->line_number_from_bci(sd->bci());
      if (lineno != -1) {
        st->print("@%d (line %d)", sd->bci(), lineno);
      } else {
        st->print("@%d", sd->bci());
      }
      st->cr();
    }
  }
  // Print relocation information
  const char* str = reloc_string_for(begin, end);
  if (str != NULL) {
    if (sd != NULL) st->cr();
    st->move_to(column);
    st->print(";   {%s}", str);
  }
  int cont_offset = ImplicitExceptionTable(this).at(begin - code_begin());
  if (cont_offset != 0) {
    st->move_to(column);
    st->print("; implicit exception: dispatches to " INTPTR_FORMAT, code_begin() + cont_offset);
  }
}
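
// Taken together, the pieces above annotate a disassembly line with comments
// such as the following; the method, bci, line number, and target address
// are made up for illustration:
//
//   ;*invokevirtual length
//   ; - java.lang.String::isEmpty@1 (line 623)
//   ;   {virtual_call}
//   ; implicit exception: dispatches to 0x00007f5ca9c69c40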
#ifndef PRODUCT

void nmethod::print_value_on(outputStream* st) const {
  st->print("nmethod");
  print_on(st, NULL);
}
void nmethod::print_calls(outputStream* st) {
  RelocIterator iter(this);
  while (iter.next()) {
    switch (iter.type()) {
    case relocInfo::virtual_call_type:
    case relocInfo::opt_virtual_call_type: {
      VerifyMutexLocker mc(CompiledIC_lock);
      CompiledIC_at(&iter)->print();
      break;
    }
    case relocInfo::static_call_type:
      st->print_cr("Static call at " INTPTR_FORMAT, iter.reloc()->addr());
      compiledStaticCall_at(iter.reloc())->print();
      break;
    }
  }
}
void nmethod::print_handler_table() {
  ExceptionHandlerTable(this).print();
}

void nmethod::print_nul_chk_table() {
  ImplicitExceptionTable(this).print(code_begin());
}
void nmethod::print_statistics() {
  ttyLocker ttyl;
  if (xtty != NULL)  xtty->head("statistics type='nmethod'");
  nmethod_stats.print_native_nmethod_stats();
  nmethod_stats.print_nmethod_stats();
  DebugInformationRecorder::print_statistics();
  nmethod_stats.print_pc_stats();
  Dependencies::print_statistics();
  if (xtty != NULL)  xtty->tail("statistics");
}

#endif // PRODUCT