src/share/vm/code/nmethod.cpp

author      twisti
date        Mon, 28 Feb 2011 06:07:12 -0800

7012914: JSR 292 MethodHandlesTest C1: frame::verify_return_pc(return_address) failed: must be a return pc
Reviewed-by: never, bdelsart

/*
 * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "code/codeCache.hpp"
#include "code/compiledIC.hpp"
#include "code/nmethod.hpp"
#include "code/scopeDesc.hpp"
#include "compiler/abstractCompiler.hpp"
#include "compiler/compileLog.hpp"
#include "compiler/compilerOracle.hpp"
#include "compiler/disassembler.hpp"
#include "interpreter/bytecode.hpp"
#include "oops/methodDataOop.hpp"
#include "prims/jvmtiRedefineClassesTrace.hpp"
#include "prims/jvmtiImpl.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/sweeper.hpp"
#include "utilities/dtrace.hpp"
#include "utilities/events.hpp"
#include "utilities/xmlstream.hpp"
#ifdef SHARK
#include "shark/sharkCompiler.hpp"
#endif

#ifdef DTRACE_ENABLED

// Only bother with this argument setup if dtrace is available

HS_DTRACE_PROBE_DECL8(hotspot, compiled__method__load,
  const char*, int, const char*, int, const char*, int, void*, size_t);

HS_DTRACE_PROBE_DECL6(hotspot, compiled__method__unload,
  char*, int, char*, int, char*, int);

#define DTRACE_METHOD_UNLOAD_PROBE(method)                                \
  {                                                                       \
    methodOop m = (method);                                               \
    if (m != NULL) {                                                      \
      Symbol* klass_name = m->klass_name();                               \
      Symbol* name = m->name();                                           \
      Symbol* signature = m->signature();                                 \
      HS_DTRACE_PROBE6(hotspot, compiled__method__unload,                 \
        klass_name->bytes(), klass_name->utf8_length(),                   \
        name->bytes(), name->utf8_length(),                               \
        signature->bytes(), signature->utf8_length());                    \
    }                                                                     \
  }

#else //  ndef DTRACE_ENABLED

#define DTRACE_METHOD_UNLOAD_PROBE(method)

#endif

bool nmethod::is_compiled_by_c1() const {
  if (compiler() == NULL || method() == NULL)  return false;  // can happen during debug printing
  if (is_native_method()) return false;
  return compiler()->is_c1();
}
bool nmethod::is_compiled_by_c2() const {
  if (compiler() == NULL || method() == NULL)  return false;  // can happen during debug printing
  if (is_native_method()) return false;
  return compiler()->is_c2();
}
bool nmethod::is_compiled_by_shark() const {
  if (is_native_method()) return false;
  assert(compiler() != NULL, "must be");
  return compiler()->is_shark();
}


//---------------------------------------------------------------------------------
// NMethod statistics
// They are printed under various flags, including:
//   PrintC1Statistics, PrintOptoStatistics, LogVMOutput, and LogCompilation.
// (In the latter two cases, they, like other stats, are printed to the log only.)

#ifndef PRODUCT
// These variables are put into one block to reduce relocations
// and make it simpler to print from the debugger.
static
struct nmethod_stats_struct {
  int nmethod_count;
  int total_size;
  int relocation_size;
  int consts_size;
  int insts_size;
  int stub_size;
  int scopes_data_size;
  int scopes_pcs_size;
  int dependencies_size;
  int handler_table_size;
  int nul_chk_table_size;
  int oops_size;

  void note_nmethod(nmethod* nm) {
    nmethod_count += 1;
    total_size          += nm->size();
    relocation_size     += nm->relocation_size();
    consts_size         += nm->consts_size();
    insts_size          += nm->insts_size();
    stub_size           += nm->stub_size();
    oops_size           += nm->oops_size();
    scopes_data_size    += nm->scopes_data_size();
    scopes_pcs_size     += nm->scopes_pcs_size();
    dependencies_size   += nm->dependencies_size();
    handler_table_size  += nm->handler_table_size();
    nul_chk_table_size  += nm->nul_chk_table_size();
  }
  void print_nmethod_stats() {
    if (nmethod_count == 0)  return;
    tty->print_cr("Statistics for %d bytecoded nmethods:", nmethod_count);
    if (total_size != 0)          tty->print_cr(" total in heap  = %d", total_size);
    if (relocation_size != 0)     tty->print_cr(" relocation     = %d", relocation_size);
    if (consts_size != 0)         tty->print_cr(" constants      = %d", consts_size);
    if (insts_size != 0)          tty->print_cr(" main code      = %d", insts_size);
    if (stub_size != 0)           tty->print_cr(" stub code      = %d", stub_size);
    if (oops_size != 0)           tty->print_cr(" oops           = %d", oops_size);
    if (scopes_data_size != 0)    tty->print_cr(" scopes data    = %d", scopes_data_size);
    if (scopes_pcs_size != 0)     tty->print_cr(" scopes pcs     = %d", scopes_pcs_size);
    if (dependencies_size != 0)   tty->print_cr(" dependencies   = %d", dependencies_size);
    if (handler_table_size != 0)  tty->print_cr(" handler table  = %d", handler_table_size);
    if (nul_chk_table_size != 0)  tty->print_cr(" nul chk table  = %d", nul_chk_table_size);
  }

  int native_nmethod_count;
  int native_total_size;
  int native_relocation_size;
  int native_insts_size;
  int native_oops_size;
  void note_native_nmethod(nmethod* nm) {
    native_nmethod_count += 1;
    native_total_size       += nm->size();
    native_relocation_size  += nm->relocation_size();
    native_insts_size       += nm->insts_size();
    native_oops_size        += nm->oops_size();
  }
  void print_native_nmethod_stats() {
    if (native_nmethod_count == 0)  return;
    tty->print_cr("Statistics for %d native nmethods:", native_nmethod_count);
    if (native_total_size != 0)       tty->print_cr(" N. total size  = %d", native_total_size);
    if (native_relocation_size != 0)  tty->print_cr(" N. relocation  = %d", native_relocation_size);
    if (native_insts_size != 0)       tty->print_cr(" N. main code   = %d", native_insts_size);
    if (native_oops_size != 0)        tty->print_cr(" N. oops        = %d", native_oops_size);
  }

  int pc_desc_resets;   // number of resets (= number of caches)
  int pc_desc_queries;  // queries to nmethod::find_pc_desc
  int pc_desc_approx;   // number of those which have approximate true
  int pc_desc_repeats;  // number of _last_pc_desc hits
  int pc_desc_hits;     // number of LRU cache hits
  int pc_desc_tests;    // total number of PcDesc examinations
  int pc_desc_searches; // total number of quasi-binary search steps
  int pc_desc_adds;     // number of LRU cache insertions

  void print_pc_stats() {
    tty->print_cr("PcDesc Statistics:  %d queries, %.2f comparisons per query",
                  pc_desc_queries,
                  (double)(pc_desc_tests + pc_desc_searches)
                  / pc_desc_queries);
    tty->print_cr("  caches=%d queries=%d/%d, hits=%d+%d, tests=%d+%d, adds=%d",
                  pc_desc_resets,
                  pc_desc_queries, pc_desc_approx,
                  pc_desc_repeats, pc_desc_hits,
                  pc_desc_tests, pc_desc_searches, pc_desc_adds);
  }
} nmethod_stats;
#endif //PRODUCT


//---------------------------------------------------------------------------------
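
// Each ExceptionCache entry covers one exception type and remembers up to
// cache_size pc/handler pairs for it.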
ExceptionCache::ExceptionCache(Handle exception, address pc, address handler) {
  assert(pc != NULL, "Must be non null");
  assert(exception.not_null(), "Must be non null");
  assert(handler != NULL, "Must be non null");

  _count = 0;
  _exception_type = exception->klass();
  _next = NULL;

  add_address_and_handler(pc,handler);
}


address ExceptionCache::match(Handle exception, address pc) {
  assert(pc != NULL,"Must be non null");
  assert(exception.not_null(),"Must be non null");
  if (exception->klass() == exception_type()) {
    return (test_address(pc));
  }

  return NULL;
}


bool ExceptionCache::match_exception_with_space(Handle exception) {
  assert(exception.not_null(),"Must be non null");
  if (exception->klass() == exception_type() && count() < cache_size) {
    return true;
  }
  return false;
}
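
// Linear scan of the pc/handler pairs recorded in this entry; returns the
// handler for addr, or NULL if addr has not been recorded.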
address ExceptionCache::test_address(address addr) {
  for (int i=0; i<count(); i++) {
    if (pc_at(i) == addr) {
      return handler_at(i);
    }
  }
  return NULL;
}
bool ExceptionCache::add_address_and_handler(address addr, address handler) {
  if (test_address(addr) == handler) return true;
  if (count() < cache_size) {
    set_pc_at(count(),addr);
    set_handler_at(count(), handler);
    increment_count();
    return true;
  }
  return false;
}

// These methods are private and are used to manipulate the exception
// cache directly.
ExceptionCache* nmethod::exception_cache_entry_for_exception(Handle exception) {
  ExceptionCache* ec = exception_cache();
  while (ec != NULL) {
    if (ec->match_exception_with_space(exception)) {
      return ec;
    }
    ec = ec->next();
  }
  return NULL;
}

//-----------------------------------------------------------------------------


// Helper used by both find_pc_desc methods.
static inline bool match_desc(PcDesc* pc, int pc_offset, bool approximate) {
  NOT_PRODUCT(++nmethod_stats.pc_desc_tests);
  if (!approximate)
    return pc->pc_offset() == pc_offset;
  else
    return (pc-1)->pc_offset() < pc_offset && pc_offset <= pc->pc_offset();
}

void PcDescCache::reset_to(PcDesc* initial_pc_desc) {
  if (initial_pc_desc == NULL) {
    _last_pc_desc = NULL;  // native method
    return;
  }
  NOT_PRODUCT(++nmethod_stats.pc_desc_resets);
  // reset the cache by filling it with benign (non-null) values
  assert(initial_pc_desc->pc_offset() < 0, "must be sentinel");
  _last_pc_desc = initial_pc_desc + 1;  // first valid one is after sentinel
  for (int i = 0; i < cache_size; i++)
    _pc_descs[i] = initial_pc_desc;
}

PcDesc* PcDescCache::find_pc_desc(int pc_offset, bool approximate) {
  NOT_PRODUCT(++nmethod_stats.pc_desc_queries);
  NOT_PRODUCT(if (approximate)  ++nmethod_stats.pc_desc_approx);

  // In order to prevent race conditions do not load cache elements
  // repeatedly, but use a local copy:
  PcDesc* res;

  // Step one:  Check the most recently returned value.
  res = _last_pc_desc;
  if (res == NULL)  return NULL;  // native method; no PcDescs at all
  if (match_desc(res, pc_offset, approximate)) {
    NOT_PRODUCT(++nmethod_stats.pc_desc_repeats);
    return res;
  }

  // Step two:  Check the LRU cache.
  for (int i = 0; i < cache_size; i++) {
    res = _pc_descs[i];
    if (res->pc_offset() < 0)  break;  // optimization: skip empty cache
    if (match_desc(res, pc_offset, approximate)) {
      NOT_PRODUCT(++nmethod_stats.pc_desc_hits);
      _last_pc_desc = res;  // record this cache hit in case of repeat
      return res;
    }
  }

  // Report failure.
  return NULL;
}

void PcDescCache::add_pc_desc(PcDesc* pc_desc) {
  NOT_PRODUCT(++nmethod_stats.pc_desc_adds);
  // Update the LRU cache by shifting pc_desc forward:
  for (int i = 0; i < cache_size; i++)  {
    PcDesc* next = _pc_descs[i];
    _pc_descs[i] = pc_desc;
    pc_desc = next;
  }
  // Note:  Do not update _last_pc_desc.  It fronts for the LRU cache.
}

// adjust pcs_size so that it is a multiple of both oopSize and
// sizeof(PcDesc) (assumes that if sizeof(PcDesc) is not a multiple
// of oopSize, then 2*sizeof(PcDesc) is)
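// (For instance, with sizeof(PcDesc) == 12 and oopSize == 8, an odd
// PcDesc count n makes 12*n indivisible by 8; padding by one more PcDesc
// gives 12*(n+1), and with n+1 even that is divisible by both.)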
static int  adjust_pcs_size(int pcs_size) {
  int nsize = round_to(pcs_size,   oopSize);
  if ((nsize % sizeof(PcDesc)) != 0) {
    nsize = pcs_size + sizeof(PcDesc);
  }
  assert((nsize %  oopSize) == 0, "correct alignment");
  return nsize;
}

//-----------------------------------------------------------------------------


void nmethod::add_exception_cache_entry(ExceptionCache* new_entry) {
  assert(ExceptionCache_lock->owned_by_self(),"Must hold the ExceptionCache_lock");
  assert(new_entry != NULL,"Must be non null");
  assert(new_entry->next() == NULL, "Must be null");

  if (exception_cache() != NULL) {
    new_entry->set_next(exception_cache());
  }
  set_exception_cache(new_entry);
}

void nmethod::remove_from_exception_cache(ExceptionCache* ec) {
  ExceptionCache* prev = NULL;
  ExceptionCache* curr = exception_cache();
  assert(curr != NULL, "nothing to remove");
  // find the previous and next entry of ec
  while (curr != ec) {
    prev = curr;
    curr = curr->next();
    assert(curr != NULL, "ExceptionCache not found");
  }
  // now: curr == ec
  ExceptionCache* next = curr->next();
  if (prev == NULL) {
    set_exception_cache(next);
  } else {
    prev->set_next(next);
  }
  delete curr;
}

// These are the public access methods for the exception cache.
address nmethod::handler_for_exception_and_pc(Handle exception, address pc) {
  // We never grab a lock to read the exception cache, so we may
  // have false negatives. This is okay, as it can only happen during
  // the first few exception lookups for a given nmethod.
  ExceptionCache* ec = exception_cache();
  while (ec != NULL) {
    address ret_val;
    if ((ret_val = ec->match(exception,pc)) != NULL) {
      return ret_val;
    }
    ec = ec->next();
  }
  return NULL;
}

void nmethod::add_handler_for_exception_and_pc(Handle exception, address pc, address handler) {
  // There are potential race conditions during exception cache updates, so we
  // must own the ExceptionCache_lock before doing ANY modifications. Because
  // we don't lock during reads, it is possible to have several threads attempt
  // to update the cache with the same data. We need to check for already inserted
  // copies of the current data before adding it.

  MutexLocker ml(ExceptionCache_lock);
  ExceptionCache* target_entry = exception_cache_entry_for_exception(exception);

  if (target_entry == NULL || !target_entry->add_address_and_handler(pc,handler)) {
    target_entry = new ExceptionCache(exception,pc,handler);
    add_exception_cache_entry(target_entry);
  }
}


//-------------end of code for ExceptionCache--------------

int nmethod::total_size() const {
  return
    consts_size()        +
    insts_size()         +
    stub_size()          +
    scopes_data_size()   +
    scopes_pcs_size()    +
    handler_table_size() +
    nul_chk_table_size();
}

const char* nmethod::compile_kind() const {
  if (is_osr_method())     return "osr";
  if (method() != NULL && is_native_method())  return "c2n";
  return NULL;
}

// Fill in default values for various flag fields
void nmethod::init_defaults() {
  _state                      = alive;
  _marked_for_reclamation     = 0;
  _has_flushed_dependencies   = 0;
  _speculatively_disconnected = 0;
  _has_unsafe_access          = 0;
  _has_method_handle_invokes  = 0;
  _marked_for_deoptimization  = 0;
  _lock_count                 = 0;
  _stack_traversal_mark       = 0;
  _unload_reported            = false;           // jvmti state

  NOT_PRODUCT(_has_debug_info = false);
#ifdef ASSERT
  _oops_are_stale             = false;
#endif

  _oops_do_mark_link       = NULL;
  _jmethod_id              = NULL;
  _osr_link                = NULL;
  _scavenge_root_link      = NULL;
  _scavenge_root_state     = 0;
  _saved_nmethod_link      = NULL;
  _compiler                = NULL;

#ifdef HAVE_DTRACE_H
  _trap_offset             = 0;
#endif // def HAVE_DTRACE_H
}
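
// Factory for native-method wrapper nmethods.  A native nmethod carries no
// scope, dependency, or exception-handler data; in the constructor further
// below, all of those section offsets collapse to the same value.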
nmethod* nmethod::new_native_nmethod(methodHandle method,
  CodeBuffer *code_buffer,
  int vep_offset,
  int frame_complete,
  int frame_size,
  ByteSize basic_lock_owner_sp_offset,
  ByteSize basic_lock_sp_offset,
  OopMapSet* oop_maps) {
  // create nmethod
  nmethod* nm = NULL;
  {
    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    int native_nmethod_size = allocation_size(code_buffer, sizeof(nmethod));
    CodeOffsets offsets;
    offsets.set_value(CodeOffsets::Verified_Entry, vep_offset);
    offsets.set_value(CodeOffsets::Frame_Complete, frame_complete);
    nm = new (native_nmethod_size)
      nmethod(method(), native_nmethod_size, &offsets,
              code_buffer, frame_size,
              basic_lock_owner_sp_offset, basic_lock_sp_offset,
              oop_maps);
    NOT_PRODUCT(if (nm != NULL)  nmethod_stats.note_native_nmethod(nm));
    if (PrintAssembly && nm != NULL)
      Disassembler::decode(nm);
  }
  // verify nmethod
  debug_only(if (nm) nm->verify();) // might block

  if (nm != NULL) {
    nm->log_new_nmethod();
  }

  return nm;
}

#ifdef HAVE_DTRACE_H
nmethod* nmethod::new_dtrace_nmethod(methodHandle method,
                                     CodeBuffer *code_buffer,
                                     int vep_offset,
                                     int trap_offset,
                                     int frame_complete,
                                     int frame_size) {
  // create nmethod
  nmethod* nm = NULL;
  {
    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    int nmethod_size = allocation_size(code_buffer, sizeof(nmethod));
    CodeOffsets offsets;
    offsets.set_value(CodeOffsets::Verified_Entry, vep_offset);
    offsets.set_value(CodeOffsets::Dtrace_trap, trap_offset);
    offsets.set_value(CodeOffsets::Frame_Complete, frame_complete);

    nm = new (nmethod_size) nmethod(method(), nmethod_size, &offsets, code_buffer, frame_size);

    NOT_PRODUCT(if (nm != NULL)  nmethod_stats.note_nmethod(nm));
    if (PrintAssembly && nm != NULL)
      Disassembler::decode(nm);
  }
  // verify nmethod
  debug_only(if (nm) nm->verify();) // might block

  if (nm != NULL) {
    nm->log_new_nmethod();
  }

  return nm;
}

#endif // def HAVE_DTRACE_H

nmethod* nmethod::new_nmethod(methodHandle method,
  int compile_id,
  int entry_bci,
  CodeOffsets* offsets,
  int orig_pc_offset,
  DebugInformationRecorder* debug_info,
  Dependencies* dependencies,
  CodeBuffer* code_buffer, int frame_size,
  OopMapSet* oop_maps,
  ExceptionHandlerTable* handler_table,
  ImplicitExceptionTable* nul_chk_table,
  AbstractCompiler* compiler,
  int comp_level
)
{
  assert(debug_info->oop_recorder() == code_buffer->oop_recorder(), "shared OR");
  // create nmethod
  nmethod* nm = NULL;
  { MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    int nmethod_size =
      allocation_size(code_buffer, sizeof(nmethod))
      + adjust_pcs_size(debug_info->pcs_size())
      + round_to(dependencies->size_in_bytes() , oopSize)
      + round_to(handler_table->size_in_bytes(), oopSize)
      + round_to(nul_chk_table->size_in_bytes(), oopSize)
      + round_to(debug_info->data_size()       , oopSize);
    nm = new (nmethod_size)
      nmethod(method(), nmethod_size, compile_id, entry_bci, offsets,
              orig_pc_offset, debug_info, dependencies, code_buffer, frame_size,
              oop_maps,
              handler_table,
              nul_chk_table,
              compiler,
              comp_level);
    if (nm != NULL) {
      // To make dependency checking during class loading fast, record
      // the nmethod dependencies in the classes it is dependent on.
      // This allows the dependency checking code to simply walk the
      // class hierarchy above the loaded class, checking only nmethods
      // which are dependent on those classes.  The slow way is to
      // check every nmethod for dependencies, which makes it linear in
      // the number of methods compiled.  For applications with a lot of
      // classes the slow way is too slow.
      for (Dependencies::DepStream deps(nm); deps.next(); ) {
        klassOop klass = deps.context_type();
        if (klass == NULL)  continue;  // ignore things like evol_method

        // record this nmethod as dependent on this klass
        instanceKlass::cast(klass)->add_dependent_nmethod(nm);
      }
    }
    NOT_PRODUCT(if (nm != NULL)  nmethod_stats.note_nmethod(nm));
    if (PrintAssembly && nm != NULL)
      Disassembler::decode(nm);
  }

  // verify nmethod
  debug_only(if (nm) nm->verify();) // might block

  if (nm != NULL) {
    nm->log_new_nmethod();
  }

  // done
  return nm;
}

// For native wrappers
nmethod::nmethod(
  methodOop method,
  int nmethod_size,
  CodeOffsets* offsets,
  CodeBuffer* code_buffer,
  int frame_size,
  ByteSize basic_lock_owner_sp_offset,
  ByteSize basic_lock_sp_offset,
  OopMapSet* oop_maps )
  : CodeBlob("native nmethod", code_buffer, sizeof(nmethod),
             nmethod_size, offsets->value(CodeOffsets::Frame_Complete), frame_size, oop_maps),
  _native_receiver_sp_offset(basic_lock_owner_sp_offset),
  _native_basic_lock_sp_offset(basic_lock_sp_offset)
{
  {
    debug_only(No_Safepoint_Verifier nsv;)
    assert_locked_or_safepoint(CodeCache_lock);

    init_defaults();
    _method                  = method;
    _entry_bci               = InvocationEntryBci;
    // We have no exception handler or deopt handler; make the values
    // something that will never match a pc, like the nmethod vtable entry.
    _exception_offset        = 0;
    _deoptimize_offset       = 0;
    _deoptimize_mh_offset    = 0;
    _orig_pc_offset          = 0;

    _consts_offset           = data_offset();
    _stub_offset             = data_offset();
    _oops_offset             = data_offset();
    _scopes_data_offset      = _oops_offset          + round_to(code_buffer->total_oop_size(), oopSize);
    _scopes_pcs_offset       = _scopes_data_offset;
    _dependencies_offset     = _scopes_pcs_offset;
    _handler_table_offset    = _dependencies_offset;
    _nul_chk_table_offset    = _handler_table_offset;
    _nmethod_end_offset      = _nul_chk_table_offset;
    _compile_id              = 0;  // default
    _comp_level              = CompLevel_none;
    _entry_point             = code_begin()          + offsets->value(CodeOffsets::Entry);
    _verified_entry_point    = code_begin()          + offsets->value(CodeOffsets::Verified_Entry);
    _osr_entry_point         = NULL;
    _exception_cache         = NULL;
    _pc_desc_cache.reset_to(NULL);

    code_buffer->copy_oops_to(this);
    debug_only(verify_scavenge_root_oops());
    CodeCache::commit(this);
  }

  if (PrintNativeNMethods || PrintDebugInfo || PrintRelocations || PrintDependencies) {
    ttyLocker ttyl;  // keep the following output all in one block
    // This output goes directly to the tty, not the compiler log.
    // To enable tools to match it up with the compilation activity,
    // be sure to tag this tty output with the compile ID.
    if (xtty != NULL) {
      xtty->begin_head("print_native_nmethod");
      xtty->method(_method);
      xtty->stamp();
      xtty->end_head(" address='" INTPTR_FORMAT "'", (intptr_t) this);
    }
    // print the header part first
    print();
    // then print the requested information
    if (PrintNativeNMethods) {
      print_code();
      oop_maps->print();
    }
    if (PrintRelocations) {
      print_relocations();
    }
    if (xtty != NULL) {
      xtty->tail("print_native_nmethod");
    }
  }
  Events::log("Create nmethod " INTPTR_FORMAT, this);
}

// For dtrace wrappers
#ifdef HAVE_DTRACE_H
nmethod::nmethod(
  methodOop method,
  int nmethod_size,
  CodeOffsets* offsets,
  CodeBuffer* code_buffer,
  int frame_size)
  : CodeBlob("dtrace nmethod", code_buffer, sizeof(nmethod),
             nmethod_size, offsets->value(CodeOffsets::Frame_Complete), frame_size, NULL),
  _native_receiver_sp_offset(in_ByteSize(-1)),
  _native_basic_lock_sp_offset(in_ByteSize(-1))
{
  {
    debug_only(No_Safepoint_Verifier nsv;)
    assert_locked_or_safepoint(CodeCache_lock);

    init_defaults();
    _method                  = method;
    _entry_bci               = InvocationEntryBci;
    // We have no exception handler or deopt handler; make the values
    // something that will never match a pc, like the nmethod vtable entry.
    _exception_offset        = 0;
    _deoptimize_offset       = 0;
    _deoptimize_mh_offset    = 0;
    _unwind_handler_offset   = -1;
    _trap_offset             = offsets->value(CodeOffsets::Dtrace_trap);
    _orig_pc_offset          = 0;
    _consts_offset           = data_offset();
    _stub_offset             = data_offset();
    _oops_offset             = data_offset();
    _scopes_data_offset      = _oops_offset          + round_to(code_buffer->total_oop_size(), oopSize);
    _scopes_pcs_offset       = _scopes_data_offset;
    _dependencies_offset     = _scopes_pcs_offset;
    _handler_table_offset    = _dependencies_offset;
    _nul_chk_table_offset    = _handler_table_offset;
    _nmethod_end_offset      = _nul_chk_table_offset;
    _compile_id              = 0;  // default
    _comp_level              = CompLevel_none;
    _entry_point             = code_begin()          + offsets->value(CodeOffsets::Entry);
    _verified_entry_point    = code_begin()          + offsets->value(CodeOffsets::Verified_Entry);
    _osr_entry_point         = NULL;
    _exception_cache         = NULL;
    _pc_desc_cache.reset_to(NULL);

    code_buffer->copy_oops_to(this);
    debug_only(verify_scavenge_root_oops());
    CodeCache::commit(this);
  }

  if (PrintNMethods || PrintDebugInfo || PrintRelocations || PrintDependencies) {
    ttyLocker ttyl;  // keep the following output all in one block
    // This output goes directly to the tty, not the compiler log.
    // To enable tools to match it up with the compilation activity,
    // be sure to tag this tty output with the compile ID.
    if (xtty != NULL) {
      xtty->begin_head("print_dtrace_nmethod");
      xtty->method(_method);
      xtty->stamp();
      xtty->end_head(" address='" INTPTR_FORMAT "'", (intptr_t) this);
    }
    // print the header part first
    print();
    // then print the requested information
    if (PrintNMethods) {
      print_code();
    }
    if (PrintRelocations) {
      print_relocations();
    }
    if (xtty != NULL) {
      xtty->tail("print_dtrace_nmethod");
    }
  }
  Events::log("Create nmethod " INTPTR_FORMAT, this);
}
#endif // def HAVE_DTRACE_H

void* nmethod::operator new(size_t size, int nmethod_size) {
  // Always leave some room in the CodeCache for I2C/C2I adapters
  if (CodeCache::unallocated_capacity() < CodeCacheMinimumFreeSpace) return NULL;
  return CodeCache::allocate(nmethod_size);
}


nmethod::nmethod(
  methodOop method,
  int nmethod_size,
  int compile_id,
  int entry_bci,
  CodeOffsets* offsets,
  int orig_pc_offset,
  DebugInformationRecorder* debug_info,
  Dependencies* dependencies,
  CodeBuffer *code_buffer,
  int frame_size,
  OopMapSet* oop_maps,
  ExceptionHandlerTable* handler_table,
  ImplicitExceptionTable* nul_chk_table,
  AbstractCompiler* compiler,
  int comp_level
  )
  : CodeBlob("nmethod", code_buffer, sizeof(nmethod),
             nmethod_size, offsets->value(CodeOffsets::Frame_Complete), frame_size, oop_maps),
  _native_receiver_sp_offset(in_ByteSize(-1)),
  _native_basic_lock_sp_offset(in_ByteSize(-1))
{
  assert(debug_info->oop_recorder() == code_buffer->oop_recorder(), "shared OR");
  {
    debug_only(No_Safepoint_Verifier nsv;)
    assert_locked_or_safepoint(CodeCache_lock);

    init_defaults();
    _method                  = method;
    _entry_bci               = entry_bci;
    _compile_id              = compile_id;
    _comp_level              = comp_level;
    _compiler                = compiler;
    _orig_pc_offset          = orig_pc_offset;

    // Section offsets
    _consts_offset           = content_offset()      + code_buffer->total_offset_of(code_buffer->consts());
    _stub_offset             = content_offset()      + code_buffer->total_offset_of(code_buffer->stubs());

    // Exception handler and deopt handler are in the stub section
    assert(offsets->value(CodeOffsets::Exceptions) != -1, "must be set");
    assert(offsets->value(CodeOffsets::Deopt     ) != -1, "must be set");
    _exception_offset        = _stub_offset          + offsets->value(CodeOffsets::Exceptions);
    _deoptimize_offset       = _stub_offset          + offsets->value(CodeOffsets::Deopt);
    if (offsets->value(CodeOffsets::DeoptMH) != -1) {
      _deoptimize_mh_offset  = _stub_offset          + offsets->value(CodeOffsets::DeoptMH);
    } else {
      _deoptimize_mh_offset  = -1;
    }
    if (offsets->value(CodeOffsets::UnwindHandler) != -1) {
      _unwind_handler_offset = code_offset()         + offsets->value(CodeOffsets::UnwindHandler);
    } else {
      _unwind_handler_offset = -1;
    }

    _oops_offset             = data_offset();
    _scopes_data_offset      = _oops_offset          + round_to(code_buffer->total_oop_size (), oopSize);
    _scopes_pcs_offset       = _scopes_data_offset   + round_to(debug_info->data_size       (), oopSize);
    _dependencies_offset     = _scopes_pcs_offset    + adjust_pcs_size(debug_info->pcs_size());
    _handler_table_offset    = _dependencies_offset  + round_to(dependencies->size_in_bytes (), oopSize);
    _nul_chk_table_offset    = _handler_table_offset + round_to(handler_table->size_in_bytes(), oopSize);
    _nmethod_end_offset      = _nul_chk_table_offset + round_to(nul_chk_table->size_in_bytes(), oopSize);

    _entry_point             = code_begin()          + offsets->value(CodeOffsets::Entry);
    _verified_entry_point    = code_begin()          + offsets->value(CodeOffsets::Verified_Entry);
    _osr_entry_point         = code_begin()          + offsets->value(CodeOffsets::OSR_Entry);
    _exception_cache         = NULL;
    _pc_desc_cache.reset_to(scopes_pcs_begin());

    // Copy contents of ScopeDescRecorder to nmethod
    code_buffer->copy_oops_to(this);
    debug_info->copy_to(this);
    dependencies->copy_to(this);
    if (ScavengeRootsInCode && detect_scavenge_root_oops()) {
      CodeCache::add_scavenge_root_nmethod(this);
    }
    debug_only(verify_scavenge_root_oops());

    CodeCache::commit(this);

    // Copy contents of ExceptionHandlerTable to nmethod
    handler_table->copy_to(this);
    nul_chk_table->copy_to(this);

    // We use the entry point information to determine whether a method is
    // static or non-static.
    assert(compiler->is_c2() ||
           _method->is_static() == (entry_point() == _verified_entry_point),
           " entry points must be same for static methods and vice versa");
  }

  bool printnmethods = PrintNMethods
    || CompilerOracle::should_print(_method)
    || CompilerOracle::has_option_string(_method, "PrintNMethods");
  if (printnmethods || PrintDebugInfo || PrintRelocations || PrintDependencies || PrintExceptionHandlers) {
    print_nmethod(printnmethods);
  }

  // Note: Do not verify in here as the CodeCache_lock is
  //       taken which would conflict with the CompiledIC_lock
  //       which is taken during the verification of call sites.
  //       (was bug - gri 10/25/99)

  Events::log("Create nmethod " INTPTR_FORMAT, this);
}


// Print a short set of xml attributes to identify this nmethod.  The
// output should be embedded in some other element.
void nmethod::log_identity(xmlStream* log) const {
  log->print(" compile_id='%d'", compile_id());
  const char* nm_kind = compile_kind();
  if (nm_kind != NULL)  log->print(" compile_kind='%s'", nm_kind);
  if (compiler() != NULL) {
    log->print(" compiler='%s'", compiler()->name());
  }
  if (TieredCompilation) {
    log->print(" level='%d'", comp_level());
  }
}
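

// Emits a name_offset='%d' attribute, but only when the named section is
// non-empty.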
#define LOG_OFFSET(log, name)                    \
  if ((intptr_t)name##_end() - (intptr_t)name##_begin()) \
    log->print(" " XSTR(name) "_offset='%d'"    , \
               (intptr_t)name##_begin() - (intptr_t)this)


void nmethod::log_new_nmethod() const {
  if (LogCompilation && xtty != NULL) {
    ttyLocker ttyl;
    HandleMark hm;
    xtty->begin_elem("nmethod");
    log_identity(xtty);
    xtty->print(" entry='" INTPTR_FORMAT "' size='%d'", code_begin(), size());
    xtty->print(" address='" INTPTR_FORMAT "'", (intptr_t) this);

    LOG_OFFSET(xtty, relocation);
    LOG_OFFSET(xtty, consts);
    LOG_OFFSET(xtty, insts);
    LOG_OFFSET(xtty, stub);
    LOG_OFFSET(xtty, scopes_data);
    LOG_OFFSET(xtty, scopes_pcs);
    LOG_OFFSET(xtty, dependencies);
    LOG_OFFSET(xtty, handler_table);
    LOG_OFFSET(xtty, nul_chk_table);
    LOG_OFFSET(xtty, oops);

    xtty->method(method());
    xtty->stamp();
    xtty->end_elem();
  }
}

#undef LOG_OFFSET


void nmethod::print_compilation(outputStream *st, const char *method_name, const char *title,
                                methodOop method, bool is_blocking, int compile_id, int bci, int comp_level) {
  bool is_synchronized = false, has_xhandler = false, is_native = false;
  int code_size = -1;
  if (method != NULL) {
    is_synchronized = method->is_synchronized();
    has_xhandler    = method->has_exception_handler();
    is_native       = method->is_native();
    code_size       = method->code_size();
  }
  // print compilation number
  st->print("%7d %3d", (int)tty->time_stamp().milliseconds(), compile_id);

  // print method attributes
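  //   '%' => OSR compilation, 's' => synchronized method,
  //   '!' => method has exception handlers, 'b' => blocking compilation,
  //   'n' => native method (a blank means the flag does not apply)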
  const bool is_osr = bci != InvocationEntryBci;
  const char blocking_char  = is_blocking     ? 'b' : ' ';
  const char compile_type   = is_osr          ? '%' : ' ';
  const char sync_char      = is_synchronized ? 's' : ' ';
  const char exception_char = has_xhandler    ? '!' : ' ';
  const char native_char    = is_native       ? 'n' : ' ';
  st->print("%c%c%c%c%c ", compile_type, sync_char, exception_char, blocking_char, native_char);
  if (TieredCompilation) {
    st->print("%d ", comp_level);
  }

  // print optional title
  bool do_nl = false;
  if (title != NULL) {
    int tlen = (int) strlen(title);
    // strip a trailing newline; remember to emit the newline at the end
    if (tlen > 0 && title[tlen-1] == '\n') { tlen--; do_nl = true; }
    st->print("%.*s", tlen, title);
  } else {
    do_nl = true;
  }

  // print method name string if given
  if (method_name != NULL) {
    st->print(method_name);
  } else {
    // otherwise ask the method to print itself
    if (method != NULL && !Universe::heap()->is_gc_active()) {
      method->print_short_name(st);
    } else {
      st->print("(method)");
    }
  }

  if (method != NULL) {
    // print osr_bci if any
    if (is_osr) st->print(" @ %d", bci);
    // print method size
    st->print(" (%d bytes)", code_size);
  }
  if (do_nl) st->cr();
}

// Print out more verbose output usually for a newly created nmethod.
void nmethod::print_on(outputStream* st, const char* title) const {
  if (st != NULL) {
    ttyLocker ttyl;
    print_compilation(st, /*method_name*/NULL, title,
                      method(), /*is_blocking*/false,
                      compile_id(),
                      is_osr_method() ? osr_entry_bci() : InvocationEntryBci,
                      comp_level());
    if (WizardMode) st->print(" (" INTPTR_FORMAT ")", this);
  }
}


void nmethod::print_nmethod(bool printmethod) {
  ttyLocker ttyl;  // keep the following output all in one block
  if (xtty != NULL) {
    xtty->begin_head("print_nmethod");
    xtty->stamp();
    xtty->end_head();
  }
  // print the header part first
  print();
  // then print the requested information
  if (printmethod) {
    print_code();
    print_pcs();
    oop_maps()->print();
  }
  if (PrintDebugInfo) {
    print_scopes();
  }
  if (PrintRelocations) {
    print_relocations();
  }
  if (PrintDependencies) {
    print_dependencies();
  }
  if (PrintExceptionHandlers) {
    print_handler_table();
    print_nul_chk_table();
  }
  if (xtty != NULL) {
    xtty->tail("print_nmethod");
  }
}


// Promote one word from an assembly-time handle to a live embedded oop.
inline void nmethod::initialize_immediate_oop(oop* dest, jobject handle) {
  if (handle == NULL ||
      // As a special case, IC oops are initialized to 1 or -1.
      handle == (jobject) Universe::non_oop_word()) {
    (*dest) = (oop) handle;
  } else {
    (*dest) = JNIHandles::resolve_non_null(handle);
  }
}


void nmethod::copy_oops(GrowableArray<jobject>* array) {
  //assert(oops_size() == 0, "do this handshake just once, please");
  int length = array->length();
  assert((address)(oops_begin() + length) <= data_end(), "oops big enough");
  oop* dest = oops_begin();
  for (int index = 0 ; index < length; index++) {
    initialize_immediate_oop(&dest[index], array->at(index));
  }

  // Now we can fix up all the oops in the code.  We need to do this
  // in the code because the assembler uses jobjects as placeholders.
  // The code and relocations have already been initialized by the
  // CodeBlob constructor, so it is valid even at this early point to
  // iterate over relocations and patch the code.
  fix_oop_relocations(NULL, NULL, /*initialize_immediates=*/ true);
}
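

// Answer whether pc lies at a safepoint poll-return site, according to the
// relocation information.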
bool nmethod::is_at_poll_return(address pc) {
  RelocIterator iter(this, pc, pc+1);
  while (iter.next()) {
    if (iter.type() == relocInfo::poll_return_type)
      return true;
  }
  return false;
}


bool nmethod::is_at_poll_or_poll_return(address pc) {
  RelocIterator iter(this, pc, pc+1);
  while (iter.next()) {
    relocInfo::relocType t = iter.type();
    if (t == relocInfo::poll_return_type || t == relocInfo::poll_type)
      return true;
  }
  return false;
}


void nmethod::fix_oop_relocations(address begin, address end, bool initialize_immediates) {
  // re-patch all oop-bearing instructions, just in case some oops moved
  RelocIterator iter(this, begin, end);
  while (iter.next()) {
    if (iter.type() == relocInfo::oop_type) {
      oop_Relocation* reloc = iter.oop_reloc();
      if (initialize_immediates && reloc->oop_is_immediate()) {
        oop* dest = reloc->oop_addr();
        initialize_immediate_oop(dest, (jobject) *dest);
      }
      // Refresh the oop-related bits of this instruction.
      reloc->fix_oop_relocation();
    }

    // There must not be any interfering patches or breakpoints.
    assert(!(iter.type() == relocInfo::breakpoint_type
             && iter.breakpoint_reloc()->active()),
           "no active breakpoint");
  }
}


ScopeDesc* nmethod::scope_desc_at(address pc) {
  PcDesc* pd = pc_desc_at(pc);
  guarantee(pd != NULL, "scope must be present");
  return new ScopeDesc(this, pd->scope_decode_offset(),
                       pd->obj_decode_offset(), pd->should_reexecute(),
                       pd->return_oop());
}
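

// Reset every inline cache in this nmethod to its initial state.  The
// assert below enforces that this happens only at a safepoint.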
void nmethod::clear_inline_caches() {
  assert(SafepointSynchronize::is_at_safepoint(), "cleaning of IC's only allowed at safepoint");
  if (is_zombie()) {
    return;
  }

  RelocIterator iter(this);
  while (iter.next()) {
    iter.reloc()->clear_inline_cache();
  }
}


void nmethod::cleanup_inline_caches() {

  assert_locked_or_safepoint(CompiledIC_lock);

  // If the method is not entrant or zombie then a JMP is plastered over the
  // first few bytes.  If an oop in the old code was there, that oop
  // should not get GC'd.  Skip the first few bytes of oops on
  // not-entrant methods.
  address low_boundary = verified_entry_point();
  if (!is_in_use()) {
    low_boundary += NativeJump::instruction_size;
    // %%% Note:  On SPARC we patch only a 4-byte trap, not a full NativeJump.
    // This means that the low_boundary is going to be a little too high.
    // This shouldn't matter, since oops of non-entrant methods are never used.
    // In fact, why are we bothering to look at oops in a non-entrant method??
  }

  // Find all calls in an nmethod, and clear the ones that point to zombie methods
  ResourceMark rm;
  RelocIterator iter(this, low_boundary);
  while(iter.next()) {
    switch(iter.type()) {
      case relocInfo::virtual_call_type:
      case relocInfo::opt_virtual_call_type: {
        CompiledIC *ic = CompiledIC_at(iter.reloc());
        // Ok to look up references to zombies here
        CodeBlob *cb = CodeCache::find_blob_unsafe(ic->ic_destination());
        if( cb != NULL && cb->is_nmethod() ) {
          nmethod* nm = (nmethod*)cb;
          // Clean inline caches pointing to both zombie and not_entrant methods
          if (!nm->is_in_use() || (nm->method()->code() != nm)) ic->set_to_clean();
        }
        break;
      }
      case relocInfo::static_call_type: {
        CompiledStaticCall *csc = compiledStaticCall_at(iter.reloc());
        CodeBlob *cb = CodeCache::find_blob_unsafe(csc->destination());
        if( cb != NULL && cb->is_nmethod() ) {
          nmethod* nm = (nmethod*)cb;
          // Clean inline caches pointing to both zombie and not_entrant methods
          if (!nm->is_in_use() || (nm->method()->code() != nm)) csc->set_to_clean();
        }
        break;
      }
    }
  }
}

// This is a private interface with the sweeper.
void nmethod::mark_as_seen_on_stack() {
  assert(is_not_entrant(), "must be a non-entrant method");
  // Set the traversal mark to ensure that the sweeper does 2
  // cleaning passes before moving to zombie.
  set_stack_traversal_mark(NMethodSweeper::traversal_count());
}

// Tell if a non-entrant method can be converted to a zombie (i.e., there
// are no activations on the stack).
bool nmethod::can_not_entrant_be_converted() {
  assert(is_not_entrant(), "must be a non-entrant method");

  // Since the nmethod sweeper only does partial sweep the sweeper's traversal
  // count can be greater than the stack traversal count before it hits the
  // nmethod for the second time.
  return stack_traversal_mark()+1 < NMethodSweeper::traversal_count();
}

void nmethod::inc_decompile_count() {
  if (!is_compiled_by_c2()) return;
  // Could be gated by ProfileTraps, but do not bother...
  methodOop m = method();
  if (m == NULL)  return;
  methodDataOop mdo = m->method_data();
  if (mdo == NULL)  return;
  // There is a benign race here.  See comments in methodDataOop.hpp.
  mdo->inc_decompile_count();
}

void nmethod::make_unloaded(BoolObjectClosure* is_alive, oop cause) {

  post_compiled_method_unload();

  // Since this nmethod is being unloaded, make sure that dependencies
  // recorded in instanceKlasses get flushed and pass non-NULL closure to
  // indicate that this work is being done during a GC.
  assert(Universe::heap()->is_gc_active(), "should only be called during gc");
  assert(is_alive != NULL, "Should be non-NULL");
  // A non-NULL is_alive closure indicates that this is being called during GC.
  flush_dependencies(is_alive);

  // Break cycle between nmethod & method
  if (TraceClassUnloading && WizardMode) {
    tty->print_cr("[Class unloading: Making nmethod " INTPTR_FORMAT
                  " unloadable], methodOop(" INTPTR_FORMAT
                  "), cause(" INTPTR_FORMAT ")",
                  this, (address)_method, (address)cause);
    if (!Universe::heap()->is_gc_active())
      cause->klass()->print();
  }
  // Unlink the osr method, so we do not look this up again
  if (is_osr_method()) {
    invalidate_osr_method();
  }
  // If _method is already NULL the methodOop is about to be unloaded,
  // so we don't have to break the cycle. Note that it is possible to
  // have the methodOop live here, in case we unload the nmethod because
  // it is pointing to some oop (other than the methodOop) being unloaded.
  if (_method != NULL) {
    // OSR methods point to the methodOop, but the methodOop does not
    // point back!
    if (_method->code() == this) {
      _method->clear_code(); // Break a cycle
    }
    _method = NULL;            // Clear the method of this dead nmethod
  }
  // Make the class unloaded - i.e., change state and notify sweeper
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
  if (is_in_use()) {
    // Transitioning directly from live to unloaded -- so
    // we need to force a cache clean-up; remember this
    // for later on.
    CodeCache::set_needs_cache_clean(true);
  }
  _state = unloaded;

  // Log the unloading.
  log_state_change();

  // The methodOop is gone at this point
  assert(_method == NULL, "Tautology");

  set_osr_link(NULL);
  //set_scavenge_root_link(NULL); // done by prune_scavenge_root_nmethods
  NMethodSweeper::notify(this);
}

void nmethod::invalidate_osr_method() {
  assert(_entry_bci != InvocationEntryBci, "wrong kind of nmethod");
  // Remove from list of active nmethods
  if (method() != NULL)
    instanceKlass::cast(method()->method_holder())->remove_osr_nmethod(this);
  // Set entry as invalid
  _entry_bci = InvalidOSREntryBci;
}

void nmethod::log_state_change() const {
  if (LogCompilation) {
    if (xtty != NULL) {
      ttyLocker ttyl;  // keep the following output all in one block
      if (_state == unloaded) {
        xtty->begin_elem("make_unloaded thread='" UINTX_FORMAT "'",
                         os::current_thread_id());
      } else {
        xtty->begin_elem("make_not_entrant thread='" UINTX_FORMAT "'%s",
                         os::current_thread_id(),
                         (_state == zombie ? " zombie='1'" : ""));
      }
      log_identity(xtty);
      xtty->stamp();
      xtty->end_elem();
    }
  }
  if (PrintCompilation && _state != unloaded) {
    print_on(tty, _state == zombie ? "made zombie " : "made not entrant ");
    tty->cr();
  }
}

// Common functionality for both make_not_entrant and make_zombie
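// Returns true if this call performed the transition; returns false if the
// nmethod was already in (or beyond) the requested state.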
  1295 bool nmethod::make_not_entrant_or_zombie(unsigned int state) {
  1296   assert(state == zombie || state == not_entrant, "must be zombie or not_entrant");
  1298   // Make sure neither the nmethod nor the method is flushed in case of a safepoint in code below.
  1299   nmethodLocker nml(this);
  1300   methodHandle the_method(method());
  1301   No_Safepoint_Verifier nsv;
  1304     // If the method is already zombie there is nothing to do
  1305     if (is_zombie()) {
  1306       return false;
  1309     // invalidate osr nmethod before acquiring the patching lock since
  1310     // they both acquire leaf locks and we don't want a deadlock.
  1311     // This logic is equivalent to the logic below for patching the
  1312     // verified entry point of regular methods.
  1313     if (is_osr_method()) {
  1314       // this effectively makes the osr nmethod not entrant
  1315       invalidate_osr_method();
  1318     // Enter critical section.  Does not block for safepoint.
  1319     MutexLockerEx pl(Patching_lock, Mutex::_no_safepoint_check_flag);
  1321     if (_state == state) {
  1322       // another thread already performed this transition so nothing
  1323       // to do, but return false to indicate this.
  1324       return false;
  1327     // The caller can be calling the method statically or through an inline
  1328     // cache call.
  1329     if (!is_osr_method() && !is_not_entrant()) {
  1330       NativeJump::patch_verified_entry(entry_point(), verified_entry_point(),
  1331                   SharedRuntime::get_handle_wrong_method_stub());
  1334     if (is_in_use()) {
  1335       // It's a true state change, so mark the method as decompiled.
  1336       // Do it only for transition from alive.
  1337       inc_decompile_count();
  1340     // Change state
  1341     _state = state;
  1343     // Log the transition once
  1344     log_state_change();
  1346     // Remove nmethod from method.
  1347     // We need to check if both the _code and _from_compiled_code_entry_point
  1348     // refer to this nmethod because there is a race in setting these two fields
  1349     // in methodOop as seen in bugid 4947125.
  1350     // If the vep() points to the zombie nmethod, the memory for the nmethod
  1351     // could be flushed and the compiler and vtable stubs could still call
  1352     // through it.
  1353     if (method() != NULL && (method()->code() == this ||
  1354                              method()->from_compiled_entry() == verified_entry_point())) {
  1355       HandleMark hm;
  1356       method()->clear_code();
  1359     if (state == not_entrant) {
  1360       mark_as_seen_on_stack();
  1363   } // leave critical region under Patching_lock
  1365   // When the nmethod becomes zombie it is no longer alive so the
  1366   // dependencies must be flushed.  nmethods in the not_entrant
  1367   // state will be flushed later when the transition to zombie
  1368   // happens or they get unloaded.
  1369   if (state == zombie) {
  1370     {
  1371       // Flushing dependencies must be done before any possible
  1372       // safepoint can sneak in, otherwise the oops used by the
  1373       // dependency logic could have become stale.
  1374       MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  1375       flush_dependencies(NULL);
  1376     }
  1378     {
  1379       // zombie only - if a JVMTI agent has enabled the CompiledMethodUnload event
  1380       // and it hasn't already been reported for this nmethod then report it now.
  1381       // (the event may have been reported earlier if the GC marked it for unloading).
  1382       Pause_No_Safepoint_Verifier pnsv(&nsv);
  1383       post_compiled_method_unload();
  1384     }
  1386 #ifdef ASSERT
  1387     // It's no longer safe to access the oops section since zombie
  1388     // nmethods aren't scanned for GC.
  1389     _oops_are_stale = true;
  1390 #endif
  1391   } else {
  1392     assert(state == not_entrant, "other cases may need to be handled differently");
  1393   }
  1395   if (TraceCreateZombies) {
  1396     tty->print_cr("nmethod <" INTPTR_FORMAT "> code made %s", this, (state == not_entrant) ? "not entrant" : "zombie");
  1397   }
  1399   // Make sweeper aware that there is a zombie method that needs to be removed
  1400   NMethodSweeper::notify(this);
  1402   return true;
  1403 }
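       // The public entry points are thin wrappers around the routine above;
       // roughly (see nmethod.hpp for the authoritative declarations):
       //
       //   bool make_not_entrant() { return make_not_entrant_or_zombie(not_entrant); }
       //   bool make_zombie()      { return make_not_entrant_or_zombie(zombie); }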
  1405 void nmethod::flush() {
  1406   // Note that there are no valid oops in the nmethod anymore.
  1407   assert(is_zombie() || (is_osr_method() && is_unloaded()), "must be a zombie method");
  1408   assert(is_marked_for_reclamation() || (is_osr_method() && is_unloaded()), "must be marked for reclamation");
  1410   assert (!is_locked_by_vm(), "locked methods shouldn't be flushed");
  1411   assert_locked_or_safepoint(CodeCache_lock);
  1413   // completely deallocate this method
  1414   EventMark m("flushing nmethod " INTPTR_FORMAT " %s", this, "");
  1415   if (PrintMethodFlushing) {
  1416     tty->print_cr("*flushing nmethod %3d/" INTPTR_FORMAT ". Live blobs:" UINT32_FORMAT "/Free CodeCache:" SIZE_FORMAT "Kb",
  1417         _compile_id, this, CodeCache::nof_blobs(), CodeCache::unallocated_capacity()/1024);
  1418   }
  1420   // We need to deallocate any ExceptionCache data.
  1421   // Note that we do not need to grab the nmethod lock for this; it
  1422   // had better be thread safe if we're disposing of it!
  1423   ExceptionCache* ec = exception_cache();
  1424   set_exception_cache(NULL);
  1425   while (ec != NULL) {
  1426     ExceptionCache* next = ec->next();
  1427     delete ec;
  1428     ec = next;
  1429   }
  1431   if (on_scavenge_root_list()) {
  1432     CodeCache::drop_scavenge_root_nmethod(this);
  1433   }
  1435   if (is_speculatively_disconnected()) {
  1436     CodeCache::remove_saved_code(this);
  1437   }
  1439 #ifdef SHARK
  1440   ((SharkCompiler *) compiler())->free_compiled_method(insts_begin());
  1441 #endif // SHARK
  1443   ((CodeBlob*)(this))->flush();
  1445   CodeCache::free(this);
  1446 }
  1449 //
  1450 // Notify all classes this nmethod is dependent on that it is no
  1451 // longer dependent. This should only be called in two situations.
  1452 // First, when an nmethod transitions to a zombie all dependents need
  1453 // to be cleared.  Since zombification happens at a safepoint there are no
  1454 // synchronization issues.  The second place is a little more tricky.
  1455 // During phase 1 of mark sweep class unloading may happen and as a
  1456 // result some nmethods may get unloaded.  In this case the flushing
  1457 // of dependencies must happen during phase 1 since after GC any
  1458 // dependencies in the unloaded nmethod won't be updated, so
  1459 // traversing the dependency information is unsafe.  In that case this
  1460 // function is called with a non-NULL argument and it only
  1461 // notifies instanceKlasses that are reachable.
  1463 void nmethod::flush_dependencies(BoolObjectClosure* is_alive) {
  1464   assert_locked_or_safepoint(CodeCache_lock);
  1465   assert(Universe::heap()->is_gc_active() == (is_alive != NULL),
  1466   "is_alive is non-NULL if and only if we are called during GC");
  1467   if (!has_flushed_dependencies()) {
  1468     set_has_flushed_dependencies();
  1469     for (Dependencies::DepStream deps(this); deps.next(); ) {
  1470       klassOop klass = deps.context_type();
  1471       if (klass == NULL)  continue;  // ignore things like evol_method
  1473       // During GC the is_alive closure is non-NULL, and is used to
  1474       // determine liveness of dependees that need to be updated.
  1475       if (is_alive == NULL || is_alive->do_object_b(klass)) {
  1476         instanceKlass::cast(klass)->remove_dependent_nmethod(this);
  1477       }
  1478     }
  1479   }
  1480 }
  1483 // If this oop is not live, the nmethod can be unloaded.
  1484 bool nmethod::can_unload(BoolObjectClosure* is_alive,
  1485                          OopClosure* keep_alive,
  1486                          oop* root, bool unloading_occurred) {
  1487   assert(root != NULL, "just checking");
  1488   oop obj = *root;
  1489   if (obj == NULL || is_alive->do_object_b(obj)) {
  1490       return false;
  1491   }
  1492   if (obj->is_compiledICHolder()) {
  1493     compiledICHolderOop cichk_oop = compiledICHolderOop(obj);
  1494     if (is_alive->do_object_b(
  1495           cichk_oop->holder_method()->method_holder()) &&
  1496         is_alive->do_object_b(cichk_oop->holder_klass())) {
  1497       // The oop should be kept alive
  1498       keep_alive->do_oop(root);
  1499       return false;
  1500     }
  1501   }
  1502   // If ScavengeRootsInCode is true, an nmethod might be unloaded
  1503   // simply because one of its constant oops has gone dead.
  1504   // No actual classes need to be unloaded in order for this to occur.
  1505   assert(unloading_occurred || ScavengeRootsInCode, "Inconsistency in unloading");
  1506   make_unloaded(is_alive, obj);
  1507   return true;
  1508 }
  1510 // ------------------------------------------------------------------
  1511 // post_compiled_method_load_event
  1512 // new method for install_code() path
  1513 // Transfer information from compilation to jvmti
  1514 void nmethod::post_compiled_method_load_event() {
  1516   methodOop moop = method();
  1517   HS_DTRACE_PROBE8(hotspot, compiled__method__load,
  1518       moop->klass_name()->bytes(),
  1519       moop->klass_name()->utf8_length(),
  1520       moop->name()->bytes(),
  1521       moop->name()->utf8_length(),
  1522       moop->signature()->bytes(),
  1523       moop->signature()->utf8_length(),
  1524       insts_begin(), insts_size());
  1526   if (JvmtiExport::should_post_compiled_method_load() ||
  1527       JvmtiExport::should_post_compiled_method_unload()) {
  1528     get_and_cache_jmethod_id();
  1529   }
  1531   if (JvmtiExport::should_post_compiled_method_load()) {
  1532     // Let the Service thread (which is a real Java thread) post the event
  1533     MutexLockerEx ml(Service_lock, Mutex::_no_safepoint_check_flag);
  1534     JvmtiDeferredEventQueue::enqueue(
  1535       JvmtiDeferredEvent::compiled_method_load_event(this));
  1536   }
  1537 }
  1539 jmethodID nmethod::get_and_cache_jmethod_id() {
  1540   if (_jmethod_id == NULL) {
  1541     // Cache the jmethod_id since it can no longer be looked up once the
  1542     // method itself has been marked for unloading.
  1543     _jmethod_id = method()->jmethod_id();
  1544   }
  1545   return _jmethod_id;
  1546 }
  1548 void nmethod::post_compiled_method_unload() {
  1549   if (unload_reported()) {
  1550     // During unloading we transition to unloaded and then to zombie
  1551     // and the unloading is reported during the first transition.
  1552     return;
  1553   }
  1555   assert(_method != NULL && !is_unloaded(), "just checking");
  1556   DTRACE_METHOD_UNLOAD_PROBE(method());
  1558   // If a JVMTI agent has enabled the CompiledMethodUnload event then
  1559   // post the event. Sometime later this nmethod will be made a zombie
  1560   // by the sweeper but the methodOop will not be valid at that point.
  1561   // If the _jmethod_id is null then no load event was ever requested
  1562   // so don't bother posting the unload.  The main reason for this is
  1563   // that the jmethodID is a weak reference to the methodOop so if
  1564   // it's being unloaded there's no way to look it up since the weak
  1565   // ref will have been cleared.
  1566   if (_jmethod_id != NULL && JvmtiExport::should_post_compiled_method_unload()) {
  1567     assert(!unload_reported(), "already unloaded");
  1568     JvmtiDeferredEvent event =
  1569       JvmtiDeferredEvent::compiled_method_unload_event(
  1570           _jmethod_id, insts_begin());
  1571     if (SafepointSynchronize::is_at_safepoint()) {
  1572       // Don't want to take the queueing lock. Add it as pending and
  1573       // it will get enqueued later.
  1574       JvmtiDeferredEventQueue::add_pending_event(event);
  1575     } else {
  1576       MutexLockerEx ml(Service_lock, Mutex::_no_safepoint_check_flag);
  1577       JvmtiDeferredEventQueue::enqueue(event);
  1578     }
  1579   }
  1581   // The JVMTI CompiledMethodUnload event can be enabled or disabled at
  1582   // any time. As the nmethod is being unloaded now we mark it as
  1583   // having the unload event reported - this will ensure that we don't
  1584   // attempt to report the event in the unlikely scenario where the
  1585   // event is enabled at the time the nmethod is made a zombie.
  1586   set_unload_reported();
  1587 }
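       // On the agent side these deferred events surface through the JVMTI
       // CompiledMethodUnload callback. A minimal sketch of an agent enabling
       // it (hypothetical agent code, not part of this file):
       //
       //   static void JNICALL
       //   my_unload(jvmtiEnv* jvmti, jmethodID method, const void* code_addr) {
       //     /* record that this compiled code is gone */
       //   }
       //
       //   jvmtiEventCallbacks callbacks;
       //   memset(&callbacks, 0, sizeof(callbacks));
       //   callbacks.CompiledMethodUnload = &my_unload;
       //   jvmti->SetEventCallbacks(&callbacks, sizeof(callbacks));
       //   jvmti->SetEventNotificationMode(JVMTI_ENABLE,
       //                                   JVMTI_EVENT_COMPILED_METHOD_UNLOAD, NULL);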
  1589 // This is called at the end of the strong tracing/marking phase of a
  1590 // GC to unload an nmethod if it contains otherwise unreachable
  1591 // oops.
  1593 void nmethod::do_unloading(BoolObjectClosure* is_alive,
  1594                            OopClosure* keep_alive, bool unloading_occurred) {
  1595   // Make sure the oops are ready to receive visitors
  1596   assert(!is_zombie() && !is_unloaded(),
  1597          "should not call follow on zombie or unloaded nmethod");
  1599   // If the method is not entrant then a JMP is plastered over the
  1600   // first few bytes.  If an oop in the old code was there, that oop
  1601   // should not get GC'd.  Skip the first few bytes of oops on
  1602   // not-entrant methods.
  1603   address low_boundary = verified_entry_point();
  1604   if (is_not_entrant()) {
  1605     low_boundary += NativeJump::instruction_size;
  1606     // %%% Note:  On SPARC we patch only a 4-byte trap, not a full NativeJump.
  1607     // (See comment above.)
  1608   }
  1610   // The RedefineClasses() API can cause the class unloading invariant
  1611   // to no longer be true. See jvmtiExport.hpp for details.
  1612   // Also, leave a debugging breadcrumb in local flag.
  1613   bool a_class_was_redefined = JvmtiExport::has_redefined_a_class();
  1614   if (a_class_was_redefined) {
  1615     // This set of the unloading_occurred flag is done before the
  1616     // call to post_compiled_method_unload() so that the unloading
  1617     // of this nmethod is reported.
  1618     unloading_occurred = true;
  1619   }
  1621   // Follow methodOop
  1622   if (can_unload(is_alive, keep_alive, (oop*)&_method, unloading_occurred)) {
  1623     return;
  1624   }
  1626   // Exception cache
  1627   ExceptionCache* ec = exception_cache();
  1628   while (ec != NULL) {
  1629     oop* ex_addr = (oop*)ec->exception_type_addr();
  1630     oop ex = *ex_addr;
  1631     ExceptionCache* next_ec = ec->next();
  1632     if (ex != NULL && !is_alive->do_object_b(ex)) {
  1633       assert(!ex->is_compiledICHolder(), "Possible error here");
  1634       remove_from_exception_cache(ec);
  1635     }
  1636     ec = next_ec;
  1637   }
  1639   // If class unloading occurred we first iterate over all inline caches and
  1640   // clear ICs where the cached oop is referring to an unloaded klass or method.
  1641   // The remaining live cached oops will be traversed in the relocInfo::oop_type
  1642   // iteration below.
  1643   if (unloading_occurred) {
  1644     RelocIterator iter(this, low_boundary);
  1645     while(iter.next()) {
  1646       if (iter.type() == relocInfo::virtual_call_type) {
  1647         CompiledIC *ic = CompiledIC_at(iter.reloc());
  1648         oop ic_oop = ic->cached_oop();
  1649         if (ic_oop != NULL && !is_alive->do_object_b(ic_oop)) {
  1650           // The only exception is compiledICHolder oops which may
  1651           // yet be marked below. (We check this further below).
  1652           if (ic_oop->is_compiledICHolder()) {
  1653             compiledICHolderOop cichk_oop = compiledICHolderOop(ic_oop);
  1654             if (is_alive->do_object_b(
  1655                   cichk_oop->holder_method()->method_holder()) &&
  1656                 is_alive->do_object_b(cichk_oop->holder_klass())) {
  1657               continue;
  1658             }
  1659           }
  1660           ic->set_to_clean();
  1661           assert(ic->cached_oop() == NULL,
  1662                  "cached oop in IC should be cleared");
  1663         }
  1664       }
  1665     }
  1666   }
  1668   // Compiled code
  1669   RelocIterator iter(this, low_boundary);
  1670   while (iter.next()) {
  1671     if (iter.type() == relocInfo::oop_type) {
  1672       oop_Relocation* r = iter.oop_reloc();
  1673       // In this loop, we must only traverse those oops directly embedded in
  1674       // the code.  Other oops (oop_index>0) are seen as part of scopes_oops.
  1675       assert(1 == (r->oop_is_immediate()) +
  1676                   (r->oop_addr() >= oops_begin() && r->oop_addr() < oops_end()),
  1677              "oop must be found in exactly one place");
  1678       if (r->oop_is_immediate() && r->oop_value() != NULL) {
  1679         if (can_unload(is_alive, keep_alive, r->oop_addr(), unloading_occurred)) {
  1680           return;
  1681         }
  1682       }
  1683     }
  1684   }
  1687   // Scopes
  1688   for (oop* p = oops_begin(); p < oops_end(); p++) {
  1689     if (*p == Universe::non_oop_word())  continue;  // skip non-oops
  1690     if (can_unload(is_alive, keep_alive, p, unloading_occurred)) {
  1691       return;
  1692     }
  1693   }
  1695 #ifndef PRODUCT
  1696   // This nmethod was not unloaded; check below that all CompiledICs
  1697   // refer to marked oops.
  1698   {
  1699     RelocIterator iter(this, low_boundary);
  1700     while (iter.next()) {
  1701       if (iter.type() == relocInfo::virtual_call_type) {
  1702          CompiledIC *ic = CompiledIC_at(iter.reloc());
  1703          oop ic_oop = ic->cached_oop();
  1704          assert(ic_oop == NULL || is_alive->do_object_b(ic_oop),
  1705                 "Found unmarked ic_oop in reachable nmethod");
  1706       }
  1707     }
  1708   }
  1709 #endif // !PRODUCT
  1710 }
  1712 // This method is called twice during GC -- once while
  1713 // tracing the "active" nmethods on thread stacks during
  1714 // the (strong) marking phase, and then again when walking
  1715 // the code cache contents during the weak roots processing
  1716 // phase. The two uses are distinguished by means of the
  1717 // 'do_strong_roots_only' flag, which is true in the first
  1718 // case. We want to walk the weak roots in the nmethod
  1719 // only in the second case. The weak roots in the nmethod
  1720 // are the oops in the ExceptionCache and the InlineCache
  1721 // oops.
  1722 void nmethod::oops_do(OopClosure* f, bool do_strong_roots_only) {
  1723   // make sure the oops are ready to receive visitors
  1724   assert(!is_zombie() && !is_unloaded(),
  1725          "should not call follow on zombie or unloaded nmethod");
  1727   // If the method is not entrant or zombie then a JMP is plastered over the
  1728   // first few bytes.  If an oop in the old code was there, that oop
  1729   // should not get GC'd.  Skip the first few bytes of oops on
  1730   // not-entrant methods.
  1731   address low_boundary = verified_entry_point();
  1732   if (is_not_entrant()) {
  1733     low_boundary += NativeJump::instruction_size;
  1734     // %%% Note:  On SPARC we patch only a 4-byte trap, not a full NativeJump.
  1735     // (See comment above.)
  1736   }
  1738   // Compiled code
  1739   f->do_oop((oop*) &_method);
  1740   if (!do_strong_roots_only) {
  1741     // weak roots processing phase -- update ExceptionCache oops
  1742     ExceptionCache* ec = exception_cache();
  1743     while (ec != NULL) {
  1744       f->do_oop((oop*)ec->exception_type_addr());
  1745       ec = ec->next();
  1746     }
  1747   } // Else strong roots phase -- skip oops in ExceptionCache
  1749   RelocIterator iter(this, low_boundary);
  1751   while (iter.next()) {
  1752     if (iter.type() == relocInfo::oop_type ) {
  1753       oop_Relocation* r = iter.oop_reloc();
  1754       // In this loop, we must only follow those oops directly embedded in
  1755       // the code.  Other oops (oop_index>0) are seen as part of scopes_oops.
  1756       assert(1 == (r->oop_is_immediate()) +
  1757                    (r->oop_addr() >= oops_begin() && r->oop_addr() < oops_end()),
  1758              "oop must be found in exactly one place");
  1759       if (r->oop_is_immediate() && r->oop_value() != NULL) {
  1760         f->do_oop(r->oop_addr());
  1761       }
  1762     }
  1763   }
  1765   // Scopes
  1766   // This includes oop constants not inlined in the code stream.
  1767   for (oop* p = oops_begin(); p < oops_end(); p++) {
  1768     if (*p == Universe::non_oop_word())  continue;  // skip non-oops
  1769     f->do_oop(p);
  1770   }
  1771 }
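       // A sketch of a client of this protocol (hypothetical closure, not part
       // of this file), counting the strong and weak oops of an nmethod:
       //
       //   class CountOopsClosure: public OopClosure {
       //     int _count;
       //   public:
       //     CountOopsClosure() : _count(0) { }
       //     int count() const { return _count; }
       //     virtual void do_oop(oop* p)       { if (*p != NULL) _count += 1; }
       //     virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); }
       //   };
       //
       //   CountOopsClosure coc;
       //   nm->oops_do(&coc);   // the one-argument overload walks weak roots too
       //   tty->print_cr("%d oops", coc.count());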
  1773 #define NMETHOD_SENTINEL ((nmethod*)badAddress)
  1775 nmethod* volatile nmethod::_oops_do_mark_nmethods;
  1777 // An nmethod is "marked" if its _mark_link is set non-null.
  1778 // Even if it is the end of the linked list, it will have a non-null link value,
  1779 // as long as it is on the list.
  1780 // This code must be MP safe, because it is used from parallel GC passes.
  1781 bool nmethod::test_set_oops_do_mark() {
  1782   assert(nmethod::oops_do_marking_is_active(), "oops_do_marking_prologue must be called");
  1783   nmethod* observed_mark_link = _oops_do_mark_link;
  1784   if (observed_mark_link == NULL) {
  1785     // Claim this nmethod for this thread to mark.
  1786     observed_mark_link = (nmethod*)
  1787       Atomic::cmpxchg_ptr(NMETHOD_SENTINEL, &_oops_do_mark_link, NULL);
  1788     if (observed_mark_link == NULL) {
  1790       // Atomically append this nmethod (now claimed) to the head of the list:
  1791       nmethod* observed_mark_nmethods = _oops_do_mark_nmethods;
  1792       for (;;) {
  1793         nmethod* required_mark_nmethods = observed_mark_nmethods;
  1794         _oops_do_mark_link = required_mark_nmethods;
  1795         observed_mark_nmethods = (nmethod*)
  1796           Atomic::cmpxchg_ptr(this, &_oops_do_mark_nmethods, required_mark_nmethods);
  1797         if (observed_mark_nmethods == required_mark_nmethods)
  1798           break;
  1799       }
  1800       // Mark was clear when we first saw this guy.
  1801       NOT_PRODUCT(if (TraceScavenge)  print_on(tty, "oops_do, mark\n"));
  1802       return false;
  1803     }
  1804   }
  1805   // On fall through, another racing thread marked this nmethod before we did.
  1806   return true;
  1807 }
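       // Stripped of the nmethod details, the claim-and-push above is a
       // standard lock-free list push; a sketch of the bare pattern:
       //
       //   Node* observed = list_head;
       //   for (;;) {
       //     Node* required = observed;
       //     node->next = required;             // publish the intended link first
       //     observed = (Node*) Atomic::cmpxchg_ptr(node, &list_head, required);
       //     if (observed == required)  break;  // push succeeded
       //   }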
  1809 void nmethod::oops_do_marking_prologue() {
  1810   NOT_PRODUCT(if (TraceScavenge)  tty->print_cr("[oops_do_marking_prologue"));
  1811   assert(_oops_do_mark_nmethods == NULL, "must not call oops_do_marking_prologue twice in a row");
  1812   // We use cmpxchg_ptr instead of regular assignment here because the user
  1813   // may fork a bunch of threads, and we need them all to see the same state.
  1814   void* observed = Atomic::cmpxchg_ptr(NMETHOD_SENTINEL, &_oops_do_mark_nmethods, NULL);
  1815   guarantee(observed == NULL, "no races in this sequential code");
  1816 }
  1818 void nmethod::oops_do_marking_epilogue() {
  1819   assert(_oops_do_mark_nmethods != NULL, "must not call oops_do_marking_epilogue twice in a row");
  1820   nmethod* cur = _oops_do_mark_nmethods;
  1821   while (cur != NMETHOD_SENTINEL) {
  1822     assert(cur != NULL, "not NULL-terminated");
  1823     nmethod* next = cur->_oops_do_mark_link;
  1824     cur->_oops_do_mark_link = NULL;
  1825     NOT_PRODUCT(if (TraceScavenge)  cur->print_on(tty, "oops_do, unmark\n"));
  1826     cur = next;
  1827   }
  1828   void* required = _oops_do_mark_nmethods;
  1829   void* observed = Atomic::cmpxchg_ptr(NULL, &_oops_do_mark_nmethods, required);
  1830   guarantee(observed == required, "no races in this sequential code");
  1831   NOT_PRODUCT(if (TraceScavenge)  tty->print_cr("oops_do_marking_epilogue]"));
  1832 }
  1834 class DetectScavengeRoot: public OopClosure {
  1835   bool     _detected_scavenge_root;
  1836 public:
  1837   DetectScavengeRoot() : _detected_scavenge_root(false)
  1838   { NOT_PRODUCT(_print_nm = NULL); }
  1839   bool detected_scavenge_root() { return _detected_scavenge_root; }
  1840   virtual void do_oop(oop* p) {
  1841     if ((*p) != NULL && (*p)->is_scavengable()) {
  1842       NOT_PRODUCT(maybe_print(p));
  1843       _detected_scavenge_root = true;
  1844     }
  1845   }
  1846   virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); }
  1848 #ifndef PRODUCT
  1849   nmethod* _print_nm;
  1850   void maybe_print(oop* p) {
  1851     if (_print_nm == NULL)  return;
  1852     if (!_detected_scavenge_root)  _print_nm->print_on(tty, "new scavenge root");
  1853     tty->print_cr(""PTR_FORMAT"[offset=%d] detected non-perm oop "PTR_FORMAT" (found at "PTR_FORMAT")",
  1854                   _print_nm, (int)((intptr_t)p - (intptr_t)_print_nm),
  1855                   (intptr_t)(*p), (intptr_t)p);
  1856     (*p)->print();
  1857   }
  1858 #endif //PRODUCT
  1859 };
  1861 bool nmethod::detect_scavenge_root_oops() {
  1862   DetectScavengeRoot detect_scavenge_root;
  1863   NOT_PRODUCT(if (TraceScavenge)  detect_scavenge_root._print_nm = this);
  1864   oops_do(&detect_scavenge_root);
  1865   return detect_scavenge_root.detected_scavenge_root();
  1866 }
  1868 // Method that knows how to preserve outgoing arguments at call. This method must be
  1869 // called with a frame corresponding to a Java invoke
  1870 void nmethod::preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map, OopClosure* f) {
  1871 #ifndef SHARK
  1872   if (!method()->is_native()) {
  1873     SimpleScopeDesc ssd(this, fr.pc());
  1874     Bytecode_invoke call(ssd.method(), ssd.bci());
  1875     bool has_receiver = call.has_receiver();
  1876     Symbol* signature = call.signature();
  1877     fr.oops_compiled_arguments_do(signature, has_receiver, reg_map, f);
  1878   }
  1879 #endif // !SHARK
  1880 }
  1883 oop nmethod::embeddedOop_at(u_char* p) {
  1884   RelocIterator iter(this, p, p + oopSize);
  1885   while (iter.next())
  1886     if (iter.type() == relocInfo::oop_type) {
  1887       return iter.oop_reloc()->oop_value();
  1888     }
  1889   return NULL;
  1890 }
  1893 inline bool includes(void* p, void* from, void* to) {
  1894   return from <= p && p < to;
  1895 }
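       // The interval is half-open: includes(from, from, to) is true but
       // includes(to, from, to) is false, so adjacent ranges never overlap.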
  1898 void nmethod::copy_scopes_pcs(PcDesc* pcs, int count) {
  1899   assert(count >= 2, "must be sentinel values, at least");
  1901 #ifdef ASSERT
  1902   // must be sorted and unique; we do a binary search in find_pc_desc()
  1903   int prev_offset = pcs[0].pc_offset();
  1904   assert(prev_offset == PcDesc::lower_offset_limit,
  1905          "must start with a sentinel");
  1906   for (int i = 1; i < count; i++) {
  1907     int this_offset = pcs[i].pc_offset();
  1908     assert(this_offset > prev_offset, "offsets must be sorted");
  1909     prev_offset = this_offset;
  1910   }
  1911   assert(prev_offset == PcDesc::upper_offset_limit,
  1912          "must end with a sentinel");
  1913 #endif //ASSERT
  1915   // Search for MethodHandle invokes and tag the nmethod.
  1916   for (int i = 0; i < count; i++) {
  1917     if (pcs[i].is_method_handle_invoke()) {
  1918       set_has_method_handle_invokes(true);
  1919       break;
  1920     }
  1921   }
  1922   assert(has_method_handle_invokes() == (_deoptimize_mh_offset != -1), "must have deopt mh handler");
  1924   int size = count * sizeof(PcDesc);
  1925   assert(scopes_pcs_size() >= size, "oob");
  1926   memcpy(scopes_pcs_begin(), pcs, size);
  1928   // Adjust the final sentinel downward.
  1929   PcDesc* last_pc = &scopes_pcs_begin()[count-1];
  1930   assert(last_pc->pc_offset() == PcDesc::upper_offset_limit, "sanity");
  1931   last_pc->set_pc_offset(content_size() + 1);
  1932   for (; last_pc + 1 < scopes_pcs_end(); last_pc += 1) {
  1933     // Fill any rounding gaps with copies of the last record.
  1934     last_pc[1] = last_pc[0];
  1935   }
  1936   // The following assert could fail if sizeof(PcDesc) is not
  1937   // an integral multiple of oopSize (the rounding term).
  1938   // If it fails, change the logic to always allocate a multiple
  1939   // of sizeof(PcDesc), and fill unused words with copies of *last_pc.
  1940   assert(last_pc + 1 == scopes_pcs_end(), "must match exactly");
  1941 }
  1943 void nmethod::copy_scopes_data(u_char* buffer, int size) {
  1944   assert(scopes_data_size() >= size, "oob");
  1945   memcpy(scopes_data_begin(), buffer, size);
  1946 }
  1949 #ifdef ASSERT
  1950 static PcDesc* linear_search(nmethod* nm, int pc_offset, bool approximate) {
  1951   PcDesc* lower = nm->scopes_pcs_begin();
  1952   PcDesc* upper = nm->scopes_pcs_end();
  1953   lower += 1; // exclude initial sentinel
  1954   PcDesc* res = NULL;
  1955   for (PcDesc* p = lower; p < upper; p++) {
  1956     NOT_PRODUCT(--nmethod_stats.pc_desc_tests);  // don't count this call to match_desc
  1957     if (match_desc(p, pc_offset, approximate)) {
  1958       if (res == NULL)
  1959         res = p;
  1960       else
  1961         res = (PcDesc*) badAddress;
  1962     }
  1963   }
  1964   return res;
  1965 }
  1966 #endif
  1969 // Finds a PcDesc with real-pc equal to "pc"
  1970 PcDesc* nmethod::find_pc_desc_internal(address pc, bool approximate) {
  1971   address base_address = code_begin();
  1972   if ((pc < base_address) ||
  1973       (pc - base_address) >= (ptrdiff_t) PcDesc::upper_offset_limit) {
  1974     return NULL;  // PC is wildly out of range
  1975   }
  1976   int pc_offset = (int) (pc - base_address);
  1978   // Check whether the PcDesc cache contains the desired PcDesc
  1979   // (this has an almost 100% hit rate)
  1980   PcDesc* res = _pc_desc_cache.find_pc_desc(pc_offset, approximate);
  1981   if (res != NULL) {
  1982     assert(res == linear_search(this, pc_offset, approximate), "cache ok");
  1983     return res;
  1984   }
  1986   // Fallback algorithm: quasi-linear search for the PcDesc
  1987   // Find the last pc_offset less than the given offset.
  1988   // The successor must be the required match, if there is a match at all.
  1989   // (Use a fixed radix to avoid expensive affine pointer arithmetic.)
  1990   PcDesc* lower = scopes_pcs_begin();
  1991   PcDesc* upper = scopes_pcs_end();
  1992   upper -= 1; // exclude final sentinel
  1993   if (lower >= upper)  return NULL;  // native method; no PcDescs at all
  1995 #define assert_LU_OK \
  1996   /* invariant on lower..upper during the following search: */ \
  1997   assert(lower->pc_offset() <  pc_offset, "sanity"); \
  1998   assert(upper->pc_offset() >= pc_offset, "sanity")
  1999   assert_LU_OK;
  2001   // Use the last successful return as a split point.
  2002   PcDesc* mid = _pc_desc_cache.last_pc_desc();
  2003   NOT_PRODUCT(++nmethod_stats.pc_desc_searches);
  2004   if (mid->pc_offset() < pc_offset) {
  2005     lower = mid;
  2006   } else {
  2007     upper = mid;
  2008   }
  2010   // Take giant steps at first (4096, then 256, then 16, then 1)
  2011   const int LOG2_RADIX = 4 /*smaller steps in debug mode:*/ debug_only(-1);
  2012   const int RADIX = (1 << LOG2_RADIX);
  2013   for (int step = (1 << (LOG2_RADIX*3)); step > 1; step >>= LOG2_RADIX) {
  2014     while ((mid = lower + step) < upper) {
  2015       assert_LU_OK;
  2016       NOT_PRODUCT(++nmethod_stats.pc_desc_searches);
  2017       if (mid->pc_offset() < pc_offset) {
  2018         lower = mid;
  2019       } else {
  2020         upper = mid;
  2021         break;
  2022       }
  2023     }
  2024     assert_LU_OK;
  2025   }
  2027   // Sneak up on the value with a linear search of length ~16.
  2028   while (true) {
  2029     assert_LU_OK;
  2030     mid = lower + 1;
  2031     NOT_PRODUCT(++nmethod_stats.pc_desc_searches);
  2032     if (mid->pc_offset() < pc_offset) {
  2033       lower = mid;
  2034     } else {
  2035       upper = mid;
  2036       break;
  2037     }
  2038   }
  2039 #undef assert_LU_OK
  2041   if (match_desc(upper, pc_offset, approximate)) {
  2042     assert(upper == linear_search(this, pc_offset, approximate), "search ok");
  2043     _pc_desc_cache.add_pc_desc(upper);
  2044     return upper;
  2045   } else {
  2046     assert(NULL == linear_search(this, pc_offset, approximate), "search ok");
  2047     return NULL;
  2048   }
  2049 }
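       // The giant-step narrowing above, isolated from the PcDesc machinery;
       // a sketch over a plain sorted int array (simplified: no cache, and the
       // caller guarantees a[0] < key && a[n-1] >= key):
       //
       //   static int radix_lower_bound(const int* a, int n, int key) {
       //     int lower = 0, upper = n - 1;
       //     const int LOG2_RADIX = 4;
       //     for (int step = (1 << (LOG2_RADIX*3)); step > 1; step >>= LOG2_RADIX) {
       //       int mid;
       //       while ((mid = lower + step) < upper) {
       //         if (a[mid] < key)  lower = mid;
       //         else             { upper = mid; break; }
       //       }
       //     }
       //     while (a[lower + 1] < key)  lower += 1;  // final linear sneak-up
       //     return lower;  // a[lower] < key <= a[lower + 1]
       //   }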
  2052 bool nmethod::check_all_dependencies() {
  2053   bool found_check = false;
  2054   // wholesale check of all dependencies
  2055   for (Dependencies::DepStream deps(this); deps.next(); ) {
  2056     if (deps.check_dependency() != NULL) {
  2057       found_check = true;
  2058       NOT_DEBUG(break);
  2059     }
  2060   }
  2061   return found_check;  // tell caller if we found anything
  2062 }
  2064 bool nmethod::check_dependency_on(DepChange& changes) {
  2065   // What has happened:
  2066   // 1) a new class dependee has been added
  2067   // 2) dependee and all its super classes have been marked
  2068   bool found_check = false;  // set true if we are upset
  2069   for (Dependencies::DepStream deps(this); deps.next(); ) {
  2070     // Evaluate only relevant dependencies.
  2071     if (deps.spot_check_dependency_at(changes) != NULL) {
  2072       found_check = true;
  2073       NOT_DEBUG(break);
  2074     }
  2075   }
  2076   return found_check;
  2077 }
  2079 bool nmethod::is_evol_dependent_on(klassOop dependee) {
  2080   instanceKlass *dependee_ik = instanceKlass::cast(dependee);
  2081   objArrayOop dependee_methods = dependee_ik->methods();
  2082   for (Dependencies::DepStream deps(this); deps.next(); ) {
  2083     if (deps.type() == Dependencies::evol_method) {
  2084       methodOop method = deps.method_argument(0);
  2085       for (int j = 0; j < dependee_methods->length(); j++) {
  2086         if ((methodOop) dependee_methods->obj_at(j) == method) {
  2087           // RC_TRACE macro has an embedded ResourceMark
  2088           RC_TRACE(0x01000000,
  2089             ("Found evol dependency of nmethod %s.%s(%s) compile_id=%d on method %s.%s(%s)",
  2090             _method->method_holder()->klass_part()->external_name(),
  2091             _method->name()->as_C_string(),
  2092             _method->signature()->as_C_string(), compile_id(),
  2093             method->method_holder()->klass_part()->external_name(),
  2094             method->name()->as_C_string(),
  2095             method->signature()->as_C_string()));
  2096           if (TraceDependencies || LogCompilation)
  2097             deps.log_dependency(dependee);
  2098           return true;
  2099         }
  2100       }
  2101     }
  2102   }
  2103   return false;
  2104 }
  2106 // Called from mark_for_deoptimization, when dependee is invalidated.
  2107 bool nmethod::is_dependent_on_method(methodOop dependee) {
  2108   for (Dependencies::DepStream deps(this); deps.next(); ) {
  2109     if (deps.type() != Dependencies::evol_method)
  2110       continue;
  2111     methodOop method = deps.method_argument(0);
  2112     if (method == dependee) return true;
  2113   }
  2114   return false;
  2115 }
  2118 bool nmethod::is_patchable_at(address instr_addr) {
  2119   assert(insts_contains(instr_addr), "wrong nmethod used");
  2120   if (is_zombie()) {
  2121     // a zombie may never be patched
  2122     return false;
  2123   }
  2124   return true;
  2125 }
  2128 address nmethod::continuation_for_implicit_exception(address pc) {
  2129   // Exception happened outside inline-cache check code => we are inside
  2130   // an active nmethod => use cpc to determine a return address
  2131   int exception_offset = pc - code_begin();
  2132   int cont_offset = ImplicitExceptionTable(this).at( exception_offset );
  2133 #ifdef ASSERT
  2134   if (cont_offset == 0) {
  2135     Thread* thread = ThreadLocalStorage::get_thread_slow();
  2136     ResetNoHandleMark rnm; // Might be called from LEAF/QUICK ENTRY
  2137     HandleMark hm(thread);
  2138     ResourceMark rm(thread);
  2139     CodeBlob* cb = CodeCache::find_blob(pc);
  2140     assert(cb != NULL && cb == this, "");
  2141     tty->print_cr("implicit exception happened at " INTPTR_FORMAT, pc);
  2142     print();
  2143     method()->print_codes();
  2144     print_code();
  2145     print_pcs();
  2146   }
  2147 #endif
  2148   if (cont_offset == 0) {
  2149     // Let the normal error handling report the exception
  2150     return NULL;
  2151   }
  2152   return code_begin() + cont_offset;
  2153 }
  2157 void nmethod_init() {
  2158   // make sure you didn't forget to adjust the filler fields
  2159   assert(sizeof(nmethod) % oopSize == 0, "nmethod size must be multiple of a word");
  2160 }
  2163 //-------------------------------------------------------------------------------------------
  2166 // QQQ might we make this work from a frame??
  2167 nmethodLocker::nmethodLocker(address pc) {
  2168   CodeBlob* cb = CodeCache::find_blob(pc);
  2169   guarantee(cb != NULL && cb->is_nmethod(), "bad pc for a nmethod found");
  2170   _nm = (nmethod*)cb;
  2171   lock_nmethod(_nm);
  2172 }
  2174 void nmethodLocker::lock_nmethod(nmethod* nm) {
  2175   if (nm == NULL)  return;
  2176   Atomic::inc(&nm->_lock_count);
  2177   guarantee(!nm->is_zombie(), "cannot lock a zombie method");
  2178 }
  2180 void nmethodLocker::unlock_nmethod(nmethod* nm) {
  2181   if (nm == NULL)  return;
  2182   Atomic::dec(&nm->_lock_count);
  2183   guarantee(nm->_lock_count >= 0, "unmatched nmethod lock/unlock");
  2184 }
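       // Typical use is scoped: the constructor pins the nmethod and the
       // destructor releases it. A sketch:
       //
       //   {
       //     nmethodLocker nml(pc);  // pins the nmethod containing pc
       //     ...                     // the nmethod cannot be flushed in here
       //   }                         // destructor calls unlock_nmethod()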
  2187 // -----------------------------------------------------------------------------
  2188 // nmethod::get_deopt_original_pc
  2189 //
  2190 // Return the original PC for the given PC if:
  2191 // (a) the given PC belongs to a nmethod and
  2192 // (b) it is a deopt PC
  2193 address nmethod::get_deopt_original_pc(const frame* fr) {
  2194   if (fr->cb() == NULL)  return NULL;
  2196   nmethod* nm = fr->cb()->as_nmethod_or_null();
  2197   if (nm != NULL && nm->is_deopt_pc(fr->pc()))
  2198     return nm->get_original_pc(fr);
  2200   return NULL;
  2201 }
  2204 // -----------------------------------------------------------------------------
  2205 // MethodHandle
  2207 bool nmethod::is_method_handle_return(address return_pc) {
  2208   if (!has_method_handle_invokes())  return false;
  2209   PcDesc* pd = pc_desc_at(return_pc);
  2210   if (pd == NULL)
  2211     return false;
  2212   return pd->is_method_handle_invoke();
  2213 }
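       // Frame-walking code uses this query to recognize return sites of
       // MethodHandle calls, whose frames may have been extended; a sketch of
       // a caller (illustrative only -- see the frame code for the real logic):
       //
       //   if (nm->is_method_handle_return(return_pc)) {
       //     // the sender sp must be recovered specially
       //   }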
  2216 // -----------------------------------------------------------------------------
  2217 // Verification
  2219 class VerifyOopsClosure: public OopClosure {
  2220   nmethod* _nm;
  2221   bool     _ok;
  2222 public:
  2223   VerifyOopsClosure(nmethod* nm) : _nm(nm), _ok(true) { }
  2224   bool ok() { return _ok; }
  2225   virtual void do_oop(oop* p) {
  2226     if ((*p) == NULL || (*p)->is_oop())  return;
  2227     if (_ok) {
  2228       _nm->print_nmethod(true);
  2229       _ok = false;
  2230     }
  2231     tty->print_cr("*** non-oop "PTR_FORMAT" found at "PTR_FORMAT" (offset %d)",
  2232                   (intptr_t)(*p), (intptr_t)p, (int)((intptr_t)p - (intptr_t)_nm));
  2233   }
  2234   virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); }
  2235 };
  2237 void nmethod::verify() {
  2239   // Hmm. OSR methods can be deopted but not marked as zombie or not_entrant,
  2240   // which seems odd.
  2242   if (is_zombie() || is_not_entrant())
  2243     return;
  2245   // Make sure all the entry points are correctly aligned for patching.
  2246   NativeJump::check_verified_entry_alignment(entry_point(), verified_entry_point());
  2248   assert(method()->is_oop(), "must be valid");
  2250   ResourceMark rm;
  2252   if (!CodeCache::contains(this)) {
  2253     fatal(err_msg("nmethod at " INTPTR_FORMAT " not in zone", this));
  2254   }
  2256   if (is_native_method())
  2257     return;
  2259   nmethod* nm = CodeCache::find_nmethod(verified_entry_point());
  2260   if (nm != this) {
  2261     fatal(err_msg("findNMethod did not find this nmethod (" INTPTR_FORMAT ")",
  2262                   this));
  2263   }
  2265   for (PcDesc* p = scopes_pcs_begin(); p < scopes_pcs_end(); p++) {
  2266     if (! p->verify(this)) {
  2267       tty->print_cr("\t\tin nmethod at " INTPTR_FORMAT " (pcs)", this);
  2268     }
  2269   }
  2271   VerifyOopsClosure voc(this);
  2272   oops_do(&voc);
  2273   assert(voc.ok(), "embedded oops must be OK");
  2274   verify_scavenge_root_oops();
  2276   verify_scopes();
  2277 }
  2280 void nmethod::verify_interrupt_point(address call_site) {
  2281   // This code does not work in release mode since
  2282   // owns_lock is only available in debug mode.
  2283   CompiledIC* ic = NULL;
  2284   Thread *cur = Thread::current();
  2285   if (CompiledIC_lock->owner() == cur ||
  2286       ((cur->is_VM_thread() || cur->is_ConcurrentGC_thread()) &&
  2287        SafepointSynchronize::is_at_safepoint())) {
  2288     ic = CompiledIC_at(call_site);
  2289     CHECK_UNHANDLED_OOPS_ONLY(Thread::current()->clear_unhandled_oops());
  2290   } else {
  2291     MutexLocker ml_verify (CompiledIC_lock);
  2292     ic = CompiledIC_at(call_site);
  2293   }
  2294   PcDesc* pd = pc_desc_at(ic->end_of_call());
  2295   assert(pd != NULL, "PcDesc must exist");
  2296   for (ScopeDesc* sd = new ScopeDesc(this, pd->scope_decode_offset(),
  2297                                      pd->obj_decode_offset(), pd->should_reexecute(),
  2298                                      pd->return_oop());
  2299        !sd->is_top(); sd = sd->sender()) {
  2300     sd->verify();
  2301   }
  2302 }
  2304 void nmethod::verify_scopes() {
  2305   if (!method()) return;        // Runtime stubs have no scope
  2306   if (method()->is_native()) return; // Ignore stub methods.
  2307   // iterate through all interrupt points
  2308   // and verify that the debug information is valid.
  2309   RelocIterator iter((nmethod*)this);
  2310   while (iter.next()) {
  2311     address stub = NULL;
  2312     switch (iter.type()) {
  2313       case relocInfo::virtual_call_type:
  2314         verify_interrupt_point(iter.addr());
  2315         break;
  2316       case relocInfo::opt_virtual_call_type:
  2317         stub = iter.opt_virtual_call_reloc()->static_stub();
  2318         verify_interrupt_point(iter.addr());
  2319         break;
  2320       case relocInfo::static_call_type:
  2321         stub = iter.static_call_reloc()->static_stub();
  2322         //verify_interrupt_point(iter.addr());
  2323         break;
  2324       case relocInfo::runtime_call_type:
  2325         address destination = iter.reloc()->value();
  2326         // Right now there is no way to find out which entries support
  2327         // an interrupt point.  It would be nice if we had this
  2328         // information in a table.
  2329         break;
  2330     }
  2331     assert(stub == NULL || stub_contains(stub), "static call stub outside stub section");
  2332   }
  2333 }
  2336 // -----------------------------------------------------------------------------
  2337 // Non-product code
  2338 #ifndef PRODUCT
  2340 class DebugScavengeRoot: public OopClosure {
  2341   nmethod* _nm;
  2342   bool     _ok;
  2343 public:
  2344   DebugScavengeRoot(nmethod* nm) : _nm(nm), _ok(true) { }
  2345   bool ok() { return _ok; }
  2346   virtual void do_oop(oop* p) {
  2347     if ((*p) == NULL || !(*p)->is_scavengable())  return;
  2348     if (_ok) {
  2349       _nm->print_nmethod(true);
  2350       _ok = false;
  2351     }
  2352     tty->print_cr("*** non-perm oop "PTR_FORMAT" found at "PTR_FORMAT" (offset %d)",
  2353                   (intptr_t)(*p), (intptr_t)p, (int)((intptr_t)p - (intptr_t)_nm));
  2354     (*p)->print();
  2355   }
  2356   virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); }
  2357 };
  2359 void nmethod::verify_scavenge_root_oops() {
  2360   if (!on_scavenge_root_list()) {
  2361     // Actually look inside, to verify the claim that it's clean.
  2362     DebugScavengeRoot debug_scavenge_root(this);
  2363     oops_do(&debug_scavenge_root);
  2364     if (!debug_scavenge_root.ok())
  2365       fatal("found an unadvertised bad non-perm oop in the code cache");
  2366   }
  2367   assert(scavenge_root_not_marked(), "");
  2368 }
  2370 #endif // PRODUCT
  2372 // Printing operations
  2374 void nmethod::print() const {
  2375   ResourceMark rm;
  2376   ttyLocker ttyl;   // keep the following output all in one block
  2378   tty->print("Compiled ");
  2380   if (is_compiled_by_c1()) {
  2381     tty->print("(c1) ");
  2382   } else if (is_compiled_by_c2()) {
  2383     tty->print("(c2) ");
  2384   } else if (is_compiled_by_shark()) {
  2385     tty->print("(shark) ");
  2386   } else {
  2387     tty->print("(nm) ");
  2388   }
  2390   print_on(tty, "nmethod");
  2391   tty->cr();
  2392   if (WizardMode) {
  2393     tty->print("((nmethod*) "INTPTR_FORMAT ") ", this);
  2394     tty->print(" for method " INTPTR_FORMAT , (address)method());
  2395     tty->print(" { ");
  2396     if (is_in_use())      tty->print("in_use ");
  2397     if (is_not_entrant()) tty->print("not_entrant ");
  2398     if (is_zombie())      tty->print("zombie ");
  2399     if (is_unloaded())    tty->print("unloaded ");
  2400     if (on_scavenge_root_list())  tty->print("scavenge_root ");
  2401     tty->print_cr("}:");
  2402   }
  2403   if (size              () > 0) tty->print_cr(" total in heap  [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
  2404                                               (address)this,
  2405                                               (address)this + size(),
  2406                                               size());
  2407   if (relocation_size   () > 0) tty->print_cr(" relocation     [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
  2408                                               relocation_begin(),
  2409                                               relocation_end(),
  2410                                               relocation_size());
  2411   if (consts_size       () > 0) tty->print_cr(" constants      [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
  2412                                               consts_begin(),
  2413                                               consts_end(),
  2414                                               consts_size());
  2415   if (insts_size        () > 0) tty->print_cr(" main code      [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
  2416                                               insts_begin(),
  2417                                               insts_end(),
  2418                                               insts_size());
  2419   if (stub_size         () > 0) tty->print_cr(" stub code      [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
  2420                                               stub_begin(),
  2421                                               stub_end(),
  2422                                               stub_size());
  2423   if (oops_size         () > 0) tty->print_cr(" oops           [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
  2424                                               oops_begin(),
  2425                                               oops_end(),
  2426                                               oops_size());
  2427   if (scopes_data_size  () > 0) tty->print_cr(" scopes data    [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
  2428                                               scopes_data_begin(),
  2429                                               scopes_data_end(),
  2430                                               scopes_data_size());
  2431   if (scopes_pcs_size   () > 0) tty->print_cr(" scopes pcs     [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
  2432                                               scopes_pcs_begin(),
  2433                                               scopes_pcs_end(),
  2434                                               scopes_pcs_size());
  2435   if (dependencies_size () > 0) tty->print_cr(" dependencies   [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
  2436                                               dependencies_begin(),
  2437                                               dependencies_end(),
  2438                                               dependencies_size());
  2439   if (handler_table_size() > 0) tty->print_cr(" handler table  [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
  2440                                               handler_table_begin(),
  2441                                               handler_table_end(),
  2442                                               handler_table_size());
  2443   if (nul_chk_table_size() > 0) tty->print_cr(" nul chk table  [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
  2444                                               nul_chk_table_begin(),
  2445                                               nul_chk_table_end(),
  2446                                               nul_chk_table_size());
  2449 void nmethod::print_code() {
  2450   HandleMark hm;
  2451   ResourceMark m;
  2452   Disassembler::decode(this);
  2453 }
  2456 #ifndef PRODUCT
  2458 void nmethod::print_scopes() {
  2459   // Find the first pc desc for all scopes in the code and print it.
  2460   ResourceMark rm;
  2461   for (PcDesc* p = scopes_pcs_begin(); p < scopes_pcs_end(); p++) {
  2462     if (p->scope_decode_offset() == DebugInformationRecorder::serialized_null)
  2463       continue;
  2465     ScopeDesc* sd = scope_desc_at(p->real_pc(this));
  2466     sd->print_on(tty, p);
  2467   }
  2468 }
  2470 void nmethod::print_dependencies() {
  2471   ResourceMark rm;
  2472   ttyLocker ttyl;   // keep the following output all in one block
  2473   tty->print_cr("Dependencies:");
  2474   for (Dependencies::DepStream deps(this); deps.next(); ) {
  2475     deps.print_dependency();
  2476     klassOop ctxk = deps.context_type();
  2477     if (ctxk != NULL) {
  2478       Klass* k = Klass::cast(ctxk);
  2479       if (k->oop_is_instance() && ((instanceKlass*)k)->is_dependent_nmethod(this)) {
  2480         tty->print_cr("   [nmethod<=klass]%s", k->external_name());
  2481       }
  2482     }
  2483     deps.log_dependency();  // put it into the xml log also
  2484   }
  2485 }
  2488 void nmethod::print_relocations() {
  2489   ResourceMark m;       // in case methods get printed via the debugger
  2490   tty->print_cr("relocations:");
  2491   RelocIterator iter(this);
  2492   iter.print();
  2493   if (UseRelocIndex) {
  2494     jint* index_end   = (jint*)relocation_end() - 1;
  2495     jint  index_size  = *index_end;
  2496     jint* index_start = (jint*)( (address)index_end - index_size );
  2497     tty->print_cr("    index @" INTPTR_FORMAT ": index_size=%d", index_start, index_size);
  2498     if (index_size > 0) {
  2499       jint* ip;
  2500       for (ip = index_start; ip+2 <= index_end; ip += 2)
  2501         tty->print_cr("  (%d %d) addr=" INTPTR_FORMAT " @" INTPTR_FORMAT,
  2502                       ip[0],
  2503                       ip[1],
  2504                       header_end()+ip[0],
  2505                       relocation_begin()-1+ip[1]);
  2506       for (; ip < index_end; ip++)
  2507         tty->print_cr("  (%d ?)", ip[0]);
  2508       tty->print_cr("          @" INTPTR_FORMAT ": index_size=%d", ip, *ip++);
  2509       tty->print_cr("reloc_end @" INTPTR_FORMAT ":", ip);
  2510     }
  2511   }
  2512 }
  2515 void nmethod::print_pcs() {
  2516   ResourceMark m;       // in case methods get printed via debugger
  2517   tty->print_cr("pc-bytecode offsets:");
  2518   for (PcDesc* p = scopes_pcs_begin(); p < scopes_pcs_end(); p++) {
  2519     p->print(this);
  2520   }
  2521 }
  2523 #endif // PRODUCT
  2525 const char* nmethod::reloc_string_for(u_char* begin, u_char* end) {
  2526   RelocIterator iter(this, begin, end);
  2527   bool have_one = false;
  2528   while (iter.next()) {
  2529     have_one = true;
  2530     switch (iter.type()) {
  2531         case relocInfo::none:                  return "no_reloc";
  2532         case relocInfo::oop_type: {
  2533           stringStream st;
  2534           oop_Relocation* r = iter.oop_reloc();
  2535           oop obj = r->oop_value();
  2536           st.print("oop(");
  2537           if (obj == NULL) st.print("NULL");
  2538           else obj->print_value_on(&st);
  2539           st.print(")");
  2540           return st.as_string();
  2541         }
  2542         case relocInfo::virtual_call_type:     return "virtual_call";
  2543         case relocInfo::opt_virtual_call_type: return "optimized virtual_call";
  2544         case relocInfo::static_call_type:      return "static_call";
  2545         case relocInfo::static_stub_type:      return "static_stub";
  2546         case relocInfo::runtime_call_type:     return "runtime_call";
  2547         case relocInfo::external_word_type:    return "external_word";
  2548         case relocInfo::internal_word_type:    return "internal_word";
  2549         case relocInfo::section_word_type:     return "section_word";
  2550         case relocInfo::poll_type:             return "poll";
  2551         case relocInfo::poll_return_type:      return "poll_return";
  2552         case relocInfo::type_mask:             return "type_bit_mask";
  2553     }
  2554   }
  2555   return have_one ? "other" : NULL;
  2556 }
  2558 // Return the last scope in (begin..end]
  2559 ScopeDesc* nmethod::scope_desc_in(address begin, address end) {
  2560   PcDesc* p = pc_desc_near(begin+1);
  2561   if (p != NULL && p->real_pc(this) <= end) {
  2562     return new ScopeDesc(this, p->scope_decode_offset(),
  2563                          p->obj_decode_offset(), p->should_reexecute(),
  2564                          p->return_oop());
  2565   }
  2566   return NULL;
  2567 }
  2569 void nmethod::print_nmethod_labels(outputStream* stream, address block_begin) {
  2570   if (block_begin == entry_point())             stream->print_cr("[Entry Point]");
  2571   if (block_begin == verified_entry_point())    stream->print_cr("[Verified Entry Point]");
  2572   if (block_begin == exception_begin())         stream->print_cr("[Exception Handler]");
  2573   if (block_begin == stub_begin())              stream->print_cr("[Stub Code]");
  2574   if (block_begin == deopt_handler_begin())     stream->print_cr("[Deopt Handler Code]");
  2576   if (has_method_handle_invokes())
  2577     if (block_begin == deopt_mh_handler_begin())  stream->print_cr("[Deopt MH Handler Code]");
  2579   if (block_begin == consts_begin())            stream->print_cr("[Constants]");
  2581   if (block_begin == entry_point()) {
  2582     methodHandle m = method();
  2583     if (m.not_null()) {
  2584       stream->print("  # ");
  2585       m->print_value_on(stream);
  2586       stream->cr();
  2587     }
  2588     if (m.not_null() && !is_osr_method()) {
  2589       ResourceMark rm;
  2590       int sizeargs = m->size_of_parameters();
  2591       BasicType* sig_bt = NEW_RESOURCE_ARRAY(BasicType, sizeargs);
  2592       VMRegPair* regs   = NEW_RESOURCE_ARRAY(VMRegPair, sizeargs);
  2593       {
  2594         int sig_index = 0;
  2595         if (!m->is_static())
  2596           sig_bt[sig_index++] = T_OBJECT; // 'this'
  2597         for (SignatureStream ss(m->signature()); !ss.at_return_type(); ss.next()) {
  2598           BasicType t = ss.type();
  2599           sig_bt[sig_index++] = t;
  2600           if (type2size[t] == 2) {
  2601             sig_bt[sig_index++] = T_VOID;
  2602           } else {
  2603             assert(type2size[t] == 1, "size is 1 or 2");
  2604           }
  2605         }
  2606         assert(sig_index == sizeargs, "");
  2607       }
  2608       const char* spname = "sp"; // make arch-specific?
  2609       intptr_t out_preserve = SharedRuntime::java_calling_convention(sig_bt, regs, sizeargs, false);
  2610       int stack_slot_offset = this->frame_size() * wordSize;
  2611       int tab1 = 14, tab2 = 24;
  2612       int sig_index = 0;
  2613       int arg_index = (m->is_static() ? 0 : -1);
  2614       bool did_old_sp = false;
  2615       for (SignatureStream ss(m->signature()); !ss.at_return_type(); ) {
  2616         bool at_this = (arg_index == -1);
  2617         bool at_old_sp = false;
  2618         BasicType t = (at_this ? T_OBJECT : ss.type());
  2619         assert(t == sig_bt[sig_index], "sigs in sync");
  2620         if (at_this)
  2621           stream->print("  # this: ");
  2622         else
  2623           stream->print("  # parm%d: ", arg_index);
  2624         stream->move_to(tab1);
  2625         VMReg fst = regs[sig_index].first();
  2626         VMReg snd = regs[sig_index].second();
  2627         if (fst->is_reg()) {
  2628           stream->print("%s", fst->name());
  2629           if (snd->is_valid())  {
  2630             stream->print(":%s", snd->name());
  2631           }
  2632         } else if (fst->is_stack()) {
  2633           int offset = fst->reg2stack() * VMRegImpl::stack_slot_size + stack_slot_offset;
  2634           if (offset == stack_slot_offset)  at_old_sp = true;
  2635           stream->print("[%s+0x%x]", spname, offset);
  2636         } else {
  2637           stream->print("reg%d:%d??", (int)(intptr_t)fst, (int)(intptr_t)snd);
  2638         }
  2639         stream->print(" ");
  2640         stream->move_to(tab2);
  2641         stream->print("= ");
  2642         if (at_this) {
  2643           m->method_holder()->print_value_on(stream);
  2644         } else {
  2645           bool did_name = false;
  2646           if (!at_this && ss.is_object()) {
  2647             Symbol* name = ss.as_symbol_or_null();
  2648             if (name != NULL) {
  2649               name->print_value_on(stream);
  2650               did_name = true;
  2651             }
  2652           }
  2653           if (!did_name)
  2654             stream->print("%s", type2name(t));
  2655         }
  2656         if (at_old_sp) {
  2657           stream->print("  (%s of caller)", spname);
  2658           did_old_sp = true;
  2659         }
  2660         stream->cr();
  2661         sig_index += type2size[t];
  2662         arg_index += 1;
  2663         if (!at_this)  ss.next();
  2664       }
  2665       if (!did_old_sp) {
  2666         stream->print("  # ");
  2667         stream->move_to(tab1);
  2668         stream->print("[%s+0x%x]", spname, stack_slot_offset);
  2669         stream->print("  (%s of caller)", spname);
  2670         stream->cr();
  2671       }
  2672     }
  2673   }
  2674 }
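       // For a virtual method taking (int, Object) the block printed above
       // might look roughly like this on x86_64 (illustrative output only;
       // registers and offsets depend on the platform calling convention):
       //
       //   [Entry Point]
       //     # {method} 'foo' '(ILjava/lang/Object;)V' in 'C'
       //     # this:     rsi:rsi   = 'C'
       //     # parm0:    rdx       = int
       //     # parm1:    rcx:rcx   = 'java/lang/Object'
       //     #           [sp+0x60]  (sp of caller)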
  2676 void nmethod::print_code_comment_on(outputStream* st, int column, u_char* begin, u_char* end) {
  2677   // First, find an oopmap in (begin, end].
  2678   // We use the odd half-closed interval so that oop maps and scope descs
  2679   // which are tied to the byte after a call are printed with the call itself.
  2680   address base = code_begin();
  2681   OopMapSet* oms = oop_maps();
  2682   if (oms != NULL) {
  2683     for (int i = 0, imax = oms->size(); i < imax; i++) {
  2684       OopMap* om = oms->at(i);
  2685       address pc = base + om->offset();
  2686       if (pc > begin) {
  2687         if (pc <= end) {
  2688           st->move_to(column);
  2689           st->print("; ");
  2690           om->print_on(st);
  2691         }
  2692         break;
  2693       }
  2694     }
  2695   }
  2697   // Print any debug info present at this pc.
  2698   ScopeDesc* sd  = scope_desc_in(begin, end);
  2699   if (sd != NULL) {
  2700     st->move_to(column);
  2701     if (sd->bci() == SynchronizationEntryBCI) {
  2702       st->print(";*synchronization entry");
  2703     } else {
  2704       if (sd->method().is_null()) {
  2705         st->print("method is NULL");
  2706       } else if (sd->method()->is_native()) {
  2707         st->print("method is native");
  2708       } else {
  2709         Bytecodes::Code bc = sd->method()->java_code_at(sd->bci());
  2710         st->print(";*%s", Bytecodes::name(bc));
  2711         switch (bc) {
  2712         case Bytecodes::_invokevirtual:
  2713         case Bytecodes::_invokespecial:
  2714         case Bytecodes::_invokestatic:
  2715         case Bytecodes::_invokeinterface:
  2716           {
  2717             Bytecode_invoke invoke(sd->method(), sd->bci());
  2718             st->print(" ");
  2719             if (invoke.name() != NULL)
  2720               invoke.name()->print_symbol_on(st);
  2721             else
  2722               st->print("<UNKNOWN>");
  2723             break;
  2724           }
  2725         case Bytecodes::_getfield:
  2726         case Bytecodes::_putfield:
  2727         case Bytecodes::_getstatic:
  2728         case Bytecodes::_putstatic:
  2729           {
  2730             Bytecode_field field(sd->method(), sd->bci());
  2731             st->print(" ");
  2732             if (field.name() != NULL)
  2733               field.name()->print_symbol_on(st);
  2734             else
  2735               st->print("<UNKNOWN>");
  2736           }
  2737         }
  2738       }
  2739     }
  2741     // Print all scopes
  2742     for (;sd != NULL; sd = sd->sender()) {
  2743       st->move_to(column);
  2744       st->print("; -");
  2745       if (sd->method().is_null()) {
  2746         st->print("method is NULL");
  2747       } else {
  2748         sd->method()->print_short_name(st);
  2749       }
  2750       int lineno = sd->method()->line_number_from_bci(sd->bci());
  2751       if (lineno != -1) {
  2752         st->print("@%d (line %d)", sd->bci(), lineno);
  2753       } else {
  2754         st->print("@%d", sd->bci());
  2756       st->cr();
  2757     }
  2758   }
  2760   // Print relocation information
  2761   const char* str = reloc_string_for(begin, end);
  2762   if (str != NULL) {
  2763     if (sd != NULL) st->cr();
  2764     st->move_to(column);
  2765     st->print(";   {%s}", str);
  2766   }
  2767   int cont_offset = ImplicitExceptionTable(this).at(begin - code_begin());
  2768   if (cont_offset != 0) {
  2769     st->move_to(column);
  2770     st->print("; implicit exception: dispatches to " INTPTR_FORMAT, code_begin() + cont_offset);
  2771   }
  2772 }
  2775 #ifndef PRODUCT
  2777 void nmethod::print_value_on(outputStream* st) const {
  2778   print_on(st, "nmethod");
  2779 }
  2781 void nmethod::print_calls(outputStream* st) {
  2782   RelocIterator iter(this);
  2783   while (iter.next()) {
  2784     switch (iter.type()) {
  2785     case relocInfo::virtual_call_type:
  2786     case relocInfo::opt_virtual_call_type: {
  2787       VerifyMutexLocker mc(CompiledIC_lock);
  2788       CompiledIC_at(iter.reloc())->print();
  2789       break;
  2791     case relocInfo::static_call_type:
  2792       st->print_cr("Static call at " INTPTR_FORMAT, iter.reloc()->addr());
  2793       compiledStaticCall_at(iter.reloc())->print();
  2794       break;
  2795     }
  2796   }
  2797 }
  2799 void nmethod::print_handler_table() {
  2800   ExceptionHandlerTable(this).print();
  2801 }
  2803 void nmethod::print_nul_chk_table() {
  2804   ImplicitExceptionTable(this).print(code_begin());
  2805 }
  2807 void nmethod::print_statistics() {
  2808   ttyLocker ttyl;
  2809   if (xtty != NULL)  xtty->head("statistics type='nmethod'");
  2810   nmethod_stats.print_native_nmethod_stats();
  2811   nmethod_stats.print_nmethod_stats();
  2812   DebugInformationRecorder::print_statistics();
  2813   nmethod_stats.print_pc_stats();
  2814   Dependencies::print_statistics();
  2815   if (xtty != NULL)  xtty->tail("statistics");
  2816 }
  2818 #endif // PRODUCT
