src/share/vm/code/nmethod.hpp

changeset:   1435:a1423fe86a18 (Merge)
author:      trims
date:        Fri, 09 Oct 2009 15:18:52 -0700
parents:     1383:89e0543e1737, 1429:753cf9794df9
child:       1544:032260830071
permissions: -rw-r--r--

/*
 * Copyright 1997-2009 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

// This class is used internally by nmethods, to cache
// exception/pc/handler information.

class ExceptionCache : public CHeapObj {
  friend class VMStructs;
 private:
  static address _unwind_handler;
  enum { cache_size = 16 };
  klassOop _exception_type;
  address  _pc[cache_size];
  address  _handler[cache_size];
  int      _count;
  ExceptionCache* _next;

  address pc_at(int index)                     { assert(index >= 0 && index < count(),""); return _pc[index]; }
  void    set_pc_at(int index, address a)      { assert(index >= 0 && index < cache_size,""); _pc[index] = a; }
  address handler_at(int index)                { assert(index >= 0 && index < count(),""); return _handler[index]; }
  void    set_handler_at(int index, address a) { assert(index >= 0 && index < cache_size,""); _handler[index] = a; }
  int     count()                              { return _count; }
  void    increment_count()                    { _count++; }

 public:

  ExceptionCache(Handle exception, address pc, address handler);

  klassOop  exception_type()                { return _exception_type; }
  klassOop* exception_type_addr()           { return &_exception_type; }
  ExceptionCache* next()                    { return _next; }
  void      set_next(ExceptionCache *ec)    { _next = ec; }

  address match(Handle exception, address pc);
  bool    match_exception_with_space(Handle exception);
  address test_address(address addr);
  bool    add_address_and_handler(address addr, address handler);

  static address unwind_handler() { return _unwind_handler; }
};
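
// Usage sketch (illustrative only, not part of this interface): a lookup such
// as nmethod::handler_for_exception_and_pc (declared later in this file)
// conceptually walks the nmethod's list of caches, asking each for a match:
//
//   for (ExceptionCache* ec = exception_cache(); ec != NULL; ec = ec->next()) {
//     address handler = ec->match(exception, pc);
//     if (handler != NULL)  return handler;
//   }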

// cache pc descs found in earlier inquiries
class PcDescCache VALUE_OBJ_CLASS_SPEC {
  friend class VMStructs;
 private:
  enum { cache_size = 4 };
  PcDesc* _last_pc_desc;         // most recent pc_desc found
  PcDesc* _pc_descs[cache_size]; // last cache_size pc_descs found
 public:
  PcDescCache() { debug_only(_last_pc_desc = NULL); }
  void    reset_to(PcDesc* initial_pc_desc);
  PcDesc* find_pc_desc(int pc_offset, bool approximate);
  void    add_pc_desc(PcDesc* pc_desc);
  PcDesc* last_pc_desc() { return _last_pc_desc; }
};
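
// Illustrative fast path (a sketch; compare nmethod::find_pc_desc later in
// this file): a lookup probes this cache first and only falls back to a
// search of the nmethod's full PcDesc array on a miss:
//
//   PcDesc* desc = _pc_desc_cache.find_pc_desc(pc_offset, approximate);
//   if (desc == NULL)  desc = /* search scopes_pcs_begin()..scopes_pcs_end() */;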

// nmethods (native methods) are the compiled code versions of Java methods.

struct nmFlags {
  friend class VMStructs;
  unsigned int version:8;                 // version number (0 = first version)
  unsigned int level:4;                   // optimization level
  unsigned int age:4;                     // age (in # of sweep steps)

  unsigned int state:2;                   // {alive, not_entrant, zombie, unloaded}

  unsigned int isUncommonRecompiled:1;    // recompiled because of uncommon trap?
  unsigned int isToBeRecompiled:1;        // to be recompiled as soon as it matures
  unsigned int hasFlushedDependencies:1;  // Used for maintenance of dependencies
  unsigned int markedForReclamation:1;    // Used by NMethodSweeper

  unsigned int has_unsafe_access:1;       // May fault due to unsafe access.

  void clear();
};

// An nmethod contains:
//  - header                 (the nmethod structure)
//  [Relocation]
//  - relocation information
//  - constant part          (doubles, longs and floats used in nmethod)
//  [Code]
//  - code body
//  - exception handler
//  - stub code
//  [Debugging information]
//  - oop array
//  - data array
//  - pcs
//  [Exception handler table]
//  - handler entry point array
//  [Implicit Null Pointer exception table]
//  - implicit null table array

class Dependencies;
class ExceptionHandlerTable;
class ImplicitExceptionTable;
class AbstractCompiler;
class xmlStream;

class nmethod : public CodeBlob {
  friend class VMStructs;
  friend class NMethodSweeper;
  friend class CodeCache;  // non-perm oops
 private:
  // Shared fields for all nmethods
  static int _zombie_instruction_size;

  methodOop _method;
  int       _entry_bci;        // != InvocationEntryBci if this nmethod is an on-stack replacement method

  // To support simple linked-list chaining of nmethods:
  nmethod*  _osr_link;           // from instanceKlass::osr_nmethods_head
  nmethod*  _scavenge_root_link; // from CodeCache::scavenge_root_nmethods

  static nmethod* volatile _oops_do_mark_nmethods;
  nmethod*        volatile _oops_do_mark_link;

  AbstractCompiler* _compiler; // The compiler which compiled this nmethod

  // Offsets for different nmethod parts
  int _exception_offset;
  // All deoptees will resume execution at the location described by this offset
  int _deoptimize_offset;
#ifdef HAVE_DTRACE_H
  int _trap_offset;
#endif // def HAVE_DTRACE_H
  int _stub_offset;
  int _consts_offset;
  int _scopes_data_offset;
  int _scopes_pcs_offset;
  int _dependencies_offset;
  int _handler_table_offset;
  int _nul_chk_table_offset;
  int _nmethod_end_offset;

  // location in frame (offset from sp) where deopt can store the original
  // pc during a deopt.
  int _orig_pc_offset;

  int _compile_id;                     // which compilation made this nmethod
  int _comp_level;                     // compilation level

  // offsets for entry points
  address _entry_point;                // entry point with class check
  address _verified_entry_point;       // entry point without class check
  address _osr_entry_point;            // entry point for on stack replacement

  nmFlags flags;                       // various flags to keep track of nmethod state
  bool _markedForDeoptimization;       // Used for stack deoptimization
  enum { alive        = 0,
         not_entrant  = 1, // uncommon trap has happened but activations may still exist
         zombie       = 2,
         unloaded     = 3 };
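
  // Illustrative lifecycle (a sketch assembled from the states above and the
  // transition methods declared below; not a formal state machine):
  //
  //   alive --make_not_entrant()--> not_entrant --make_zombie()--> zombie --flush()--> freed
  //   alive or not_entrant --make_unloaded()--> unloaded   (on class unloading)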

  // used by jvmti to track if an unload event has been posted for this nmethod.
  bool _unload_reported;

  jbyte _scavenge_root_state;

  NOT_PRODUCT(bool _has_debug_info; )

  // Nmethod Flushing lock (if non-zero, then the nmethod is not removed)
  jint  _lock_count;

  // not_entrant method removal. Each mark_sweep pass will update
  // this mark to the current sweep invocation count if the method is seen
  // on the stack.  A not_entrant method can be removed when there are no
  // more activations, i.e., when the _stack_traversal_mark is less than the
  // current sweep traversal index.
  long _stack_traversal_mark;
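
  // Illustrative removal test (a sketch of the sweeper's decision, not a
  // verbatim excerpt; 'current_traversal' stands for the sweeper's global
  // traversal counter and is an assumed name):
  //
  //   bool removable = nm->is_not_entrant() &&
  //                    nm->stack_traversal_mark() < current_traversal;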

  ExceptionCache *_exception_cache;
  PcDescCache     _pc_desc_cache;

  // These are only used for compiled synchronized native methods to
  // locate the owner and stack slot for the BasicLock so that we can
  // properly revoke the bias of the owner if necessary. They are
  // needed because there is no debug information for compiled native
  // wrappers and the oop maps are insufficient to allow
  // frame::retrieve_receiver() to work. Currently they are expected
  // to be byte offsets from the Java stack pointer for maximum code
  // sharing between platforms. Note that currently biased locking
  // will never cause Class instances to be biased but this code
  // handles the static synchronized case as well.
  ByteSize _compiled_synchronized_native_basic_lock_owner_sp_offset;
  ByteSize _compiled_synchronized_native_basic_lock_sp_offset;

  friend class nmethodLocker;

  // For native wrappers
  nmethod(methodOop method,
          int nmethod_size,
          CodeOffsets* offsets,
          CodeBuffer *code_buffer,
          int frame_size,
          ByteSize basic_lock_owner_sp_offset, /* synchronized natives only */
          ByteSize basic_lock_sp_offset,       /* synchronized natives only */
          OopMapSet* oop_maps);

#ifdef HAVE_DTRACE_H
  // For dtrace probe wrappers
  nmethod(methodOop method,
          int nmethod_size,
          CodeOffsets* offsets,
          CodeBuffer *code_buffer,
          int frame_size);
#endif // def HAVE_DTRACE_H

  // Creation support
  nmethod(methodOop method,
          int nmethod_size,
          int compile_id,
          int entry_bci,
          CodeOffsets* offsets,
          int orig_pc_offset,
          DebugInformationRecorder *recorder,
          Dependencies* dependencies,
          CodeBuffer *code_buffer,
          int frame_size,
          OopMapSet* oop_maps,
          ExceptionHandlerTable* handler_table,
          ImplicitExceptionTable* nul_chk_table,
          AbstractCompiler* compiler,
          int comp_level);

  // helper methods
  void* operator new(size_t size, int nmethod_size);

  const char* reloc_string_for(u_char* begin, u_char* end);
  void make_not_entrant_or_zombie(int state);
  void inc_decompile_count();

  // used to check that writes to nmFlags are done consistently.
  static void check_safepoint() PRODUCT_RETURN;

  // Used to manipulate the exception cache
  void add_exception_cache_entry(ExceptionCache* new_entry);
  ExceptionCache* exception_cache_entry_for_exception(Handle exception);

  // Inform external interfaces that a compiled method has been unloaded
  inline void post_compiled_method_unload();

 public:
  // create nmethod with entry_bci
  static nmethod* new_nmethod(methodHandle method,
                              int compile_id,
                              int entry_bci,
                              CodeOffsets* offsets,
                              int orig_pc_offset,
                              DebugInformationRecorder* recorder,
                              Dependencies* dependencies,
                              CodeBuffer *code_buffer,
                              int frame_size,
                              OopMapSet* oop_maps,
                              ExceptionHandlerTable* handler_table,
                              ImplicitExceptionTable* nul_chk_table,
                              AbstractCompiler* compiler,
                              int comp_level);

  static nmethod* new_native_nmethod(methodHandle method,
                                     CodeBuffer *code_buffer,
                                     int vep_offset,
                                     int frame_complete,
                                     int frame_size,
                                     ByteSize receiver_sp_offset,
                                     ByteSize basic_lock_sp_offset,
                                     OopMapSet* oop_maps);

#ifdef HAVE_DTRACE_H
  // The method we generate for a dtrace probe has to look
  // like an nmethod as far as the rest of the system is concerned,
  // which is somewhat unfortunate.
  static nmethod* new_dtrace_nmethod(methodHandle method,
                                     CodeBuffer *code_buffer,
                                     int vep_offset,
                                     int trap_offset,
                                     int frame_complete,
                                     int frame_size);

  int trap_offset() const      { return _trap_offset; }
  address trap_address() const { return code_begin() + _trap_offset; }

#endif // def HAVE_DTRACE_H

  // accessors
  methodOop method() const                        { return _method; }
  AbstractCompiler* compiler() const              { return _compiler; }

#ifndef PRODUCT
  bool has_debug_info() const                     { return _has_debug_info; }
  void set_has_debug_info(bool f)                 { _has_debug_info = f; }
#endif // NOT PRODUCT

  // type info
  bool is_nmethod() const                         { return true; }
  bool is_java_method() const                     { return !method()->is_native(); }
  bool is_native_method() const                   { return method()->is_native(); }
  bool is_osr_method() const                      { return _entry_bci != InvocationEntryBci; }

  bool is_compiled_by_c1() const;
  bool is_compiled_by_c2() const;

  // boundaries for different parts
  address code_begin         () const             { return _entry_point; }
  address code_end           () const             { return           header_begin() + _stub_offset          ; }
  address exception_begin    () const             { return           header_begin() + _exception_offset     ; }
  address deopt_handler_begin() const             { return           header_begin() + _deoptimize_offset    ; }
  address stub_begin         () const             { return           header_begin() + _stub_offset          ; }
  address stub_end           () const             { return           header_begin() + _consts_offset        ; }
  address consts_begin       () const             { return           header_begin() + _consts_offset        ; }
  address consts_end         () const             { return           header_begin() + _scopes_data_offset   ; }
  address scopes_data_begin  () const             { return           header_begin() + _scopes_data_offset   ; }
  address scopes_data_end    () const             { return           header_begin() + _scopes_pcs_offset    ; }
  PcDesc* scopes_pcs_begin   () const             { return (PcDesc*)(header_begin() + _scopes_pcs_offset   ); }
  PcDesc* scopes_pcs_end     () const             { return (PcDesc*)(header_begin() + _dependencies_offset ); }
  address dependencies_begin () const             { return           header_begin() + _dependencies_offset ; }
  address dependencies_end   () const             { return           header_begin() + _handler_table_offset; }
  address handler_table_begin() const             { return           header_begin() + _handler_table_offset; }
  address handler_table_end  () const             { return           header_begin() + _nul_chk_table_offset; }
  address nul_chk_table_begin() const             { return           header_begin() + _nul_chk_table_offset; }
  address nul_chk_table_end  () const             { return           header_begin() + _nmethod_end_offset   ; }
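
  // Note: adjacent sections share a boundary, e.g. code_end() == stub_begin()
  // and stub_end() == consts_begin(), so the blob is partitioned by the
  // offsets above; each section size below is just the difference of its two
  // boundaries.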

  int code_size         () const                  { return      code_end         () -      code_begin         (); }
  int stub_size         () const                  { return      stub_end         () -      stub_begin         (); }
  int consts_size       () const                  { return      consts_end       () -      consts_begin       (); }
  int scopes_data_size  () const                  { return      scopes_data_end  () -      scopes_data_begin  (); }
  int scopes_pcs_size   () const                  { return (intptr_t)scopes_pcs_end   () - (intptr_t)scopes_pcs_begin   (); }
  int dependencies_size () const                  { return      dependencies_end () -      dependencies_begin (); }
  int handler_table_size() const                  { return      handler_table_end() -      handler_table_begin(); }
  int nul_chk_table_size() const                  { return      nul_chk_table_end() -      nul_chk_table_begin(); }

  int total_size        () const;

  bool code_contains         (address addr) const { return code_begin         () <= addr && addr < code_end         (); }
  bool stub_contains         (address addr) const { return stub_begin         () <= addr && addr < stub_end         (); }
  bool consts_contains       (address addr) const { return consts_begin       () <= addr && addr < consts_end       (); }
  bool scopes_data_contains  (address addr) const { return scopes_data_begin  () <= addr && addr < scopes_data_end  (); }
  bool scopes_pcs_contains   (PcDesc* addr) const { return scopes_pcs_begin   () <= addr && addr < scopes_pcs_end   (); }
  bool handler_table_contains(address addr) const { return handler_table_begin() <= addr && addr < handler_table_end(); }
  bool nul_chk_table_contains(address addr) const { return nul_chk_table_begin() <= addr && addr < nul_chk_table_end(); }

  // entry points
  address entry_point() const                     { return _entry_point;          } // normal entry point
  address verified_entry_point() const            { return _verified_entry_point; } // if klass is correct

  // flag accessing and manipulation
  bool  is_in_use() const                         { return flags.state == alive; }
  bool  is_alive() const                          { return flags.state == alive || flags.state == not_entrant; }
  bool  is_not_entrant() const                    { return flags.state == not_entrant; }
  bool  is_zombie() const                         { return flags.state == zombie; }
  bool  is_unloaded() const                       { return flags.state == unloaded; }

  // Make the nmethod non-entrant. The nmethod will continue to be alive.
  // It is used when an uncommon trap happens.
  void  make_not_entrant()                        { make_not_entrant_or_zombie(not_entrant); }
  void  make_zombie()                             { make_not_entrant_or_zombie(zombie); }

  // used by jvmti to track if the unload event has been reported
  bool  unload_reported()                         { return _unload_reported; }
  void  set_unload_reported()                     { _unload_reported = true; }

  bool  is_marked_for_deoptimization() const      { return _markedForDeoptimization; }
  void  mark_for_deoptimization()                 { _markedForDeoptimization = true; }

  void  make_unloaded(BoolObjectClosure* is_alive, oop cause);

  bool  has_dependencies()                        { return dependencies_size() != 0; }
  void  flush_dependencies(BoolObjectClosure* is_alive);
  bool  has_flushed_dependencies()                { return flags.hasFlushedDependencies; }
  void  set_has_flushed_dependencies()            {
    check_safepoint();
    assert(!has_flushed_dependencies(), "should only happen once");
    flags.hasFlushedDependencies = 1;
  }

  bool  is_marked_for_reclamation() const         { return flags.markedForReclamation; }
  void  mark_for_reclamation()                    { check_safepoint(); flags.markedForReclamation = 1; }
  void  unmark_for_reclamation()                  { check_safepoint(); flags.markedForReclamation = 0; }

  bool  has_unsafe_access() const                 { return flags.has_unsafe_access; }
  void  set_has_unsafe_access(bool z)             { flags.has_unsafe_access = z; }

  int   level() const                             { return flags.level; }
  void  set_level(int newLevel)                   { check_safepoint(); flags.level = newLevel; }

  int   comp_level() const                        { return _comp_level; }

  int   version() const                           { return flags.version; }
  void  set_version(int v);

  // Non-perm oop support
  bool  on_scavenge_root_list() const                  { return (_scavenge_root_state & 1) != 0; }
 protected:
  enum { npl_on_list = 0x01, npl_marked = 0x10 };
  void  set_on_scavenge_root_list()                    { _scavenge_root_state = npl_on_list; }
  void  clear_on_scavenge_root_list()                  { _scavenge_root_state = 0; }
  // assertion-checking and pruning logic uses the bits of _scavenge_root_state
#ifndef PRODUCT
  void  set_scavenge_root_marked()                     { _scavenge_root_state |= npl_marked; }
  void  clear_scavenge_root_marked()                   { _scavenge_root_state &= ~npl_marked; }
  bool  scavenge_root_not_marked()                     { return (_scavenge_root_state & ~npl_on_list) == 0; }
  // N.B. there is no positive marked query, and we only use the not_marked query for asserts.
#endif //PRODUCT
  nmethod* scavenge_root_link() const                  { return _scavenge_root_link; }
  void     set_scavenge_root_link(nmethod *n)          { _scavenge_root_link = n; }

 public:

  // Sweeper support
  long  stack_traversal_mark()                    { return _stack_traversal_mark; }
  void  set_stack_traversal_mark(long l)          { _stack_traversal_mark = l; }

  // Exception cache support
  ExceptionCache* exception_cache() const         { return _exception_cache; }
  void set_exception_cache(ExceptionCache *ec)    { _exception_cache = ec; }
  address handler_for_exception_and_pc(Handle exception, address pc);
  void add_handler_for_exception_and_pc(Handle exception, address pc, address handler);
  void remove_from_exception_cache(ExceptionCache* ec);

  // implicit exceptions support
  address continuation_for_implicit_exception(address pc);

  // On-stack replacement support
  int   osr_entry_bci() const                     { assert(_entry_bci != InvocationEntryBci, "wrong kind of nmethod"); return _entry_bci; }
  address  osr_entry() const                      { assert(_entry_bci != InvocationEntryBci, "wrong kind of nmethod"); return _osr_entry_point; }
  void  invalidate_osr_method();
  nmethod* osr_link() const                       { return _osr_link; }
  void     set_osr_link(nmethod *n)               { _osr_link = n; }

  // tells whether frames described by this nmethod can be deoptimized
  // note: native wrappers cannot be deoptimized.
  bool can_be_deoptimized() const { return is_java_method(); }

  // Inline cache support
  void clear_inline_caches();
  void cleanup_inline_caches();
  bool inlinecache_check_contains(address addr) const {
    return (addr >= instructions_begin() && addr < verified_entry_point());
  }

  // unlink and deallocate this nmethod
  // Only the NMethodSweeper class is expected to use this. NMethodSweeper is not
  // expected to use any other private methods/data in this class.

 protected:
  void flush();

 public:
  // If this returns true, it is unsafe to remove this nmethod even though it is
  // a zombie nmethod, since the VM might have a reference to it. Should only be
  // called from a safepoint.
  bool is_locked_by_vm() const                    { return _lock_count > 0; }

  // See comment at definition of _stack_traversal_mark
  void mark_as_seen_on_stack();
  bool can_not_entrant_be_converted();

  // Evolution support. We make old (discarded) compiled methods point to new methodOops.
  void set_method(methodOop method) { _method = method; }

  // GC support
  void do_unloading(BoolObjectClosure* is_alive, OopClosure* keep_alive,
                    bool unloading_occurred);
  bool can_unload(BoolObjectClosure* is_alive, OopClosure* keep_alive,
                  oop* root, bool unloading_occurred);

  void preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map,
                                     OopClosure* f);
  virtual void oops_do(OopClosure* f) { oops_do(f, false); }
  void         oops_do(OopClosure* f, bool do_strong_roots_only);
  bool detect_scavenge_root_oops();
  void verify_scavenge_root_oops() PRODUCT_RETURN;

  bool test_set_oops_do_mark();
  static void oops_do_marking_prologue();
  static void oops_do_marking_epilogue();
  static bool oops_do_marking_is_active() { return _oops_do_mark_nmethods != NULL; }
  DEBUG_ONLY(bool test_oops_do_mark() { return _oops_do_mark_link != NULL; })

  // ScopeDesc for an instruction
  ScopeDesc* scope_desc_at(address pc);

 private:
  ScopeDesc* scope_desc_in(address begin, address end);

  address* orig_pc_addr(const frame* fr) { return (address*) ((address)fr->unextended_sp() + _orig_pc_offset); }

  PcDesc* find_pc_desc_internal(address pc, bool approximate);

  PcDesc* find_pc_desc(address pc, bool approximate) {
    PcDesc* desc = _pc_desc_cache.last_pc_desc();
    if (desc != NULL && desc->pc_offset() == pc - instructions_begin()) {
      return desc;
    }
    return find_pc_desc_internal(pc, approximate);
  }

 public:
  // ScopeDesc retrieval operation
  PcDesc* pc_desc_at(address pc)   { return find_pc_desc(pc, false); }
  // pc_desc_near returns the first PcDesc at or after the given pc.
  PcDesc* pc_desc_near(address pc) { return find_pc_desc(pc, true); }

 public:
  // copying of debugging information
  void copy_scopes_pcs(PcDesc* pcs, int count);
  void copy_scopes_data(address buffer, int size);

  // deopt
  // return true if the pc is one we would expect if the frame is being deopted.
  bool is_deopt_pc(address pc);
  // Accessor/mutator for the original pc of a frame before it was deopted.
  address get_original_pc(const frame* fr) { return *orig_pc_addr(fr); }
  void    set_original_pc(const frame* fr, address pc) { *orig_pc_addr(fr) = pc; }
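
  // Illustrative flow (a sketch, not a verbatim excerpt of the deopt code):
  // when a frame is deoptimized, its current pc is stashed in the slot at
  // _orig_pc_offset and the frame is redirected to the deopt handler, so that
  // later code can recognize the patched frame and recover the real pc:
  //
  //   nm->set_original_pc(fr, fr->pc());   // save the real pc in the frame
  //   // ... patch the frame's pc to deopt_handler_begin() ...
  //   if (nm->is_deopt_pc(fr->pc()))  pc = nm->get_original_pc(fr);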

  // jvmti support:
  void post_compiled_method_load_event();

  // verify operations
  void verify();
  void verify_scopes();
  void verify_interrupt_point(address interrupt_point);

  // printing support
  void print()                          const;
  void print_code();
  void print_relocations()                        PRODUCT_RETURN;
  void print_pcs()                                PRODUCT_RETURN;
  void print_scopes()                             PRODUCT_RETURN;
  void print_dependencies()                       PRODUCT_RETURN;
  void print_value_on(outputStream* st) const     PRODUCT_RETURN;
  void print_calls(outputStream* st)              PRODUCT_RETURN;
  void print_handler_table()                      PRODUCT_RETURN;
  void print_nul_chk_table()                      PRODUCT_RETURN;
  void print_nmethod(bool print_code);

  void print_on(outputStream* st, const char* title) const;

  // Logging
  void log_identity(xmlStream* log) const;
  void log_new_nmethod() const;
  void log_state_change(int state) const;

  // Prints a comment for one native instruction (reloc info, pc desc)
  void print_code_comment_on(outputStream* st, int column, address begin, address end);
  static void print_statistics()                  PRODUCT_RETURN;

  // Compiler task identification.  Note that all OSR methods
  // are numbered in an independent sequence if CICountOSR is true,
  // and native method wrappers are also numbered independently if
  // CICountNative is true.
  int  compile_id() const                         { return _compile_id; }
  const char* compile_kind() const;

  // For debugging
  // CompiledIC*    IC_at(char* p) const;
  // PrimitiveIC*   primitiveIC_at(char* p) const;
  oop embeddedOop_at(address p);

  // tells if any of this method's dependencies have been invalidated
  // (this is expensive!)
  bool check_all_dependencies();

  // tells if this compiled method is dependent on the given changes,
  // and the changes have invalidated it
  bool check_dependency_on(DepChange& changes);

  // Evolution support. Tells if this compiled method is dependent on any of
  // methods m() of class dependee, such that if m() in dependee is replaced,
  // this compiled method will have to be deoptimized.
  bool is_evol_dependent_on(klassOop dependee);

  // Fast breakpoint support. Tells if this compiled method is
  // dependent on the given method. Returns true if this nmethod
  // corresponds to the given method as well.
  bool is_dependent_on_method(methodOop dependee);

  // is it ok to patch at address?
  bool is_patchable_at(address instr_address);

  // UseBiasedLocking support
  ByteSize compiled_synchronized_native_basic_lock_owner_sp_offset() {
    return _compiled_synchronized_native_basic_lock_owner_sp_offset;
  }
  ByteSize compiled_synchronized_native_basic_lock_sp_offset() {
    return _compiled_synchronized_native_basic_lock_sp_offset;
  }

  // support for code generation
  static int verified_entry_point_offset()        { return offset_of(nmethod, _verified_entry_point); }
  static int osr_entry_point_offset()             { return offset_of(nmethod, _osr_entry_point); }
  static int entry_bci_offset()                   { return offset_of(nmethod, _entry_bci); }

};

// Locks an nmethod so its code will not get removed, even if it is a zombie/not_entrant method
class nmethodLocker : public StackObj {
  nmethod* _nm;

  static void lock_nmethod(nmethod* nm);   // note: nm can be NULL
  static void unlock_nmethod(nmethod* nm); // (ditto)

 public:
  nmethodLocker(address pc); // derive nm from pc
  nmethodLocker(nmethod *nm) { _nm = nm; lock_nmethod(_nm); }
  nmethodLocker() { _nm = NULL; }
  ~nmethodLocker() { unlock_nmethod(_nm); }

  nmethod* code() { return _nm; }
  void set_code(nmethod* new_nm) {
    unlock_nmethod(_nm);   // note: this works even if _nm == new_nm.
    _nm = new_nm;
    lock_nmethod(_nm);
  }
};
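
// Usage sketch (illustrative only; 'nm' is an assumed nmethod* already in
// hand): the RAII locker bumps _lock_count for the extent of a scope, so the
// sweeper will not flush the nmethod while it is being examined:
//
//   {
//     nmethodLocker nml(nm);     // locks in the constructor
//     // ... safely inspect nm->code_begin(), nm->print(), etc. ...
//   }                            // unlocks in the destructor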
