/*
 * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_CODE_NMETHOD_HPP
#define SHARE_VM_CODE_NMETHOD_HPP

#include "code/codeBlob.hpp"
#include "code/pcDesc.hpp"

// This class is used internally by nmethods, to cache
// exception/pc/handler information.

class ExceptionCache : public CHeapObj {
  friend class VMStructs;
 private:
  enum { cache_size = 16 };
  klassOop _exception_type;
  address  _pc[cache_size];
  address  _handler[cache_size];
  int      _count;
  ExceptionCache* _next;

  address pc_at(int index)                     { assert(index >= 0 && index < count(), ""); return _pc[index]; }
  void    set_pc_at(int index, address a)      { assert(index >= 0 && index < cache_size, ""); _pc[index] = a; }
  address handler_at(int index)                { assert(index >= 0 && index < count(), ""); return _handler[index]; }
  void    set_handler_at(int index, address a) { assert(index >= 0 && index < cache_size, ""); _handler[index] = a; }
  int     count()                              { return _count; }
  void    increment_count()                    { _count++; }

 public:

  ExceptionCache(Handle exception, address pc, address handler);

  klassOop  exception_type()             { return _exception_type; }
  klassOop* exception_type_addr()        { return &_exception_type; }
  ExceptionCache* next()                 { return _next; }
  void      set_next(ExceptionCache* ec) { _next = ec; }

  address match(Handle exception, address pc);
  bool    match_exception_with_space(Handle exception);
  address test_address(address addr);
  bool    add_address_and_handler(address addr, address handler);
};
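
// Schematic of how the cache above is probed and filled from
// nmethod::handler_for_exception_and_pc() (a sketch for exposition only;
// the helper name probe_exception_cache() is made up here):
//
//   address probe_exception_cache(nmethod* nm, Handle exception, address pc) {
//     for (ExceptionCache* ec = nm->exception_cache(); ec != NULL; ec = ec->next()) {
//       address handler = ec->match(exception, pc);  // hit: same exception type and pc
//       if (handler != NULL)  return handler;
//     }
//     return NULL;  // miss: the caller computes the handler and records it
//                   // via add_handler_for_exception_and_pc()
//   }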
// cache pc descs found in earlier inquiries
class PcDescCache VALUE_OBJ_CLASS_SPEC {
  friend class VMStructs;
 private:
  enum { cache_size = 4 };
  PcDesc* _last_pc_desc;         // most recent pc_desc found
  PcDesc* _pc_descs[cache_size]; // last cache_size pc_descs found
 public:
  PcDescCache() { debug_only(_last_pc_desc = NULL); }
  void    reset_to(PcDesc* initial_pc_desc);
  PcDesc* find_pc_desc(int pc_offset, bool approximate);
  void    add_pc_desc(PcDesc* pc_desc);
  PcDesc* last_pc_desc() { return _last_pc_desc; }
};


// nmethods (native methods) are the compiled code versions of Java methods.
//
// An nmethod contains:
// - header                 (the nmethod structure)
//  [Relocation]
// - relocation information
// - constant part          (doubles, longs and floats used in nmethod)
// - oop table
//  [Code]
// - code body
// - exception handler
// - stub code
//  [Debugging information]
// - oop array
// - data array
// - pcs
//  [Exception handler table]
// - handler entry point array
//  [Implicit Null Pointer exception table]
// - implicit null table array

class Dependencies;
class ExceptionHandlerTable;
class ImplicitExceptionTable;
class AbstractCompiler;
class xmlStream;
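
// All of the parts listed above live in one contiguous CodeBlob allocation;
// the _*_offset fields of the class below carve that allocation into
// sections relative to header_begin(). Schematically (this is just the
// arithmetic performed by the boundary accessors declared further down,
// not additional API):
//
//   header_begin()                                        // start of the blob
//   consts_begin()      == header_begin() + _consts_offset
//   insts_begin()       == header_begin() + code_offset()
//   stub_begin()        == header_begin() + _stub_offset
//   oops_begin()        == header_begin() + _oops_offset
//   scopes_data_begin() == header_begin() + _scopes_data_offset
//   ...
//   nul_chk_table_end() == header_begin() + _nmethod_end_offset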
class nmethod : public CodeBlob {
  friend class VMStructs;
  friend class NMethodSweeper;
  friend class CodeCache;  // non-perm oops
 private:
  // Shared fields for all nmethod's
  methodOop _method;
  int       _entry_bci;  // != InvocationEntryBci if this nmethod is an on-stack replacement method
  jmethodID _jmethod_id; // Cache of method()->jmethod_id()

  // To support simple linked-list chaining of nmethods:
  nmethod*  _osr_link;           // from instanceKlass::osr_nmethods_head
  nmethod*  _scavenge_root_link; // from CodeCache::scavenge_root_nmethods
  nmethod*  _saved_nmethod_link; // from CodeCache::speculatively_disconnect

  static nmethod* volatile _oops_do_mark_nmethods;
  nmethod*        volatile _oops_do_mark_link;

  AbstractCompiler* _compiler; // The compiler which compiled this nmethod

  // offsets for entry points
  address _entry_point;          // entry point with class check
  address _verified_entry_point; // entry point without class check
  address _osr_entry_point;      // entry point for on stack replacement

  // Offsets for different nmethod parts
  int _exception_offset;
  // All deoptees will resume execution at the location described by
  // this offset.
  int _deoptimize_offset;
  // All deoptees at a MethodHandle call site will resume execution
  // at the location described by this offset.
  int _deoptimize_mh_offset;
  // Offset of the unwind handler if it exists
  int _unwind_handler_offset;

#ifdef HAVE_DTRACE_H
  int _trap_offset;
#endif // def HAVE_DTRACE_H
  int _consts_offset;
  int _stub_offset;
  int _oops_offset; // offset to where embedded oop table begins (inside data)
  int _scopes_data_offset;
  int _scopes_pcs_offset;
  int _dependencies_offset;
  int _handler_table_offset;
  int _nul_chk_table_offset;
  int _nmethod_end_offset;

  // location in frame (offset for sp) that deopt can store the original
  // pc during a deopt.
  int _orig_pc_offset;

  int _compile_id; // which compilation made this nmethod
  int _comp_level; // compilation level

  // protected by CodeCache_lock
  bool _has_flushed_dependencies;   // Used for maintenance of dependencies (CodeCache_lock)
  bool _speculatively_disconnected; // Marked for potential unload

  bool _marked_for_reclamation;     // Used by NMethodSweeper (set only by sweeper)
  bool _marked_for_deoptimization;  // Used for stack deoptimization

  // used by jvmti to track if an unload event has been posted for this nmethod.
  bool _unload_reported;

  // set during construction
  unsigned int _has_unsafe_access:1;         // May fault due to unsafe access.
  unsigned int _has_method_handle_invokes:1; // Has this method MethodHandle invokes?

  // Protected by Patching_lock
  unsigned char _state; // {alive, not_entrant, zombie, unloaded}

#ifdef ASSERT
  bool _oops_are_stale; // indicates that it's no longer safe to access the oops section
#endif

  enum { alive       = 0,
         not_entrant = 1, // uncommon trap has happened but activations may still exist
         zombie      = 2,
         unloaded    = 3 };


  jbyte _scavenge_root_state;

  NOT_PRODUCT(bool _has_debug_info; )

  // Nmethod Flushing lock (if non-zero, then the nmethod is not removed)
  jint _lock_count;

  // not_entrant method removal. Each mark_sweep pass will update
  // this mark to the current sweep invocation count if it is seen on the
  // stack. A not_entrant method can be removed when there are no
  // more activations, i.e., when the _stack_traversal_mark is less than the
  // current sweep traversal index.
  long _stack_traversal_mark;

  ExceptionCache* _exception_cache;
  PcDescCache     _pc_desc_cache;

  // These are used for compiled synchronized native methods to
  // locate the owner and stack slot for the BasicLock so that we can
  // properly revoke the bias of the owner if necessary. They are
  // needed because there is no debug information for compiled native
  // wrappers and the oop maps are insufficient to allow
  // frame::retrieve_receiver() to work. Currently they are expected
  // to be byte offsets from the Java stack pointer for maximum code
  // sharing between platforms. Note that currently biased locking
  // will never cause Class instances to be biased but this code
  // handles the static synchronized case as well.
  // JVMTI's GetLocalInstance() also uses these offsets to find the receiver
  // for non-static native wrapper frames.
  ByteSize _native_receiver_sp_offset;
  ByteSize _native_basic_lock_sp_offset;
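
  // Sketch of how a consumer such as JVMTI's GetLocalInstance() can recover
  // the receiver from these offsets (schematic only; the helper name
  // receiver_of() is made up for this illustration, and the real
  // frame-walking code handles more cases):
  //
  //   oop receiver_of(frame* fr, nmethod* nm) {
  //     assert(nm->is_native_method(), "offsets only describe native wrappers");
  //     ByteSize off = nm->native_receiver_sp_offset();  // byte offset from Java sp
  //     return *(oop*) ((address) fr->unextended_sp() + in_bytes(off));
  //   }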

  friend class nmethodLocker;

  // For native wrappers
  nmethod(methodOop method,
          int nmethod_size,
          CodeOffsets* offsets,
          CodeBuffer* code_buffer,
          int frame_size,
          ByteSize basic_lock_owner_sp_offset, /* synchronized natives only */
          ByteSize basic_lock_sp_offset,       /* synchronized natives only */
          OopMapSet* oop_maps);

#ifdef HAVE_DTRACE_H
  // For native wrappers
  nmethod(methodOop method,
          int nmethod_size,
          CodeOffsets* offsets,
          CodeBuffer* code_buffer,
          int frame_size);
#endif // def HAVE_DTRACE_H

  // Creation support
  nmethod(methodOop method,
          int nmethod_size,
          int compile_id,
          int entry_bci,
          CodeOffsets* offsets,
          int orig_pc_offset,
          DebugInformationRecorder* recorder,
          Dependencies* dependencies,
          CodeBuffer* code_buffer,
          int frame_size,
          OopMapSet* oop_maps,
          ExceptionHandlerTable* handler_table,
          ImplicitExceptionTable* nul_chk_table,
          AbstractCompiler* compiler,
          int comp_level);

  // helper methods
  void* operator new(size_t size, int nmethod_size);

  const char* reloc_string_for(u_char* begin, u_char* end);
  // Returns true if this thread changed the state of the nmethod or
  // false if another thread performed the transition.
  bool make_not_entrant_or_zombie(unsigned int state);
  void inc_decompile_count();

  // Used to manipulate the exception cache
  void add_exception_cache_entry(ExceptionCache* new_entry);
  ExceptionCache* exception_cache_entry_for_exception(Handle exception);

  // Inform external interfaces that a compiled method has been unloaded
  void post_compiled_method_unload();

  // Initialize fields to their default values
  void init_defaults();

 public:
  // create nmethod with entry_bci
  static nmethod* new_nmethod(methodHandle method,
                              int compile_id,
                              int entry_bci,
                              CodeOffsets* offsets,
                              int orig_pc_offset,
                              DebugInformationRecorder* recorder,
                              Dependencies* dependencies,
                              CodeBuffer* code_buffer,
                              int frame_size,
                              OopMapSet* oop_maps,
                              ExceptionHandlerTable* handler_table,
                              ImplicitExceptionTable* nul_chk_table,
                              AbstractCompiler* compiler,
                              int comp_level);

  static nmethod* new_native_nmethod(methodHandle method,
                                     CodeBuffer* code_buffer,
                                     int vep_offset,
                                     int frame_complete,
                                     int frame_size,
                                     ByteSize receiver_sp_offset,
                                     ByteSize basic_lock_sp_offset,
                                     OopMapSet* oop_maps);
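
  // The factories above allocate the nmethod in the CodeCache and return
  // NULL if that allocation fails. Schematic of a compiler back end handing
  // off its finished code (a sketch; the locking and registration performed
  // by the real caller, ciEnv::register_method(), is omitted):
  //
  //   nmethod* nm = nmethod::new_nmethod(method, compile_id, entry_bci,
  //                                      &offsets, orig_pc_offset, recorder,
  //                                      deps, &code_buffer, frame_size,
  //                                      oop_maps, &handler_table,
  //                                      &nul_chk_table, compiler, comp_level);
  //   if (nm == NULL) { /* CodeCache full: the compile is abandoned */ }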

#ifdef HAVE_DTRACE_H
  // The method we generate for a dtrace probe has to look
  // like an nmethod as far as the rest of the system is concerned
  // which is somewhat unfortunate.
  static nmethod* new_dtrace_nmethod(methodHandle method,
                                     CodeBuffer* code_buffer,
                                     int vep_offset,
                                     int trap_offset,
                                     int frame_complete,
                                     int frame_size);

  int trap_offset() const      { return _trap_offset; }
  address trap_address() const { return insts_begin() + _trap_offset; }

#endif // def HAVE_DTRACE_H

  // accessors
  methodOop method() const           { return _method; }
  AbstractCompiler* compiler() const { return _compiler; }

#ifndef PRODUCT
  bool has_debug_info() const     { return _has_debug_info; }
  void set_has_debug_info(bool f) { _has_debug_info = f; }
#endif // NOT PRODUCT

  // type info
  bool is_nmethod() const       { return true; }
  bool is_java_method() const   { return !method()->is_native(); }
  bool is_native_method() const { return method()->is_native(); }
  bool is_osr_method() const    { return _entry_bci != InvocationEntryBci; }

  bool is_compiled_by_c1() const;
  bool is_compiled_by_c2() const;
  bool is_compiled_by_shark() const;

  // boundaries for different parts
  address consts_begin          () const { return header_begin() + _consts_offset        ; }
  address consts_end            () const { return header_begin() +  code_offset()        ; }
  address insts_begin           () const { return header_begin() +  code_offset()        ; }
  address insts_end             () const { return header_begin() + _stub_offset          ; }
  address stub_begin            () const { return header_begin() + _stub_offset          ; }
  address stub_end              () const { return header_begin() + _oops_offset          ; }
  address exception_begin       () const { return header_begin() + _exception_offset     ; }
  address deopt_handler_begin   () const { return header_begin() + _deoptimize_offset    ; }
  address deopt_mh_handler_begin() const { return header_begin() + _deoptimize_mh_offset ; }
  address unwind_handler_begin  () const { return _unwind_handler_offset != -1 ? (header_begin() + _unwind_handler_offset) : NULL; }
  oop*    oops_begin            () const { return (oop*) (header_begin() + _oops_offset)       ; }
  oop*    oops_end              () const { return (oop*) (header_begin() + _scopes_data_offset); }

  address scopes_data_begin  () const { return           header_begin() + _scopes_data_offset  ; }
  address scopes_data_end    () const { return           header_begin() + _scopes_pcs_offset   ; }
  PcDesc* scopes_pcs_begin   () const { return (PcDesc*)(header_begin() + _scopes_pcs_offset  ); }
  PcDesc* scopes_pcs_end     () const { return (PcDesc*)(header_begin() + _dependencies_offset); }
  address dependencies_begin () const { return           header_begin() + _dependencies_offset ; }
  address dependencies_end   () const { return           header_begin() + _handler_table_offset; }
  address handler_table_begin() const { return           header_begin() + _handler_table_offset; }
  address handler_table_end  () const { return           header_begin() + _nul_chk_table_offset; }
  address nul_chk_table_begin() const { return           header_begin() + _nul_chk_table_offset; }
  address nul_chk_table_end  () const { return           header_begin() + _nmethod_end_offset  ; }

  // Sizes
  int consts_size       () const { return            consts_end       () -            consts_begin       (); }
  int insts_size        () const { return            insts_end        () -            insts_begin        (); }
  int stub_size         () const { return            stub_end         () -            stub_begin         (); }
  int oops_size         () const { return (address)  oops_end         () - (address)  oops_begin         (); }
  int scopes_data_size  () const { return            scopes_data_end  () -            scopes_data_begin  (); }
  int scopes_pcs_size   () const { return (intptr_t) scopes_pcs_end   () - (intptr_t) scopes_pcs_begin   (); }
  int dependencies_size () const { return            dependencies_end () -            dependencies_begin (); }
  int handler_table_size() const { return            handler_table_end() -            handler_table_begin(); }
  int nul_chk_table_size() const { return            nul_chk_table_end() -            nul_chk_table_begin(); }

  int total_size() const;

  // Containment
  bool consts_contains       (address addr) const { return consts_begin       () <= addr && addr < consts_end       (); }
  bool insts_contains        (address addr) const { return insts_begin        () <= addr && addr < insts_end        (); }
  bool stub_contains         (address addr) const { return stub_begin         () <= addr && addr < stub_end         (); }
  bool oops_contains         (oop*    addr) const { return oops_begin         () <= addr && addr < oops_end         (); }
  bool scopes_data_contains  (address addr) const { return scopes_data_begin  () <= addr && addr < scopes_data_end  (); }
  bool scopes_pcs_contains   (PcDesc* addr) const { return scopes_pcs_begin   () <= addr && addr < scopes_pcs_end   (); }
  bool handler_table_contains(address addr) const { return handler_table_begin() <= addr && addr < handler_table_end(); }
  bool nul_chk_table_contains(address addr) const { return nul_chk_table_begin() <= addr && addr < nul_chk_table_end(); }

  // entry points
  address entry_point() const          { return _entry_point;          } // normal entry point
  address verified_entry_point() const { return _verified_entry_point; } // if klass is correct
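
  // The two entry points reflect the calling convention noted on the fields
  // above: call sites that have already established the receiver's class
  // (e.g. after an inline-cache check) may enter at verified_entry_point(),
  // while other callers enter at entry_point(), which performs the class
  // check first. Schematically (a sketch, not real dispatch code):
  //
  //   address target = receiver_klass_already_checked
  //                      ? nm->verified_entry_point()
  //                      : nm->entry_point();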

  // flag accessing and manipulation
  bool is_in_use() const      { return _state == alive; }
  bool is_alive() const       { return _state == alive || _state == not_entrant; }
  bool is_not_entrant() const { return _state == not_entrant; }
  bool is_zombie() const      { return _state == zombie; }
  bool is_unloaded() const    { return _state == unloaded; }

  // Make the nmethod non entrant. The nmethod will continue to be
  // alive. It is used when an uncommon trap happens. Returns true
  // if this thread changed the state of the nmethod or false if
  // another thread performed the transition.
  bool make_not_entrant() { return make_not_entrant_or_zombie(not_entrant); }
  bool make_zombie()      { return make_not_entrant_or_zombie(zombie); }
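
  // Schematic invalidation as a caller might perform it (sketch only; a
  // not_entrant nmethod is later made zombie by the sweeper once no
  // activations remain):
  //
  //   if (nm->is_in_use()) {
  //     bool we_transitioned_it = nm->make_not_entrant();  // false if another
  //   }                                                    // thread raced us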

  // used by jvmti to track if the unload event has been reported
  bool unload_reported()     { return _unload_reported; }
  void set_unload_reported() { _unload_reported = true; }

  bool is_marked_for_deoptimization() const { return _marked_for_deoptimization; }
  void mark_for_deoptimization()            { _marked_for_deoptimization = true; }

  void make_unloaded(BoolObjectClosure* is_alive, oop cause);

  bool has_dependencies()             { return dependencies_size() != 0; }
  void flush_dependencies(BoolObjectClosure* is_alive);
  bool has_flushed_dependencies()     { return _has_flushed_dependencies; }
  void set_has_flushed_dependencies() {
    assert(!has_flushed_dependencies(), "should only happen once");
    _has_flushed_dependencies = 1;
  }

  bool is_marked_for_reclamation() const { return _marked_for_reclamation; }
  void mark_for_reclamation()            { _marked_for_reclamation = 1; }

  bool has_unsafe_access() const     { return _has_unsafe_access; }
  void set_has_unsafe_access(bool z) { _has_unsafe_access = z; }

  bool has_method_handle_invokes() const     { return _has_method_handle_invokes; }
  void set_has_method_handle_invokes(bool z) { _has_method_handle_invokes = z; }

  bool is_speculatively_disconnected() const  { return _speculatively_disconnected; }
  void set_speculatively_disconnected(bool z) { _speculatively_disconnected = z; }

  int comp_level() const { return _comp_level; }

  // Support for oops in scopes and relocs:
  // Note: index 0 is reserved for null.
  oop  oop_at(int index) const { return index == 0 ? (oop) NULL : *oop_addr_at(index); }
  oop* oop_addr_at(int index) const {  // for GC
    // relocation indexes are biased by 1 (because 0 is reserved)
    assert(index > 0 && index <= oops_size(), "must be a valid non-zero index");
    assert(!_oops_are_stale, "oops are stale");
    return &oops_begin()[index - 1];
  }

  void copy_oops(GrowableArray<jobject>* oops);
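
  // Sketch of walking the embedded oop table directly (these oops are among
  // the roots that oops_do() below visits; illustration only):
  //
  //   for (oop* p = nm->oops_begin(); p < nm->oops_end(); p++) {
  //     f->do_oop(p);  // f is an OopClosure
  //   }
  //
  // Indexed access through oop_at()/oop_addr_at() is 1-based because
  // relocations reserve index 0 for NULL.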

  // Relocation support
 private:
  void fix_oop_relocations(address begin, address end, bool initialize_immediates);
  inline void initialize_immediate_oop(oop* dest, jobject handle);

 public:
  void fix_oop_relocations(address begin, address end) { fix_oop_relocations(begin, end, false); }
  void fix_oop_relocations()                           { fix_oop_relocations(NULL, NULL, false); }

  bool is_at_poll_return(address pc);
  bool is_at_poll_or_poll_return(address pc);

  // Non-perm oop support
  bool on_scavenge_root_list() const { return (_scavenge_root_state & 1) != 0; }
 protected:
  enum { npl_on_list = 0x01, npl_marked = 0x10 };
  void set_on_scavenge_root_list()   { _scavenge_root_state = npl_on_list; }
  void clear_on_scavenge_root_list() { _scavenge_root_state = 0; }
  // assertion-checking and pruning logic uses the bits of _scavenge_root_state
#ifndef PRODUCT
  void set_scavenge_root_marked()   { _scavenge_root_state |=  npl_marked; }
  void clear_scavenge_root_marked() { _scavenge_root_state &= ~npl_marked; }
  bool scavenge_root_not_marked()   { return (_scavenge_root_state & ~npl_on_list) == 0; }
  // N.B. there is no positive marked query, and we only use the not_marked query for asserts.
#endif //PRODUCT
  nmethod* scavenge_root_link() const         { return _scavenge_root_link; }
  void     set_scavenge_root_link(nmethod* n) { _scavenge_root_link = n; }

  nmethod* saved_nmethod_link() const         { return _saved_nmethod_link; }
  void     set_saved_nmethod_link(nmethod* n) { _saved_nmethod_link = n; }

 public:

  // Sweeper support
  long stack_traversal_mark()           { return _stack_traversal_mark; }
  void set_stack_traversal_mark(long l) { _stack_traversal_mark = l; }

  // Exception cache support
  ExceptionCache* exception_cache() const      { return _exception_cache; }
  void set_exception_cache(ExceptionCache* ec) { _exception_cache = ec; }
  address handler_for_exception_and_pc(Handle exception, address pc);
  void add_handler_for_exception_and_pc(Handle exception, address pc, address handler);
  void remove_from_exception_cache(ExceptionCache* ec);

  // implicit exceptions support
  address continuation_for_implicit_exception(address pc);

  // On-stack replacement support
  int     osr_entry_bci() const { assert(_entry_bci != InvocationEntryBci, "wrong kind of nmethod"); return _entry_bci; }
  address osr_entry() const     { assert(_entry_bci != InvocationEntryBci, "wrong kind of nmethod"); return _osr_entry_point; }
  void invalidate_osr_method();
  nmethod* osr_link() const     { return _osr_link; }
  void set_osr_link(nmethod* n) { _osr_link = n; }

  // tells whether frames described by this nmethod can be deoptimized
  // note: native wrappers cannot be deoptimized.
  bool can_be_deoptimized() const { return is_java_method(); }

  // Inline cache support
  void clear_inline_caches();
  void cleanup_inline_caches();
  bool inlinecache_check_contains(address addr) const {
    return (addr >= code_begin() && addr < verified_entry_point());
  }

  // unlink and deallocate this nmethod
  // Only the NMethodSweeper class is expected to use this. NMethodSweeper is not
  // expected to use any other private methods/data in this class.

 protected:
  void flush();

 public:
  // If returning true, it is unsafe to remove this nmethod even though it is a zombie
  // nmethod, since the VM might have a reference to it. Should only be called from a safepoint.
  bool is_locked_by_vm() const { return _lock_count > 0; }

  // See comment at definition of _stack_traversal_mark
  void mark_as_seen_on_stack();
  bool can_not_entrant_be_converted();
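
  // Sketch of the liveness test these hooks support (an illustration of the
  // _stack_traversal_mark invariant described with the field above, not the
  // actual NMethodSweeper code):
  //
  //   bool may_convert_to_zombie(nmethod* nm, long current_traversal) {
  //     return nm->is_not_entrant() &&
  //            nm->stack_traversal_mark() < current_traversal &&  // not seen on any stack
  //            !nm->is_locked_by_vm();                            // no nmethodLocker holds it
  //   }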

  // Evolution support. We make old (discarded) compiled methods point to new methodOops.
  void set_method(methodOop method) { _method = method; }

  // GC support
  void do_unloading(BoolObjectClosure* is_alive, OopClosure* keep_alive,
                    bool unloading_occurred);
  bool can_unload(BoolObjectClosure* is_alive, OopClosure* keep_alive,
                  oop* root, bool unloading_occurred);

  void preserve_callee_argument_oops(frame fr, const RegisterMap* reg_map,
                                     OopClosure* f);
  void oops_do(OopClosure* f) { oops_do(f, false); }
  void oops_do(OopClosure* f, bool do_strong_roots_only);
  bool detect_scavenge_root_oops();
  void verify_scavenge_root_oops() PRODUCT_RETURN;

  bool test_set_oops_do_mark();
  static void oops_do_marking_prologue();
  static void oops_do_marking_epilogue();
  static bool oops_do_marking_is_active() { return _oops_do_mark_nmethods != NULL; }
  DEBUG_ONLY(bool test_oops_do_mark() { return _oops_do_mark_link != NULL; })

  // ScopeDesc for an instruction
  ScopeDesc* scope_desc_at(address pc);

 private:
  ScopeDesc* scope_desc_in(address begin, address end);

  address* orig_pc_addr(const frame* fr) { return (address*) ((address)fr->unextended_sp() + _orig_pc_offset); }

  PcDesc* find_pc_desc_internal(address pc, bool approximate);

  PcDesc* find_pc_desc(address pc, bool approximate) {
    PcDesc* desc = _pc_desc_cache.last_pc_desc();
    if (desc != NULL && desc->pc_offset() == pc - code_begin()) {
      return desc;
    }
    return find_pc_desc_internal(pc, approximate);
  }

 public:
  // ScopeDesc retrieval operation
  PcDesc* pc_desc_at(address pc)   { return find_pc_desc(pc, false); }
  // pc_desc_near returns the first PcDesc at or after the given pc.
  PcDesc* pc_desc_near(address pc) { return find_pc_desc(pc, true); }

 public:
  // copying of debugging information
  void copy_scopes_pcs(PcDesc* pcs, int count);
  void copy_scopes_data(address buffer, int size);

  // Deopt
  // Return true if the PC is what one would expect if the frame is being deopted.
  bool is_deopt_pc      (address pc) { return is_deopt_entry(pc) || is_deopt_mh_entry(pc); }
  bool is_deopt_entry   (address pc) { return pc == deopt_handler_begin(); }
  bool is_deopt_mh_entry(address pc) { return pc == deopt_mh_handler_begin(); }
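
  // How a frame walker uses these predicates together with the original-pc
  // accessors that follow (schematic; the real logic lives in the frame
  // code, driven by get_deopt_original_pc()):
  //
  //   address pc = fr->pc();
  //   if (nm->is_deopt_pc(pc)) {
  //     // The return address was patched to enter the deopt handler;
  //     // recover the pc the frame was originally executing at.
  //     pc = nm->get_original_pc(fr);
  //   }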

  // Accessor/mutator for the original pc of a frame before a frame was deopted.
  address get_original_pc(const frame* fr)             { return *orig_pc_addr(fr); }
  void    set_original_pc(const frame* fr, address pc) { *orig_pc_addr(fr) = pc; }

  static address get_deopt_original_pc(const frame* fr);

  // MethodHandle
  bool is_method_handle_return(address return_pc);

  // jvmti support:
  void post_compiled_method_load_event();
  jmethodID get_and_cache_jmethod_id();

  // verify operations
  void verify();
  void verify_scopes();
  void verify_interrupt_point(address interrupt_point);

  // print compilation helper
  static void print_compilation(outputStream* st, const char* method_name, const char* title,
                                methodOop method, bool is_blocking, int compile_id, int bci, int comp_level);

  // printing support
  void print() const;
  void print_code();
  void print_relocations()                    PRODUCT_RETURN;
  void print_pcs()                            PRODUCT_RETURN;
  void print_scopes()                         PRODUCT_RETURN;
  void print_dependencies()                   PRODUCT_RETURN;
  void print_value_on(outputStream* st) const PRODUCT_RETURN;
  void print_calls(outputStream* st)          PRODUCT_RETURN;
  void print_handler_table()                  PRODUCT_RETURN;
  void print_nul_chk_table()                  PRODUCT_RETURN;
  void print_nmethod(bool print_code);

  // need to re-define this from CodeBlob else the overload hides it
  virtual void print_on(outputStream* st) const { CodeBlob::print_on(st); }
  void print_on(outputStream* st, const char* title) const;

  // Logging
  void log_identity(xmlStream* log) const;
  void log_new_nmethod() const;
  void log_state_change() const;

  // Prints block-level comments, including nmethod specific block labels:
  virtual void print_block_comment(outputStream* stream, address block_begin) {
    print_nmethod_labels(stream, block_begin);
    CodeBlob::print_block_comment(stream, block_begin);
  }
  void print_nmethod_labels(outputStream* stream, address block_begin);

  // Prints a comment for one native instruction (reloc info, pc desc)
  void print_code_comment_on(outputStream* st, int column, address begin, address end);
  static void print_statistics() PRODUCT_RETURN;

  // Compiler task identification. Note that all OSR methods
  // are numbered in an independent sequence if CICountOSR is true,
  // and native method wrappers are also numbered independently if
  // CICountNative is true.
  int compile_id() const { return _compile_id; }
  const char* compile_kind() const;

  // For debugging
  // CompiledIC*  IC_at(char* p) const;
  // PrimitiveIC* primitiveIC_at(char* p) const;
  oop embeddedOop_at(address p);

  // tells if any of this method's dependencies have been invalidated
  // (this is expensive!)
  bool check_all_dependencies();

  // tells if this compiled method is dependent on the given changes,
  // and the changes have invalidated it
  bool check_dependency_on(DepChange& changes);
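
  // Schematic of how these checks are used when the class hierarchy changes
  // (a sketch of the flow driven from code such as
  // Universe::flush_dependents_on(); not additional API):
  //
  //   // for each alive nmethod nm in the code cache:
  //   if (nm->check_dependency_on(changes)) {
  //     nm->mark_for_deoptimization();  // invalidated; its frames will be deopted
  //   }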

  // Evolution support. Tells if this compiled method is dependent on any of
  // the methods m() of class dependee, such that if m() in dependee is replaced,
  // this compiled method will have to be deoptimized.
  bool is_evol_dependent_on(klassOop dependee);

  // Fast breakpoint support. Tells if this compiled method is
  // dependent on the given method. Returns true if this nmethod
  // corresponds to the given method as well.
  bool is_dependent_on_method(methodOop dependee);

  // is it ok to patch at address?
  bool is_patchable_at(address instr_address);

  // UseBiasedLocking support
  ByteSize native_receiver_sp_offset() {
    return _native_receiver_sp_offset;
  }
  ByteSize native_basic_lock_sp_offset() {
    return _native_basic_lock_sp_offset;
  }

  // support for code generation
  static int verified_entry_point_offset() { return offset_of(nmethod, _verified_entry_point); }
  static int osr_entry_point_offset()      { return offset_of(nmethod, _osr_entry_point); }
  static int entry_bci_offset()            { return offset_of(nmethod, _entry_bci); }

};

// Locks an nmethod so its code will not get removed, even if it is a zombie/not_entrant method
class nmethodLocker : public StackObj {
  nmethod* _nm;

 public:

  static void lock_nmethod(nmethod* nm);   // note: nm can be NULL
  static void unlock_nmethod(nmethod* nm); // (ditto)

  nmethodLocker(address pc); // derive nm from pc
  nmethodLocker(nmethod* nm) { _nm = nm; lock_nmethod(_nm); }
  nmethodLocker()            { _nm = NULL; }
  ~nmethodLocker()           { unlock_nmethod(_nm); }

  nmethod* code() { return _nm; }
  void set_code(nmethod* new_nm) {
    unlock_nmethod(_nm);  // note: This works even if _nm==new_nm.
    _nm = new_nm;
    lock_nmethod(_nm);
  }
};

#endif // SHARE_VM_CODE_NMETHOD_HPP