/*
 * Copyright 1997-2008 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

// This class is used internally by nmethods, to cache
// exception/pc/handler information.

class ExceptionCache : public CHeapObj {
  friend class VMStructs;
 private:
  static address _unwind_handler;
  enum { cache_size = 16 };
  klassOop _exception_type;
  address  _pc[cache_size];
  address  _handler[cache_size];
  int      _count;
  ExceptionCache* _next;

  address pc_at(int index)                     { assert(index >= 0 && index < count(), ""); return _pc[index]; }
  void    set_pc_at(int index, address a)      { assert(index >= 0 && index < cache_size, ""); _pc[index] = a; }
  address handler_at(int index)                { assert(index >= 0 && index < count(), ""); return _handler[index]; }
  void    set_handler_at(int index, address a) { assert(index >= 0 && index < cache_size, ""); _handler[index] = a; }
  int     count()                              { return _count; }
  void    increment_count()                    { _count++; }

 public:

  ExceptionCache(Handle exception, address pc, address handler);

  klassOop  exception_type()                { return _exception_type; }
  klassOop* exception_type_addr()           { return &_exception_type; }
  ExceptionCache* next()                    { return _next; }
  void      set_next(ExceptionCache *ec)    { _next = ec; }

  address match(Handle exception, address pc);
  bool    match_exception_with_space(Handle exception);
  address test_address(address addr);
  bool    add_address_and_handler(address addr, address handler);

  static address unwind_handler()           { return _unwind_handler; }
};
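// Illustrative lookup flow (a sketch only, not code from this file): the
// per-exception-type caches hang off an nmethod and are consulted before
// the slow ExceptionHandlerTable scan. 'slow_table_lookup' is a
// hypothetical helper standing in for that scan.
//
//   address handler = nm->handler_for_exception_and_pc(exception, pc);
//   if (handler == NULL) {
//     handler = slow_table_lookup(nm, exception, pc);           // hypothetical
//     nm->add_handler_for_exception_and_pc(exception, pc, handler);
//   }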
// cache pc descs found in earlier inquiries
class PcDescCache VALUE_OBJ_CLASS_SPEC {
  friend class VMStructs;
 private:
  enum { cache_size = 4 };
  PcDesc* _last_pc_desc;          // most recent pc_desc found
  PcDesc* _pc_descs[cache_size];  // last cache_size pc_descs found
 public:
  PcDescCache()  { debug_only(_last_pc_desc = NULL); }
  void    reset_to(PcDesc* initial_pc_desc);
  PcDesc* find_pc_desc(int pc_offset, bool approximate);
  void    add_pc_desc(PcDesc* pc_desc);
  PcDesc* last_pc_desc()          { return _last_pc_desc; }
};
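// Illustrative use (a sketch; the cache simply front-ends the search that
// nmethod::find_pc_desc_internal() performs over the PcDesc array, and the
// fallback search shown as a placeholder here is assumed):
//
//   cache.reset_to(nm->scopes_pcs_begin());
//   PcDesc* pd = cache.find_pc_desc(pc_offset, /*approximate*/ false);
//   if (pd == NULL) {
//     pd = /* search nm->scopes_pcs_begin()..nm->scopes_pcs_end() */;
//     cache.add_pc_desc(pd);  // remember it for the next inquiry
//   }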
// nmethods (native methods) are the compiled code versions of Java methods.

struct nmFlags {
  friend class VMStructs;
  unsigned int version:8;                 // version number (0 = first version)
  unsigned int level:4;                   // optimization level
  unsigned int age:4;                     // age (in # of sweep steps)

  unsigned int state:2;                   // {alive, not_entrant, zombie, unloaded}

  unsigned int isUncommonRecompiled:1;    // recompiled because of uncommon trap?
  unsigned int isToBeRecompiled:1;        // to be recompiled as soon as it matures
  unsigned int hasFlushedDependencies:1;  // Used for maintenance of dependencies
  unsigned int markedForReclamation:1;    // Used by NMethodSweeper

  unsigned int has_unsafe_access:1;       // May fault due to unsafe access.

  void clear();
};


// An nmethod contains:
//  - header                 (the nmethod structure)
//  [Relocation]
//  - relocation information
//  - constant part          (doubles, longs and floats used in nmethod)
//  [Code]
//  - code body
//  - exception handler
//  - stub code
//  [Debugging information]
//  - oop array
//  - data array
//  - pcs
//  [Exception handler table]
//  - handler entry point array
//  [Implicit Null Pointer exception table]
//  - implicit null table array

class Dependencies;
class ExceptionHandlerTable;
class ImplicitExceptionTable;
class AbstractCompiler;
class xmlStream;


class nmethod : public CodeBlob {
  friend class VMStructs;
  friend class NMethodSweeper;
 private:
  // Shared fields for all nmethods
  static int _zombie_instruction_size;

  methodOop _method;
  int       _entry_bci;        // != InvocationEntryBci if this nmethod is an on-stack replacement method

  nmethod*  _link;             // To support simple linked-list chaining of nmethods

  AbstractCompiler* _compiler; // The compiler which compiled this nmethod

  // Offsets for different nmethod parts
  int _exception_offset;
  // All deoptees will resume execution at the location described by this offset
  int _deoptimize_offset;
#ifdef HAVE_DTRACE_H
  int _trap_offset;
#endif // def HAVE_DTRACE_H
  int _stub_offset;
  int _consts_offset;
  int _scopes_data_offset;
  int _scopes_pcs_offset;
  int _dependencies_offset;
  int _handler_table_offset;
  int _nul_chk_table_offset;
  int _nmethod_end_offset;

  // location in frame (offset for sp) that deopt can store the original
  // pc during a deopt.
  int _orig_pc_offset;

  int _compile_id;                 // which compilation made this nmethod
  int _comp_level;                 // compilation level

  // offsets for entry points
  address _entry_point;            // entry point with class check
  address _verified_entry_point;   // entry point without class check
  address _osr_entry_point;        // entry point for on stack replacement

  nmFlags flags;                   // various flags to keep track of nmethod state
  bool _markedForDeoptimization;   // Used for stack deoptimization
  enum { alive       = 0,
         not_entrant = 1, // uncommon trap has happened but activations may still exist
         zombie      = 2,
         unloaded    = 3 };

  // used by jvmti to track if an unload event has been posted for this nmethod.
  bool _unload_reported;

  NOT_PRODUCT(bool _has_debug_info; )

  // Nmethod Flushing lock (if non-zero, then the nmethod is not removed)
  jint _lock_count;

  // not_entrant method removal. Each mark_sweep pass will update
  // this mark to the current sweep invocation count if it is seen on the
  // stack. A not_entrant method can be removed when there are no
  // more activations, i.e., when the _stack_traversal_mark is less than
  // the current sweep traversal index.
  long _stack_traversal_mark;

  ExceptionCache *_exception_cache;
  PcDescCache     _pc_desc_cache;
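  // Illustrative sweeper test (a sketch; NMethodSweeper drives the real loop
  // elsewhere, and 'current_traversal' is an assumed name for its sweep
  // counter):
  //
  //   if (nm->is_not_entrant() &&
  //       nm->stack_traversal_mark() < current_traversal) {
  //     nm->make_zombie();   // no activations seen during the last traversal
  //   }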
  // These are only used for compiled synchronized native methods to
  // locate the owner and stack slot for the BasicLock so that we can
  // properly revoke the bias of the owner if necessary. They are
  // needed because there is no debug information for compiled native
  // wrappers and the oop maps are insufficient to allow
  // frame::retrieve_receiver() to work. Currently they are expected
  // to be byte offsets from the Java stack pointer for maximum code
  // sharing between platforms. Note that currently biased locking
  // will never cause Class instances to be biased but this code
  // handles the static synchronized case as well.
  ByteSize _compiled_synchronized_native_basic_lock_owner_sp_offset;
  ByteSize _compiled_synchronized_native_basic_lock_sp_offset;

  friend class nmethodLocker;

  // For native wrappers
  nmethod(methodOop method,
          int nmethod_size,
          CodeOffsets* offsets,
          CodeBuffer *code_buffer,
          int frame_size,
          ByteSize basic_lock_owner_sp_offset, /* synchronized natives only */
          ByteSize basic_lock_sp_offset,       /* synchronized natives only */
          OopMapSet* oop_maps);

#ifdef HAVE_DTRACE_H
  // For native wrappers
  nmethod(methodOop method,
          int nmethod_size,
          CodeOffsets* offsets,
          CodeBuffer *code_buffer,
          int frame_size);
#endif // def HAVE_DTRACE_H

  // Creation support
  nmethod(methodOop method,
          int nmethod_size,
          int compile_id,
          int entry_bci,
          CodeOffsets* offsets,
          int orig_pc_offset,
          DebugInformationRecorder *recorder,
          Dependencies* dependencies,
          CodeBuffer *code_buffer,
          int frame_size,
          OopMapSet* oop_maps,
          ExceptionHandlerTable* handler_table,
          ImplicitExceptionTable* nul_chk_table,
          AbstractCompiler* compiler,
          int comp_level);

  // helper methods
  void* operator new(size_t size, int nmethod_size);
  void check_store();

  const char* reloc_string_for(u_char* begin, u_char* end);
  void make_not_entrant_or_zombie(int state);
  void inc_decompile_count();

  // used to check that writes to nmFlags are done consistently.
  static void check_safepoint() PRODUCT_RETURN;

  // Used to manipulate the exception cache
  void add_exception_cache_entry(ExceptionCache* new_entry);
  ExceptionCache* exception_cache_entry_for_exception(Handle exception);

  // Inform external interfaces that a compiled method has been unloaded
  inline void post_compiled_method_unload();

 public:
  // create nmethod with entry_bci
  static nmethod* new_nmethod(methodHandle method,
                              int compile_id,
                              int entry_bci,
                              CodeOffsets* offsets,
                              int orig_pc_offset,
                              DebugInformationRecorder* recorder,
                              Dependencies* dependencies,
                              CodeBuffer *code_buffer,
                              int frame_size,
                              OopMapSet* oop_maps,
                              ExceptionHandlerTable* handler_table,
                              ImplicitExceptionTable* nul_chk_table,
                              AbstractCompiler* compiler,
                              int comp_level);

  static nmethod* new_native_nmethod(methodHandle method,
                                     CodeBuffer *code_buffer,
                                     int vep_offset,
                                     int frame_complete,
                                     int frame_size,
                                     ByteSize receiver_sp_offset,
                                     ByteSize basic_lock_sp_offset,
                                     OopMapSet* oop_maps);
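  // Illustrative creation call (a sketch; in practice the compiler broker
  // assembles these arguments and registers the result, so the locals named
  // here are assumptions):
  //
  //   nmethod* nm = nmethod::new_nmethod(method, compile_id, entry_bci,
  //                                      offsets, orig_pc_offset, recorder,
  //                                      dependencies, code_buffer, frame_size,
  //                                      oop_maps, handler_table, nul_chk_table,
  //                                      compiler, comp_level);
  //   // a NULL result would indicate the allocation in the code cache failed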
#ifdef HAVE_DTRACE_H
  // The method we generate for a dtrace probe has to look
  // like an nmethod as far as the rest of the system is concerned,
  // which is somewhat unfortunate.
  static nmethod* new_dtrace_nmethod(methodHandle method,
                                     CodeBuffer *code_buffer,
                                     int vep_offset,
                                     int trap_offset,
                                     int frame_complete,
                                     int frame_size);

  int trap_offset() const      { return _trap_offset; }
  address trap_address() const { return code_begin() + _trap_offset; }

#endif // def HAVE_DTRACE_H

  // accessors
  methodOop method() const                        { return _method; }
  AbstractCompiler* compiler() const              { return _compiler; }

#ifndef PRODUCT
  bool has_debug_info() const                     { return _has_debug_info; }
  void set_has_debug_info(bool f)                 { _has_debug_info = f; }
#endif // NOT PRODUCT

  // type info
  bool is_nmethod() const                         { return true; }
  bool is_java_method() const                     { return !method()->is_native(); }
  bool is_native_method() const                   { return method()->is_native(); }
  bool is_osr_method() const                      { return _entry_bci != InvocationEntryBci; }
  bool is_osr_only_method() const                 { return is_osr_method(); }

  bool is_compiled_by_c1() const;
  bool is_compiled_by_c2() const;

  // boundaries for different parts
  address code_begin         () const             { return _entry_point; }
  address code_end           () const             { return header_begin() + _stub_offset; }
  address exception_begin    () const             { return header_begin() + _exception_offset; }
  address deopt_handler_begin() const             { return header_begin() + _deoptimize_offset; }
  address stub_begin         () const             { return header_begin() + _stub_offset; }
  address stub_end           () const             { return header_begin() + _consts_offset; }
  address consts_begin       () const             { return header_begin() + _consts_offset; }
  address consts_end         () const             { return header_begin() + _scopes_data_offset; }
  address scopes_data_begin  () const             { return header_begin() + _scopes_data_offset; }
  address scopes_data_end    () const             { return header_begin() + _scopes_pcs_offset; }
  PcDesc* scopes_pcs_begin   () const             { return (PcDesc*)(header_begin() + _scopes_pcs_offset); }
  PcDesc* scopes_pcs_end     () const             { return (PcDesc*)(header_begin() + _dependencies_offset); }
  address dependencies_begin () const             { return header_begin() + _dependencies_offset; }
  address dependencies_end   () const             { return header_begin() + _handler_table_offset; }
  address handler_table_begin() const             { return header_begin() + _handler_table_offset; }
  address handler_table_end  () const             { return header_begin() + _nul_chk_table_offset; }
  address nul_chk_table_begin() const             { return header_begin() + _nul_chk_table_offset; }
  address nul_chk_table_end  () const             { return header_begin() + _nmethod_end_offset; }

  int code_size         () const                  { return code_end         () - code_begin         (); }
  int stub_size         () const                  { return stub_end         () - stub_begin         (); }
  int consts_size       () const                  { return consts_end       () - consts_begin       (); }
  int scopes_data_size  () const                  { return scopes_data_end  () - scopes_data_begin  (); }
  int scopes_pcs_size   () const                  { return (intptr_t)scopes_pcs_end() - (intptr_t)scopes_pcs_begin(); }
  int dependencies_size () const                  { return dependencies_end () - dependencies_begin (); }
  int handler_table_size() const                  { return handler_table_end() - handler_table_begin(); }
  int nul_chk_table_size() const                  { return nul_chk_table_end() - nul_chk_table_begin(); }

  int total_size        () const;
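  // The parts are laid out back to back, so consecutive regions share a
  // boundary and each size is simply end minus begin. For example, these
  // invariants follow directly from the accessors above (a sketch, not
  // asserts present in this file):
  //
  //   assert(nm->stub_end()        == nm->consts_begin(),               "contiguous");
  //   assert(nm->consts_end()      == nm->scopes_data_begin(),          "contiguous");
  //   assert(nm->scopes_data_end() == (address)nm->scopes_pcs_begin(),  "contiguous");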
  bool code_contains         (address addr) const { return code_begin         () <= addr && addr < code_end         (); }
  bool stub_contains         (address addr) const { return stub_begin         () <= addr && addr < stub_end         (); }
  bool consts_contains       (address addr) const { return consts_begin       () <= addr && addr < consts_end       (); }
  bool scopes_data_contains  (address addr) const { return scopes_data_begin  () <= addr && addr < scopes_data_end  (); }
  bool scopes_pcs_contains   (PcDesc* addr) const { return scopes_pcs_begin   () <= addr && addr < scopes_pcs_end   (); }
  bool handler_table_contains(address addr) const { return handler_table_begin() <= addr && addr < handler_table_end(); }
  bool nul_chk_table_contains(address addr) const { return nul_chk_table_begin() <= addr && addr < nul_chk_table_end(); }

  // entry points
  address entry_point() const                     { return _entry_point;          } // normal entry point
  address verified_entry_point() const            { return _verified_entry_point; } // if klass is correct

  // flag accessing and manipulation
  bool is_in_use() const                          { return flags.state == alive; }
  bool is_alive() const                           { return flags.state == alive || flags.state == not_entrant; }
  bool is_not_entrant() const                     { return flags.state == not_entrant; }
  bool is_zombie() const                          { return flags.state == zombie; }
  bool is_unloaded() const                        { return flags.state == unloaded; }

  // Make the nmethod non-entrant. The nmethod will continue to be alive.
  // It is used when an uncommon trap happens.
  void make_not_entrant()                         { make_not_entrant_or_zombie(not_entrant); }
  void make_zombie()                              { make_not_entrant_or_zombie(zombie); }
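  // Typical state transitions (a sketch of the lifecycle these calls drive;
  // the sweeper and deoptimization code decide when each step happens):
  //
  //   alive -> not_entrant   (uncommon trap; existing activations may remain)
  //         -> zombie        (no activations left; code may be reclaimed)
  //         -> unloaded      (via make_unloaded() when classes are unloaded)
  //
  //   if (nm->is_not_entrant() && nm->can_not_entrant_be_converted())
  //     nm->make_zombie();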
  // used by jvmti to track if the unload event has been reported
  bool unload_reported()                          { return _unload_reported; }
  void set_unload_reported()                      { _unload_reported = true; }

  bool is_marked_for_deoptimization() const       { return _markedForDeoptimization; }
  void mark_for_deoptimization()                  { _markedForDeoptimization = true; }

  void make_unloaded(BoolObjectClosure* is_alive, oop cause);

  bool has_dependencies()                         { return dependencies_size() != 0; }
  void flush_dependencies(BoolObjectClosure* is_alive);
  bool has_flushed_dependencies()                 { return flags.hasFlushedDependencies; }
  void set_has_flushed_dependencies()             {
    check_safepoint();
    assert(!has_flushed_dependencies(), "should only happen once");
    flags.hasFlushedDependencies = 1;
  }

  bool is_marked_for_reclamation() const          { return flags.markedForReclamation; }
  void mark_for_reclamation()                     { check_safepoint(); flags.markedForReclamation = 1; }
  void unmark_for_reclamation()                   { check_safepoint(); flags.markedForReclamation = 0; }

  bool has_unsafe_access() const                  { return flags.has_unsafe_access; }
  void set_has_unsafe_access(bool z)              { flags.has_unsafe_access = z; }

  int level() const                               { return flags.level; }
  void set_level(int newLevel)                    { check_safepoint(); flags.level = newLevel; }

  int comp_level() const                          { return _comp_level; }

  int version() const                             { return flags.version; }
  void set_version(int v);

  // Sweeper support
  long stack_traversal_mark()                     { return _stack_traversal_mark; }
  void set_stack_traversal_mark(long l)           { _stack_traversal_mark = l; }

  // Exception cache support
  ExceptionCache* exception_cache() const         { return _exception_cache; }
  void set_exception_cache(ExceptionCache *ec)    { _exception_cache = ec; }
  address handler_for_exception_and_pc(Handle exception, address pc);
  void add_handler_for_exception_and_pc(Handle exception, address pc, address handler);
  void remove_from_exception_cache(ExceptionCache* ec);

  // implicit exceptions support
  address continuation_for_implicit_exception(address pc);

  // On-stack replacement support
  int osr_entry_bci() const                       { assert(_entry_bci != InvocationEntryBci, "wrong kind of nmethod"); return _entry_bci; }
  address osr_entry() const                       { assert(_entry_bci != InvocationEntryBci, "wrong kind of nmethod"); return _osr_entry_point; }
  void invalidate_osr_method();
  nmethod* link() const                           { return _link; }
  void set_link(nmethod *n)                       { _link = n; }

  // tells whether frames described by this nmethod can be deoptimized
  // note: native wrappers cannot be deoptimized.
  bool can_be_deoptimized() const                 { return is_java_method(); }

  // Inline cache support
  void clear_inline_caches();
  void cleanup_inline_caches();
  bool inlinecache_check_contains(address addr) const {
    return (addr >= instructions_begin() && addr < verified_entry_point());
  }

  // unlink and deallocate this nmethod
  // Only NMethodSweeper class is expected to use this. NMethodSweeper is not
  // expected to use any other private methods/data in this class.

 protected:
  void flush();

 public:
  // If returning true, it is unsafe to remove this nmethod even though it is a zombie
  // nmethod, since the VM might have a reference to it. Should only be called from a safepoint.
  bool is_locked_by_vm() const                    { return _lock_count > 0; }

  // See comment at definition of _stack_traversal_mark
  void mark_as_seen_on_stack();
  bool can_not_entrant_be_converted();

  // Evolution support. We make old (discarded) compiled methods point to new methodOops.
  void set_method(methodOop method)               { _method = method; }

  // GC support
  void do_unloading(BoolObjectClosure* is_alive, OopClosure* keep_alive,
                    bool unloading_occurred);
  bool can_unload(BoolObjectClosure* is_alive, OopClosure* keep_alive,
                  oop* root, bool unloading_occurred);

  void preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map,
                                     OopClosure* f);
  void oops_do(OopClosure* f);

  // ScopeDesc for an instruction
  ScopeDesc* scope_desc_at(address pc);

 private:
  ScopeDesc* scope_desc_in(address begin, address end);

  address* orig_pc_addr(const frame* fr) { return (address*) ((address)fr->unextended_sp() + _orig_pc_offset); }

  PcDesc* find_pc_desc_internal(address pc, bool approximate);

  PcDesc* find_pc_desc(address pc, bool approximate) {
    PcDesc* desc = _pc_desc_cache.last_pc_desc();
    if (desc != NULL && desc->pc_offset() == pc - instructions_begin()) {
      return desc;
    }
    return find_pc_desc_internal(pc, approximate);
  }

 public:
  // ScopeDesc retrieval operation
  PcDesc* pc_desc_at(address pc)   { return find_pc_desc(pc, false); }
  // pc_desc_near returns the first PcDesc at or after the given pc.
  PcDesc* pc_desc_near(address pc) { return find_pc_desc(pc, true); }

 public:
  // copying of debugging information
  void copy_scopes_pcs(PcDesc* pcs, int count);
  void copy_scopes_data(address buffer, int size);

  // deopt
  // returns true if the pc is one we would expect if the frame is being deopted.
  bool is_deopt_pc(address pc);
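  // Illustrative debug-info queries (a sketch; 'pc' is assumed to be an
  // instruction address inside this nmethod):
  //
  //   PcDesc*    pd = nm->pc_desc_at(pc);    // exact match, NULL if none
  //   ScopeDesc* sd = nm->scope_desc_at(pc); // decoded scope information
  //   bool deopting = nm->is_deopt_pc(pc);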
  // Accessor/mutator for the original pc of a frame before the frame was deopted.
  address get_original_pc(const frame* fr)             { return *orig_pc_addr(fr); }
  void    set_original_pc(const frame* fr, address pc) { *orig_pc_addr(fr) = pc; }

  // jvmti support:
  void post_compiled_method_load_event();

  // verify operations
  void verify();
  void verify_scopes();
  void verify_interrupt_point(address interrupt_point);

  // printing support
  void print()                          const;
  void print_code();
  void print_relocations()                        PRODUCT_RETURN;
  void print_pcs()                                PRODUCT_RETURN;
  void print_scopes()                             PRODUCT_RETURN;
  void print_dependencies()                       PRODUCT_RETURN;
  void print_value_on(outputStream* st) const     PRODUCT_RETURN;
  void print_calls(outputStream* st)              PRODUCT_RETURN;
  void print_handler_table()                      PRODUCT_RETURN;
  void print_nul_chk_table()                      PRODUCT_RETURN;
  void print_nmethod(bool print_code);

  void print_on(outputStream* st, const char* title) const;

  // Logging
  void log_identity(xmlStream* log) const;
  void log_new_nmethod() const;
  void log_state_change(int state) const;

  // Prints a comment for one native instruction (reloc info, pc desc)
  void print_code_comment_on(outputStream* st, int column, address begin, address end);
  static void print_statistics()                  PRODUCT_RETURN;

  // Compiler task identification. Note that all OSR methods
  // are numbered in an independent sequence if CICountOSR is true,
  // and native method wrappers are also numbered independently if
  // CICountNative is true.
  int compile_id() const                          { return _compile_id; }
  const char* compile_kind() const;

  // For debugging
  // CompiledIC*    IC_at(char* p) const;
  // PrimitiveIC*   primitiveIC_at(char* p) const;
  oop embeddedOop_at(address p);

  // tells if any of this method's dependencies have been invalidated
  // (this is expensive!)
  bool check_all_dependencies();

  // tells if this compiled method is dependent on the given changes,
  // and the changes have invalidated it
  bool check_dependency_on(DepChange& changes);

  // Evolution support. Tells if this compiled method is dependent on any of
  // methods m() of class dependee, such that if m() in dependee is replaced,
  // this compiled method will have to be deoptimized.
  bool is_evol_dependent_on(klassOop dependee);

  // Fast breakpoint support. Tells if this compiled method is
  // dependent on the given method. Returns true if this nmethod
  // corresponds to the given method as well.
  bool is_dependent_on_method(methodOop dependee);

  // is it ok to patch at address?
  bool is_patchable_at(address instr_address);

  // UseBiasedLocking support
  ByteSize compiled_synchronized_native_basic_lock_owner_sp_offset() {
    return _compiled_synchronized_native_basic_lock_owner_sp_offset;
  }
  ByteSize compiled_synchronized_native_basic_lock_sp_offset() {
    return _compiled_synchronized_native_basic_lock_sp_offset;
  }

  // support for code generation
  static int verified_entry_point_offset()        { return offset_of(nmethod, _verified_entry_point); }
  static int osr_entry_point_offset()             { return offset_of(nmethod, _osr_entry_point); }
  static int entry_bci_offset()                   { return offset_of(nmethod, _entry_bci); }

};

// Locks an nmethod so its code will not get removed, even if it is a zombie/not_entrant method
class nmethodLocker : public StackObj {
  nmethod* _nm;

  static void lock_nmethod(nmethod* nm);   // note: nm can be NULL
  static void unlock_nmethod(nmethod* nm); // (ditto)

 public:
  nmethodLocker(address pc); // derive nm from pc
  nmethodLocker(nmethod *nm) { _nm = nm; lock_nmethod(_nm); }
  nmethodLocker()            { _nm = NULL; }
  ~nmethodLocker()           { unlock_nmethod(_nm); }

  nmethod* code()            { return _nm; }
  void set_code(nmethod* new_nm) {
    unlock_nmethod(_nm);   // note: This works even if _nm==new_nm.
    _nm = new_nm;
    lock_nmethod(_nm);
  }
};
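// Illustrative use (a sketch): pin an nmethod while inspecting it so the
// sweeper cannot flush it out from under us. Locking and unlocking follow
// RAII scope, as the constructor/destructor above show.
//
//   {
//     nmethodLocker nml(pc);    // derives and locks the nmethod containing pc
//     nmethod* nm = nml.code();
//     if (nm != NULL) nm->print();
//   }                           // destructor unlocks on scope exit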