Mon, 07 Jul 2014 10:12:40 +0200
8049421: G1 Class Unloading after completing a concurrent mark cycle
Reviewed-by: tschatzl, ehelin, brutisso, coleenp, roland, iveresov
Contributed-by: stefan.karlsson@oracle.com, mikael.gerdin@oracle.com
duke@435 | 1 | /* |
coleenp@5614 | 2 | * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. |
duke@435 | 3 | * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
duke@435 | 4 | * |
duke@435 | 5 | * This code is free software; you can redistribute it and/or modify it |
duke@435 | 6 | * under the terms of the GNU General Public License version 2 only, as |
duke@435 | 7 | * published by the Free Software Foundation. |
duke@435 | 8 | * |
duke@435 | 9 | * This code is distributed in the hope that it will be useful, but WITHOUT |
duke@435 | 10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
duke@435 | 11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
duke@435 | 12 | * version 2 for more details (a copy is included in the LICENSE file that |
duke@435 | 13 | * accompanied this code). |
duke@435 | 14 | * |
duke@435 | 15 | * You should have received a copy of the GNU General Public License version |
duke@435 | 16 | * 2 along with this work; if not, write to the Free Software Foundation, |
duke@435 | 17 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. |
duke@435 | 18 | * |
trims@1907 | 19 | * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
trims@1907 | 20 | * or visit www.oracle.com if you need additional information or have any |
trims@1907 | 21 | * questions. |
duke@435 | 22 | * |
duke@435 | 23 | */ |
duke@435 | 24 | |
stefank@2314 | 25 | #ifndef SHARE_VM_CODE_NMETHOD_HPP |
stefank@2314 | 26 | #define SHARE_VM_CODE_NMETHOD_HPP |
stefank@2314 | 27 | |
stefank@2314 | 28 | #include "code/codeBlob.hpp" |
stefank@2314 | 29 | #include "code/pcDesc.hpp" |
coleenp@4037 | 30 | #include "oops/metadata.hpp" |
stefank@2314 | 31 | |
duke@435 | 32 | // This class is used internally by nmethods to cache |
duke@435 | 33 | // exception/pc/handler information. |
duke@435 | 34 | |
zgu@3900 | 35 | class ExceptionCache : public CHeapObj<mtCode> { |
duke@435 | 36 | friend class VMStructs; |
duke@435 | 37 | private: |
duke@435 | 38 | enum { cache_size = 16 }; |
coleenp@4037 | 39 | Klass* _exception_type; |
duke@435 | 40 | address _pc[cache_size]; |
duke@435 | 41 | address _handler[cache_size]; |
duke@435 | 42 | int _count; |
duke@435 | 43 | ExceptionCache* _next; |
duke@435 | 44 | |
duke@435 | 45 | address pc_at(int index) { assert(index >= 0 && index < count(),""); return _pc[index]; } |
duke@435 | 46 | void set_pc_at(int index, address a) { assert(index >= 0 && index < cache_size,""); _pc[index] = a; } |
duke@435 | 47 | address handler_at(int index) { assert(index >= 0 && index < count(),""); return _handler[index]; } |
duke@435 | 48 | void set_handler_at(int index, address a) { assert(index >= 0 && index < cache_size,""); _handler[index] = a; } |
duke@435 | 49 | int count() { return _count; } |
duke@435 | 50 | void increment_count() { _count++; } |
duke@435 | 51 | |
duke@435 | 52 | public: |
duke@435 | 53 | |
duke@435 | 54 | ExceptionCache(Handle exception, address pc, address handler); |
duke@435 | 55 | |
coleenp@4037 | 56 | Klass* exception_type() { return _exception_type; } |
duke@435 | 57 | ExceptionCache* next() { return _next; } |
duke@435 | 58 | void set_next(ExceptionCache *ec) { _next = ec; } |
duke@435 | 59 | |
duke@435 | 60 | address match(Handle exception, address pc); |
duke@435 | 61 | bool match_exception_with_space(Handle exception); |
duke@435 | 62 | address test_address(address addr); |
duke@435 | 63 | bool add_address_and_handler(address addr, address handler); |
duke@435 | 64 | }; |
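
The cache is probed and refilled along the exception-dispatch path. A minimal sketch of that protocol, using only the public members declared above; the helper name probe_or_install is illustrative and not part of this header (the real logic lives in nmethod.cpp):

// Illustrative sketch only. test_address() returns the cached handler for
// a pc, or NULL on a miss; add_address_and_handler() caches a new pair and
// returns false once all cache_size slots are occupied.
address probe_or_install(ExceptionCache* ec, address pc, address computed_handler) {
  address handler = ec->test_address(pc);            // NULL => cache miss
  if (handler == NULL) {
    if (!ec->add_address_and_handler(pc, computed_handler)) {
      // Cache full: callers would allocate a fresh ExceptionCache for this
      // exception type and chain it in via set_next().
    }
    handler = computed_handler;
  }
  return handler;
}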
duke@435 | 65 | |
duke@435 | 66 | |
duke@435 | 67 | // cache pc descs found in earlier inquiries |
duke@435 | 68 | class PcDescCache VALUE_OBJ_CLASS_SPEC { |
duke@435 | 69 | friend class VMStructs; |
duke@435 | 70 | private: |
duke@435 | 71 | enum { cache_size = 4 }; |
mdoerr@6941 | 72 | // The array elements MUST be volatile! Several threads may modify |
mdoerr@6941 | 73 | // and read from the cache concurrently. Without volatile, the C++ |
mdoerr@6941 | 74 | // compiler (namely xlC12) may duplicate field accesses, and |
mdoerr@6941 | 75 | // find_pc_desc_internal has been observed to return wrong results. |
mdoerr@6941 | 76 | typedef PcDesc* PcDescPtr; |
mdoerr@6941 | 77 | volatile PcDescPtr _pc_descs[cache_size]; // last cache_size pc_descs found |
duke@435 | 78 | public: |
phh@2621 | 79 | PcDescCache() { debug_only(_pc_descs[0] = NULL); } |
duke@435 | 80 | void reset_to(PcDesc* initial_pc_desc); |
duke@435 | 81 | PcDesc* find_pc_desc(int pc_offset, bool approximate); |
duke@435 | 82 | void add_pc_desc(PcDesc* pc_desc); |
phh@2621 | 83 | PcDesc* last_pc_desc() { return _pc_descs[0]; } |
duke@435 | 84 | }; |
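
A hedged sketch of why the volatile declaration matters: each probe should load a slot exactly once, so the worst a concurrent add_pc_desc() can cause is a miss, never a torn or duplicated read. The helper name sketch_probe is illustrative, reusing the PcDescPtr typedef above and PcDesc::pc_offset() as used elsewhere in this file:

// Sketch: one volatile load per slot; on a miss the caller falls back to
// the binary search in find_pc_desc_internal().
PcDesc* sketch_probe(volatile PcDescPtr* slots, int cache_size, int pc_offset) {
  for (int i = 0; i < cache_size; i++) {
    PcDesc* d = slots[i];               // single load through the volatile slot
    if (d != NULL && d->pc_offset() == pc_offset) {
      return d;                         // hit
    }
  }
  return NULL;                          // miss
}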
duke@435 | 85 | |
duke@435 | 86 | |
duke@435 | 87 | // nmethods (native methods) are the compiled code versions of Java methods. |
never@1999 | 88 | // |
never@1999 | 89 | // An nmethod contains: |
duke@435 | 90 | // - header (the nmethod structure) |
duke@435 | 91 | // [Relocation] |
duke@435 | 92 | // - relocation information |
duke@435 | 93 | // - constant part (doubles, longs and floats used in nmethod) |
twisti@1918 | 94 | // - oop table |
duke@435 | 95 | // [Code] |
duke@435 | 96 | // - code body |
duke@435 | 97 | // - exception handler |
duke@435 | 98 | // - stub code |
duke@435 | 99 | // [Debugging information] |
duke@435 | 100 | // - oop array |
duke@435 | 101 | // - data array |
duke@435 | 102 | // - pcs |
duke@435 | 103 | // [Exception handler table] |
duke@435 | 104 | // - handler entry point array |
duke@435 | 105 | // [Implicit Null Pointer exception table] |
duke@435 | 106 | // - implicit null table array |
duke@435 | 107 | |
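The parts are laid out contiguously in the order listed above, so each section's end is the next section's begin; the boundary and size accessors further down encode exactly this. As a worked example using the offsets declared below:

// For an nmethod (all offsets are relative to header_begin()):
//   stub_begin() = header_begin() + _stub_offset
//   stub_end()   = header_begin() + _oops_offset     // next part starts here
//   stub_size()  = stub_end() - stub_begin()
//                = _oops_offset - _stub_offset
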
duke@435 | 108 | class Dependencies; |
duke@435 | 109 | class ExceptionHandlerTable; |
duke@435 | 110 | class ImplicitExceptionTable; |
duke@435 | 111 | class AbstractCompiler; |
duke@435 | 112 | class xmlStream; |
duke@435 | 113 | |
duke@435 | 114 | class nmethod : public CodeBlob { |
duke@435 | 115 | friend class VMStructs; |
duke@435 | 116 | friend class NMethodSweeper; |
jmasa@2909 | 117 | friend class CodeCache; // scavengable oops |
duke@435 | 118 | private: |
stefank@6992 | 119 | |
stefank@6992 | 120 | // GC support to help figure out if an nmethod has been |
stefank@6992 | 121 | // cleaned/unloaded by the current GC. |
stefank@6992 | 122 | static unsigned char _global_unloading_clock; |
stefank@6992 | 123 | |
duke@435 | 124 | // Shared fields for all nmethods |
coleenp@4037 | 125 | Method* _method; |
duke@435 | 126 | int _entry_bci; // != InvocationEntryBci if this nmethod is an on-stack replacement method |
never@1971 | 127 | jmethodID _jmethod_id; // Cache of method()->jmethod_id() |
duke@435 | 128 | |
jrose@1424 | 129 | // To support simple linked-list chaining of nmethods: |
coleenp@4037 | 130 | nmethod* _osr_link; // from InstanceKlass::osr_nmethods_head |
stefank@6992 | 131 | |
stefank@6992 | 132 | union { |
stefank@6992 | 133 | // Used by G1 to chain nmethods. |
stefank@6992 | 134 | nmethod* _unloading_next; |
stefank@6992 | 135 | // Used by non-G1 GCs to chain nmethods. |
stefank@6992 | 136 | nmethod* _scavenge_root_link; // from CodeCache::scavenge_root_nmethods |
stefank@6992 | 137 | }; |
jrose@1424 | 138 | |
jrose@1424 | 139 | static nmethod* volatile _oops_do_mark_nmethods; |
jrose@1424 | 140 | nmethod* volatile _oops_do_mark_link; |
duke@435 | 141 | |
duke@435 | 142 | AbstractCompiler* _compiler; // The compiler which compiled this nmethod |
duke@435 | 143 | |
never@1999 | 144 | // offsets for entry points |
never@1999 | 145 | address _entry_point; // entry point with class check |
never@1999 | 146 | address _verified_entry_point; // entry point without class check |
never@1999 | 147 | address _osr_entry_point; // entry point for on stack replacement |
never@1999 | 148 | |
duke@435 | 149 | // Offsets for different nmethod parts |
duke@435 | 150 | int _exception_offset; |
twisti@1639 | 151 | // All deoptees will resume execution at the location described by |
twisti@1639 | 152 | // this offset. |
duke@435 | 153 | int _deoptimize_offset; |
twisti@1639 | 154 | // All deoptees at a MethodHandle call site will resume execution |
twisti@1639 | 155 | // at the location described by this offset. |
twisti@1639 | 156 | int _deoptimize_mh_offset; |
never@1813 | 157 | // Offset of the unwind handler if it exists |
never@1813 | 158 | int _unwind_handler_offset; |
never@1813 | 159 | |
kamg@551 | 160 | #ifdef HAVE_DTRACE_H |
kamg@551 | 161 | int _trap_offset; |
kamg@551 | 162 | #endif // def HAVE_DTRACE_H |
twisti@2117 | 163 | int _consts_offset; |
duke@435 | 164 | int _stub_offset; |
twisti@1918 | 165 | int _oops_offset; // offset to where embedded oop table begins (inside data) |
coleenp@4037 | 166 | int _metadata_offset; // embedded meta data table |
duke@435 | 167 | int _scopes_data_offset; |
duke@435 | 168 | int _scopes_pcs_offset; |
duke@435 | 169 | int _dependencies_offset; |
duke@435 | 170 | int _handler_table_offset; |
duke@435 | 171 | int _nul_chk_table_offset; |
duke@435 | 172 | int _nmethod_end_offset; |
duke@435 | 173 | |
duke@435 | 174 | // location in frame (offset from sp) where deopt can store the original |
duke@435 | 175 | // pc during a deopt. |
duke@435 | 176 | int _orig_pc_offset; |
duke@435 | 177 | |
never@1999 | 178 | int _compile_id; // which compilation made this nmethod |
never@1999 | 179 | int _comp_level; // compilation level |
duke@435 | 180 | |
never@1999 | 181 | // protected by CodeCache_lock |
never@1999 | 182 | bool _has_flushed_dependencies; // Used for maintenance of dependencies (CodeCache_lock) |
duke@435 | 183 | |
never@1999 | 184 | bool _marked_for_reclamation; // Used by NMethodSweeper (set only by sweeper) |
never@1999 | 185 | bool _marked_for_deoptimization; // Used for stack deoptimization |
never@1999 | 186 | |
never@1999 | 187 | // used by jvmti to track if an unload event has been posted for this nmethod. |
never@1999 | 188 | bool _unload_reported; |
never@1999 | 189 | |
never@1999 | 190 | // set during construction |
never@1999 | 191 | unsigned int _has_unsafe_access:1; // May fault due to unsafe access. |
never@1999 | 192 | unsigned int _has_method_handle_invokes:1; // Does this method have MethodHandle invokes? |
never@3500 | 193 | unsigned int _lazy_critical_native:1; // Lazy JNI critical native |
kvn@4103 | 194 | unsigned int _has_wide_vectors:1; // Preserve wide vectors at safepoints |
never@1999 | 195 | |
never@1999 | 196 | // Protected by Patching_lock |
anoll@5792 | 197 | volatile unsigned char _state; // {alive, not_entrant, zombie, unloaded} |
never@1999 | 198 | |
stefank@6992 | 199 | volatile unsigned char _unloading_clock; // Incremented after GC unloaded/cleaned the nmethod |
stefank@6992 | 200 | |
never@2081 | 201 | #ifdef ASSERT |
never@2081 | 202 | bool _oops_are_stale; // indicates that it's no longer safe to access oops section |
never@2081 | 203 | #endif |
never@2081 | 204 | |
kvn@6172 | 205 | enum { in_use = 0, // executable nmethod |
kvn@6172 | 206 | not_entrant = 1, // marked for deoptimization but activations may still exist, |
kvn@6172 | 207 | // will be transformed to zombie when all activations are gone |
kvn@6172 | 208 | zombie = 2, // no activations exist, nmethod is ready for purge |
kvn@6172 | 209 | unloaded = 3 }; // there should be no activations, should not be called, |
kvn@6172 | 210 | // will be transformed to zombie immediately |
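
The states are ordered so that an nmethod's lifecycle only moves forward on the not_entrant/zombie path. A hedged sketch of that monotonicity check, with an illustrative helper name (the real transition, including its Patching_lock protocol, is make_not_entrant_or_zombie() in nmethod.cpp):

// Sketch: when the target is not_entrant (1) or zombie (2), the transition
// is performed only if it advances the state; e.g. a zombie is never made
// not_entrant again. unloaded (3) is entered directly by the GC instead.
static bool sketch_can_advance(unsigned char current_state, unsigned char target_state) {
  return current_state < target_state;   // states never move backwards
}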
duke@435 | 211 | |
jrose@1424 | 212 | jbyte _scavenge_root_state; |
jrose@1424 | 213 | |
kvn@6429 | 214 | #if INCLUDE_RTM_OPT |
kvn@6429 | 215 | // RTM state at compile time. Used during deoptimization to decide |
kvn@6429 | 216 | // whether to restart collecting RTM locking abort statistic again. |
kvn@6429 | 217 | RTMState _rtm_state; |
kvn@6429 | 218 | #endif |
kvn@6429 | 219 | |
dcubed@2624 | 220 | // Nmethod Flushing lock. If non-zero, then the nmethod is not removed |
dcubed@2624 | 221 | // and is not made into a zombie. However, once the nmethod is made into |
dcubed@2624 | 222 | // a zombie, it will be locked one final time if CompiledMethodUnload |
dcubed@2624 | 223 | // event processing needs to be done. |
duke@435 | 224 | jint _lock_count; |
duke@435 | 225 | |
duke@435 | 226 | // not_entrant method removal. Each mark_sweep pass will update |
duke@435 | 227 | // this mark to the current sweep invocation count if it is seen on the |
anoll@5792 | 228 | // stack. A not_entrant method can be removed when there are no |
duke@435 | 229 | // more activations, i.e., when the _stack_traversal_mark is less than |
duke@435 | 230 | // the current sweep traversal index. |
duke@435 | 231 | long _stack_traversal_mark; |
duke@435 | 232 | |
anoll@5792 | 233 | // The _hotness_counter indicates the hotness of a method. The higher |
anoll@5792 | 234 | // the value the hotter the method. The hotness counter of an nmethod is |
anoll@5792 | 235 | // set to [(ReservedCodeCacheSize / (1024 * 1024)) * 2] each time the method |
anoll@5792 | 236 | // is active while stack scanning (mark_active_nmethods()). The hotness |
anoll@5792 | 237 | // counter is decreased (by 1) while sweeping. |
anoll@5792 | 238 | int _hotness_counter; |
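
A worked example of the reset formula above, with an illustrative (not default) cache size:

// With ReservedCodeCacheSize = 128 MB:
//   reset value = (128 * 1024 * 1024 / (1024 * 1024)) * 2 = 256
// So an nmethod that is never again seen during stack scanning survives
// roughly 256 sweeper passes before its hotness counter reaches zero.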
anoll@5792 | 239 | |
duke@435 | 240 | ExceptionCache *_exception_cache; |
duke@435 | 241 | PcDescCache _pc_desc_cache; |
duke@435 | 242 | |
kamg@2361 | 243 | // These are used for compiled synchronized native methods to |
duke@435 | 244 | // locate the owner and stack slot for the BasicLock so that we can |
duke@435 | 245 | // properly revoke the bias of the owner if necessary. They are |
duke@435 | 246 | // needed because there is no debug information for compiled native |
duke@435 | 247 | // wrappers and the oop maps are insufficient to allow |
duke@435 | 248 | // frame::retrieve_receiver() to work. Currently they are expected |
duke@435 | 249 | // to be byte offsets from the Java stack pointer for maximum code |
duke@435 | 250 | // sharing between platforms. Note that currently biased locking |
duke@435 | 251 | // will never cause Class instances to be biased but this code |
duke@435 | 252 | // handles the static synchronized case as well. |
kamg@2361 | 253 | // JVMTI's GetLocalInstance() also uses these offsets to find the receiver |
kamg@2361 | 254 | // for non-static native wrapper frames. |
kamg@2361 | 255 | ByteSize _native_receiver_sp_offset; |
kamg@2361 | 256 | ByteSize _native_basic_lock_sp_offset; |
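
A hedged sketch of how a consumer such as biased-locking revocation or JVMTI's GetLocalInstance() would use these offsets, assuming fr is the native wrapper's frame; the helper name is illustrative and the accessors used are the public ones declared later in this class:

// Sketch only: the stored values are byte offsets from the Java SP, so the
// receiver is recovered with plain pointer arithmetic, no debug info needed.
oop sketch_fetch_native_receiver(nmethod* nm, const frame& fr) {
  address sp = (address) fr.sp();
  oop* receiver_addr = (oop*) (sp + in_bytes(nm->native_receiver_sp_offset()));
  return *receiver_addr;
}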
duke@435 | 257 | |
duke@435 | 258 | friend class nmethodLocker; |
duke@435 | 259 | |
duke@435 | 260 | // For native wrappers |
coleenp@4037 | 261 | nmethod(Method* method, |
duke@435 | 262 | int nmethod_size, |
twisti@2687 | 263 | int compile_id, |
duke@435 | 264 | CodeOffsets* offsets, |
duke@435 | 265 | CodeBuffer *code_buffer, |
duke@435 | 266 | int frame_size, |
duke@435 | 267 | ByteSize basic_lock_owner_sp_offset, /* synchronized natives only */ |
duke@435 | 268 | ByteSize basic_lock_sp_offset, /* synchronized natives only */ |
duke@435 | 269 | OopMapSet* oop_maps); |
duke@435 | 270 | |
kamg@551 | 271 | #ifdef HAVE_DTRACE_H |
kamg@551 | 272 | // For native wrappers |
coleenp@4037 | 273 | nmethod(Method* method, |
kamg@551 | 274 | int nmethod_size, |
kamg@551 | 275 | CodeOffsets* offsets, |
kamg@551 | 276 | CodeBuffer *code_buffer, |
kamg@551 | 277 | int frame_size); |
kamg@551 | 278 | #endif // def HAVE_DTRACE_H |
kamg@551 | 279 | |
duke@435 | 280 | // Creation support |
coleenp@4037 | 281 | nmethod(Method* method, |
duke@435 | 282 | int nmethod_size, |
duke@435 | 283 | int compile_id, |
duke@435 | 284 | int entry_bci, |
duke@435 | 285 | CodeOffsets* offsets, |
duke@435 | 286 | int orig_pc_offset, |
duke@435 | 287 | DebugInformationRecorder *recorder, |
duke@435 | 288 | Dependencies* dependencies, |
duke@435 | 289 | CodeBuffer *code_buffer, |
duke@435 | 290 | int frame_size, |
duke@435 | 291 | OopMapSet* oop_maps, |
duke@435 | 292 | ExceptionHandlerTable* handler_table, |
duke@435 | 293 | ImplicitExceptionTable* nul_chk_table, |
duke@435 | 294 | AbstractCompiler* compiler, |
duke@435 | 295 | int comp_level); |
duke@435 | 296 | |
duke@435 | 297 | // helper methods |
coleenp@5614 | 298 | void* operator new(size_t size, int nmethod_size) throw(); |
duke@435 | 299 | |
duke@435 | 300 | const char* reloc_string_for(u_char* begin, u_char* end); |
never@1544 | 301 | // Returns true if this thread changed the state of the nmethod or |
never@1544 | 302 | // false if another thread performed the transition. |
never@1576 | 303 | bool make_not_entrant_or_zombie(unsigned int state); |
duke@435 | 304 | void inc_decompile_count(); |
duke@435 | 305 | |
duke@435 | 306 | // Used to manipulate the exception cache |
duke@435 | 307 | void add_exception_cache_entry(ExceptionCache* new_entry); |
duke@435 | 308 | ExceptionCache* exception_cache_entry_for_exception(Handle exception); |
duke@435 | 309 | |
duke@435 | 310 | // Inform external interfaces that a compiled method has been unloaded |
never@1999 | 311 | void post_compiled_method_unload(); |
never@1999 | 312 | |
never@1999 | 313 | // Initialize fields to their default values |
never@1999 | 314 | void init_defaults(); |
duke@435 | 315 | |
duke@435 | 316 | public: |
duke@435 | 317 | // create nmethod with entry_bci |
duke@435 | 318 | static nmethod* new_nmethod(methodHandle method, |
duke@435 | 319 | int compile_id, |
duke@435 | 320 | int entry_bci, |
duke@435 | 321 | CodeOffsets* offsets, |
duke@435 | 322 | int orig_pc_offset, |
duke@435 | 323 | DebugInformationRecorder* recorder, |
duke@435 | 324 | Dependencies* dependencies, |
duke@435 | 325 | CodeBuffer *code_buffer, |
duke@435 | 326 | int frame_size, |
duke@435 | 327 | OopMapSet* oop_maps, |
duke@435 | 328 | ExceptionHandlerTable* handler_table, |
duke@435 | 329 | ImplicitExceptionTable* nul_chk_table, |
duke@435 | 330 | AbstractCompiler* compiler, |
duke@435 | 331 | int comp_level); |
duke@435 | 332 | |
duke@435 | 333 | static nmethod* new_native_nmethod(methodHandle method, |
twisti@2687 | 334 | int compile_id, |
duke@435 | 335 | CodeBuffer *code_buffer, |
duke@435 | 336 | int vep_offset, |
duke@435 | 337 | int frame_complete, |
duke@435 | 338 | int frame_size, |
duke@435 | 339 | ByteSize receiver_sp_offset, |
duke@435 | 340 | ByteSize basic_lock_sp_offset, |
duke@435 | 341 | OopMapSet* oop_maps); |
duke@435 | 342 | |
kamg@551 | 343 | #ifdef HAVE_DTRACE_H |
kamg@551 | 344 | // The method we generate for a dtrace probe has to look |
kamg@551 | 345 | // like an nmethod as far as the rest of the system is concerned, |
kamg@551 | 346 | // which is somewhat unfortunate. |
kamg@551 | 347 | static nmethod* new_dtrace_nmethod(methodHandle method, |
kamg@551 | 348 | CodeBuffer *code_buffer, |
kamg@551 | 349 | int vep_offset, |
kamg@551 | 350 | int trap_offset, |
kamg@551 | 351 | int frame_complete, |
kamg@551 | 352 | int frame_size); |
kamg@551 | 353 | |
kamg@551 | 354 | int trap_offset() const { return _trap_offset; } |
twisti@2103 | 355 | address trap_address() const { return insts_begin() + _trap_offset; } |
kamg@551 | 356 | |
kamg@551 | 357 | #endif // def HAVE_DTRACE_H |
kamg@551 | 358 | |
duke@435 | 359 | // accessors |
coleenp@4037 | 360 | Method* method() const { return _method; } |
duke@435 | 361 | AbstractCompiler* compiler() const { return _compiler; } |
duke@435 | 362 | |
duke@435 | 363 | // type info |
duke@435 | 364 | bool is_nmethod() const { return true; } |
duke@435 | 365 | bool is_java_method() const { return !method()->is_native(); } |
duke@435 | 366 | bool is_native_method() const { return method()->is_native(); } |
duke@435 | 367 | bool is_osr_method() const { return _entry_bci != InvocationEntryBci; } |
duke@435 | 368 | |
duke@435 | 369 | bool is_compiled_by_c1() const; |
duke@435 | 370 | bool is_compiled_by_c2() const; |
twisti@2047 | 371 | bool is_compiled_by_shark() const; |
duke@435 | 372 | |
duke@435 | 373 | // boundaries for different parts |
twisti@2117 | 374 | address consts_begin () const { return header_begin() + _consts_offset ; } |
twisti@2117 | 375 | address consts_end () const { return header_begin() + code_offset() ; } |
twisti@2117 | 376 | address insts_begin () const { return header_begin() + code_offset() ; } |
twisti@2103 | 377 | address insts_end () const { return header_begin() + _stub_offset ; } |
twisti@2117 | 378 | address stub_begin () const { return header_begin() + _stub_offset ; } |
twisti@2117 | 379 | address stub_end () const { return header_begin() + _oops_offset ; } |
twisti@1639 | 380 | address exception_begin () const { return header_begin() + _exception_offset ; } |
twisti@1639 | 381 | address deopt_handler_begin () const { return header_begin() + _deoptimize_offset ; } |
twisti@1639 | 382 | address deopt_mh_handler_begin() const { return header_begin() + _deoptimize_mh_offset ; } |
never@1813 | 383 | address unwind_handler_begin () const { return _unwind_handler_offset != -1 ? (header_begin() + _unwind_handler_offset) : NULL; } |
twisti@1918 | 384 | oop* oops_begin () const { return (oop*) (header_begin() + _oops_offset) ; } |
coleenp@4037 | 385 | oop* oops_end () const { return (oop*) (header_begin() + _metadata_offset) ; } |
coleenp@4037 | 386 | |
coleenp@4037 | 387 | Metadata** metadata_begin () const { return (Metadata**) (header_begin() + _metadata_offset) ; } |
coleenp@4037 | 388 | Metadata** metadata_end () const { return (Metadata**) (header_begin() + _scopes_data_offset) ; } |
twisti@1918 | 389 | |
twisti@1639 | 390 | address scopes_data_begin () const { return header_begin() + _scopes_data_offset ; } |
twisti@1639 | 391 | address scopes_data_end () const { return header_begin() + _scopes_pcs_offset ; } |
twisti@1639 | 392 | PcDesc* scopes_pcs_begin () const { return (PcDesc*)(header_begin() + _scopes_pcs_offset ); } |
twisti@1639 | 393 | PcDesc* scopes_pcs_end () const { return (PcDesc*)(header_begin() + _dependencies_offset) ; } |
twisti@1639 | 394 | address dependencies_begin () const { return header_begin() + _dependencies_offset ; } |
twisti@1639 | 395 | address dependencies_end () const { return header_begin() + _handler_table_offset ; } |
twisti@1639 | 396 | address handler_table_begin () const { return header_begin() + _handler_table_offset ; } |
twisti@1639 | 397 | address handler_table_end () const { return header_begin() + _nul_chk_table_offset ; } |
twisti@1639 | 398 | address nul_chk_table_begin () const { return header_begin() + _nul_chk_table_offset ; } |
twisti@1639 | 399 | address nul_chk_table_end () const { return header_begin() + _nmethod_end_offset ; } |
duke@435 | 400 | |
twisti@1918 | 401 | // Sizes |
twisti@2117 | 402 | int consts_size () const { return consts_end () - consts_begin (); } |
twisti@2103 | 403 | int insts_size () const { return insts_end () - insts_begin (); } |
twisti@1918 | 404 | int stub_size () const { return stub_end () - stub_begin (); } |
twisti@1918 | 405 | int oops_size () const { return (address) oops_end () - (address) oops_begin (); } |
coleenp@4037 | 406 | int metadata_size () const { return (address) metadata_end () - (address) metadata_begin (); } |
twisti@1918 | 407 | int scopes_data_size () const { return scopes_data_end () - scopes_data_begin (); } |
twisti@1918 | 408 | int scopes_pcs_size () const { return (intptr_t) scopes_pcs_end () - (intptr_t) scopes_pcs_begin (); } |
twisti@1918 | 409 | int dependencies_size () const { return dependencies_end () - dependencies_begin (); } |
twisti@1918 | 410 | int handler_table_size() const { return handler_table_end() - handler_table_begin(); } |
twisti@1918 | 411 | int nul_chk_table_size() const { return nul_chk_table_end() - nul_chk_table_begin(); } |
duke@435 | 412 | |
duke@435 | 413 | int total_size () const; |
duke@435 | 414 | |
anoll@5792 | 415 | void dec_hotness_counter() { _hotness_counter--; } |
anoll@5792 | 416 | void set_hotness_counter(int val) { _hotness_counter = val; } |
anoll@5792 | 417 | int hotness_counter() const { return _hotness_counter; } |
anoll@5792 | 418 | |
twisti@1918 | 419 | // Containment |
twisti@2117 | 420 | bool consts_contains (address addr) const { return consts_begin () <= addr && addr < consts_end (); } |
twisti@2103 | 421 | bool insts_contains (address addr) const { return insts_begin () <= addr && addr < insts_end (); } |
duke@435 | 422 | bool stub_contains (address addr) const { return stub_begin () <= addr && addr < stub_end (); } |
twisti@1918 | 423 | bool oops_contains (oop* addr) const { return oops_begin () <= addr && addr < oops_end (); } |
coleenp@4037 | 424 | bool metadata_contains (Metadata** addr) const { return metadata_begin () <= addr && addr < metadata_end (); } |
duke@435 | 425 | bool scopes_data_contains (address addr) const { return scopes_data_begin () <= addr && addr < scopes_data_end (); } |
duke@435 | 426 | bool scopes_pcs_contains (PcDesc* addr) const { return scopes_pcs_begin () <= addr && addr < scopes_pcs_end (); } |
duke@435 | 427 | bool handler_table_contains(address addr) const { return handler_table_begin() <= addr && addr < handler_table_end(); } |
duke@435 | 428 | bool nul_chk_table_contains(address addr) const { return nul_chk_table_begin() <= addr && addr < nul_chk_table_end(); } |
duke@435 | 429 | |
duke@435 | 430 | // entry points |
duke@435 | 431 | address entry_point() const { return _entry_point; } // normal entry point |
duke@435 | 432 | address verified_entry_point() const { return _verified_entry_point; } // if klass is correct |
duke@435 | 433 | |
duke@435 | 434 | // flag accessing and manipulation |
kvn@6172 | 435 | bool is_in_use() const { return _state == in_use; } |
kvn@6172 | 436 | bool is_alive() const { return _state == in_use || _state == not_entrant; } |
never@1999 | 437 | bool is_not_entrant() const { return _state == not_entrant; } |
never@1999 | 438 | bool is_zombie() const { return _state == zombie; } |
never@1999 | 439 | bool is_unloaded() const { return _state == unloaded; } |
duke@435 | 440 | |
kvn@6429 | 441 | #if INCLUDE_RTM_OPT |
kvn@6429 | 442 | // rtm state accessing and manipulating |
kvn@6429 | 443 | RTMState rtm_state() const { return _rtm_state; } |
kvn@6429 | 444 | void set_rtm_state(RTMState state) { _rtm_state = state; } |
kvn@6429 | 445 | #endif |
kvn@6429 | 446 | |
never@1544 | 447 | // Make the nmethod non-entrant. The nmethod will continue to be |
never@1544 | 448 | // alive. It is used when an uncommon trap happens. Returns true |
never@1544 | 449 | // if this thread changed the state of the nmethod or false if |
never@1544 | 450 | // another thread performed the transition. |
anoll@5792 | 451 | bool make_not_entrant() { return make_not_entrant_or_zombie(not_entrant); } |
anoll@5792 | 452 | bool make_zombie() { return make_not_entrant_or_zombie(zombie); } |
duke@435 | 453 | |
duke@435 | 454 | // used by jvmti to track if the unload event has been reported |
duke@435 | 455 | bool unload_reported() { return _unload_reported; } |
duke@435 | 456 | void set_unload_reported() { _unload_reported = true; } |
duke@435 | 457 | |
stefank@6992 | 458 | void set_unloading_next(nmethod* next) { _unloading_next = next; } |
stefank@6992 | 459 | nmethod* unloading_next() { return _unloading_next; } |
stefank@6992 | 460 | |
stefank@6992 | 461 | static unsigned char global_unloading_clock() { return _global_unloading_clock; } |
stefank@6992 | 462 | static void increase_unloading_clock(); |
stefank@6992 | 463 | |
stefank@6992 | 464 | void set_unloading_clock(unsigned char unloading_clock); |
stefank@6992 | 465 | unsigned char unloading_clock(); |
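
A hedged sketch of the clock protocol these declarations support: the GC bumps the global clock once per unloading pass, each worker stamps an nmethod after cleaning it, and a matching stamp later in the same pass means the nmethod can be skipped (the real users are the do_unloading_parallel* methods declared below):

// Sketch of the intended usage:
//   per GC cycle:   nmethod::increase_unloading_clock();
//   after cleaning: nm->set_unloading_clock(nmethod::global_unloading_clock());
//   skip test:      nm->unloading_clock() == nmethod::global_unloading_clock()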
stefank@6992 | 466 | |
never@1999 | 467 | bool is_marked_for_deoptimization() const { return _marked_for_deoptimization; } |
never@1999 | 468 | void mark_for_deoptimization() { _marked_for_deoptimization = true; } |
duke@435 | 469 | |
duke@435 | 470 | void make_unloaded(BoolObjectClosure* is_alive, oop cause); |
duke@435 | 471 | |
duke@435 | 472 | bool has_dependencies() { return dependencies_size() != 0; } |
duke@435 | 473 | void flush_dependencies(BoolObjectClosure* is_alive); |
never@1999 | 474 | bool has_flushed_dependencies() { return _has_flushed_dependencies; } |
never@1999 | 475 | void set_has_flushed_dependencies() { |
duke@435 | 476 | assert(!has_flushed_dependencies(), "should only happen once"); |
never@1999 | 477 | _has_flushed_dependencies = 1; |
duke@435 | 478 | } |
duke@435 | 479 | |
never@1999 | 480 | bool is_marked_for_reclamation() const { return _marked_for_reclamation; } |
never@1999 | 481 | void mark_for_reclamation() { _marked_for_reclamation = 1; } |
duke@435 | 482 | |
never@1999 | 483 | bool has_unsafe_access() const { return _has_unsafe_access; } |
never@1999 | 484 | void set_has_unsafe_access(bool z) { _has_unsafe_access = z; } |
duke@435 | 485 | |
never@1999 | 486 | bool has_method_handle_invokes() const { return _has_method_handle_invokes; } |
never@1999 | 487 | void set_has_method_handle_invokes(bool z) { _has_method_handle_invokes = z; } |
twisti@1570 | 488 | |
never@3500 | 489 | bool is_lazy_critical_native() const { return _lazy_critical_native; } |
never@3500 | 490 | void set_lazy_critical_native(bool z) { _lazy_critical_native = z; } |
kvn@1637 | 491 | |
kvn@4103 | 492 | bool has_wide_vectors() const { return _has_wide_vectors; } |
kvn@4103 | 493 | void set_has_wide_vectors(bool z) { _has_wide_vectors = z; } |
kvn@4103 | 494 | |
duke@435 | 495 | int comp_level() const { return _comp_level; } |
duke@435 | 496 | |
twisti@1918 | 497 | // Support for oops in scopes and relocs: |
twisti@1918 | 498 | // Note: index 0 is reserved for null. |
twisti@1918 | 499 | oop oop_at(int index) const { return index == 0 ? (oop) NULL: *oop_addr_at(index); } |
twisti@1918 | 500 | oop* oop_addr_at(int index) const { // for GC |
twisti@1918 | 501 | // relocation indexes are biased by 1 (because 0 is reserved) |
twisti@1918 | 502 | assert(index > 0 && index <= oops_size(), "must be a valid non-zero index"); |
never@2081 | 503 | assert(!_oops_are_stale, "oops are stale"); |
twisti@1918 | 504 | return &oops_begin()[index - 1]; |
twisti@1918 | 505 | } |
twisti@1918 | 506 | |
coleenp@4037 | 507 | // Support for meta data in scopes and relocs: |
coleenp@4037 | 508 | // Note: index 0 is reserved for null. |
coleenp@4037 | 509 | Metadata* metadata_at(int index) const { return index == 0 ? NULL: *metadata_addr_at(index); } |
coleenp@4037 | 510 | Metadata** metadata_addr_at(int index) const { // for GC |
coleenp@4037 | 511 | // relocation indexes are biased by 1 (because 0 is reserved) |
coleenp@4037 | 512 | assert(index > 0 && index <= metadata_size(), "must be a valid non-zero index"); |
coleenp@4037 | 513 | return &metadata_begin()[index - 1]; |
coleenp@4037 | 514 | } |
coleenp@4037 | 515 | |
coleenp@4037 | 516 | void copy_values(GrowableArray<jobject>* oops); |
coleenp@4037 | 517 | void copy_values(GrowableArray<Metadata*>* metadata); |
twisti@1918 | 518 | |
twisti@1918 | 519 | // Relocation support |
twisti@1918 | 520 | private: |
twisti@1918 | 521 | void fix_oop_relocations(address begin, address end, bool initialize_immediates); |
twisti@1918 | 522 | inline void initialize_immediate_oop(oop* dest, jobject handle); |
twisti@1918 | 523 | |
twisti@1918 | 524 | public: |
twisti@1918 | 525 | void fix_oop_relocations(address begin, address end) { fix_oop_relocations(begin, end, false); } |
twisti@1918 | 526 | void fix_oop_relocations() { fix_oop_relocations(NULL, NULL, false); } |
never@2657 | 527 | void verify_oop_relocations(); |
twisti@1918 | 528 | |
twisti@1918 | 529 | bool is_at_poll_return(address pc); |
twisti@1918 | 530 | bool is_at_poll_or_poll_return(address pc); |
twisti@1918 | 531 | |
jmasa@2909 | 532 | // Scavengable oop support |
jrose@1424 | 533 | bool on_scavenge_root_list() const { return (_scavenge_root_state & 1) != 0; } |
jrose@1424 | 534 | protected: |
jmasa@2909 | 535 | enum { sl_on_list = 0x01, sl_marked = 0x10 }; |
jmasa@2909 | 536 | void set_on_scavenge_root_list() { _scavenge_root_state = sl_on_list; } |
jrose@1424 | 537 | void clear_on_scavenge_root_list() { _scavenge_root_state = 0; } |
jrose@1424 | 538 | // assertion-checking and pruning logic uses the bits of _scavenge_root_state |
jrose@1424 | 539 | #ifndef PRODUCT |
jmasa@2909 | 540 | void set_scavenge_root_marked() { _scavenge_root_state |= sl_marked; } |
jmasa@2909 | 541 | void clear_scavenge_root_marked() { _scavenge_root_state &= ~sl_marked; } |
jmasa@2909 | 542 | bool scavenge_root_not_marked() { return (_scavenge_root_state &~ sl_on_list) == 0; } |
jrose@1424 | 543 | // N.B. there is no positive marked query, and we only use the not_marked query for asserts. |
jrose@1424 | 544 | #endif //PRODUCT |
jrose@1424 | 545 | nmethod* scavenge_root_link() const { return _scavenge_root_link; } |
jrose@1424 | 546 | void set_scavenge_root_link(nmethod *n) { _scavenge_root_link = n; } |
jrose@1424 | 547 | |
jrose@1424 | 548 | public: |
jrose@1424 | 549 | |
duke@435 | 550 | // Sweeper support |
duke@435 | 551 | long stack_traversal_mark() { return _stack_traversal_mark; } |
duke@435 | 552 | void set_stack_traversal_mark(long l) { _stack_traversal_mark = l; } |
duke@435 | 553 | |
duke@435 | 554 | // Exception cache support |
duke@435 | 555 | ExceptionCache* exception_cache() const { return _exception_cache; } |
duke@435 | 556 | void set_exception_cache(ExceptionCache *ec) { _exception_cache = ec; } |
duke@435 | 557 | address handler_for_exception_and_pc(Handle exception, address pc); |
duke@435 | 558 | void add_handler_for_exception_and_pc(Handle exception, address pc, address handler); |
stefank@6983 | 559 | void clean_exception_cache(BoolObjectClosure* is_alive); |
duke@435 | 560 | |
duke@435 | 561 | // implicit exceptions support |
duke@435 | 562 | address continuation_for_implicit_exception(address pc); |
duke@435 | 563 | |
duke@435 | 564 | // On-stack replacement support |
twisti@2687 | 565 | int osr_entry_bci() const { assert(is_osr_method(), "wrong kind of nmethod"); return _entry_bci; } |
twisti@2687 | 566 | address osr_entry() const { assert(is_osr_method(), "wrong kind of nmethod"); return _osr_entry_point; } |
duke@435 | 567 | void invalidate_osr_method(); |
jrose@1424 | 568 | nmethod* osr_link() const { return _osr_link; } |
jrose@1424 | 569 | void set_osr_link(nmethod *n) { _osr_link = n; } |
duke@435 | 570 | |
duke@435 | 571 | // tells whether frames described by this nmethod can be deoptimized |
duke@435 | 572 | // note: native wrappers cannot be deoptimized. |
duke@435 | 573 | bool can_be_deoptimized() const { return is_java_method(); } |
duke@435 | 574 | |
duke@435 | 575 | // Inline cache support |
duke@435 | 576 | void clear_inline_caches(); |
duke@435 | 577 | void cleanup_inline_caches(); |
duke@435 | 578 | bool inlinecache_check_contains(address addr) const { |
twisti@2103 | 579 | return (addr >= code_begin() && addr < verified_entry_point()); |
duke@435 | 580 | } |
duke@435 | 581 | |
stefank@6992 | 582 | // Verify calls to dead methods have been cleaned. |
stefank@6992 | 583 | void verify_clean_inline_caches(); |
stefank@6992 | 584 | // Verify and count cached icholder relocations. |
stefank@6992 | 585 | int verify_icholder_relocations(); |
coleenp@4037 | 586 | // Check that all metadata is still alive |
coleenp@4037 | 587 | void verify_metadata_loaders(address low_boundary, BoolObjectClosure* is_alive); |
coleenp@4037 | 588 | |
duke@435 | 589 | // unlink and deallocate this nmethod |
duke@435 | 590 | // Only NMethodSweeper class is expected to use this. NMethodSweeper is not |
duke@435 | 591 | // expected to use any other private methods/data in this class. |
duke@435 | 592 | |
duke@435 | 593 | protected: |
duke@435 | 594 | void flush(); |
duke@435 | 595 | |
duke@435 | 596 | public: |
dcubed@2624 | 597 | // When true is returned, it is unsafe to remove this nmethod even if |
dcubed@2624 | 598 | // it is a zombie, since the VM or the ServiceThread might still be |
dcubed@2624 | 599 | // using it. |
duke@435 | 600 | bool is_locked_by_vm() const { return _lock_count > 0; } |
duke@435 | 601 | |
duke@435 | 602 | // See comment at definition of _last_seen_on_stack |
duke@435 | 603 | void mark_as_seen_on_stack(); |
duke@435 | 604 | bool can_not_entrant_be_converted(); |
duke@435 | 605 | |
coleenp@4037 | 606 | // Evolution support. We make old (discarded) compiled methods point to new Method*s. |
coleenp@4037 | 607 | void set_method(Method* method) { _method = method; } |
duke@435 | 608 | |
duke@435 | 609 | // GC support |
brutisso@4098 | 610 | void do_unloading(BoolObjectClosure* is_alive, bool unloading_occurred); |
stefank@6992 | 611 | // The parallel versions are used by G1. |
stefank@6992 | 612 | bool do_unloading_parallel(BoolObjectClosure* is_alive, bool unloading_occurred); |
stefank@6992 | 613 | void do_unloading_parallel_postponed(BoolObjectClosure* is_alive, bool unloading_occurred); |
stefank@6992 | 614 | // Unload an nmethod if the *root object is dead. |
brutisso@4098 | 615 | bool can_unload(BoolObjectClosure* is_alive, oop* root, bool unloading_occurred); |
duke@435 | 616 | |
duke@435 | 617 | void preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map, |
duke@435 | 618 | OopClosure* f); |
twisti@1918 | 619 | void oops_do(OopClosure* f) { oops_do(f, false); } |
johnc@5548 | 620 | void oops_do(OopClosure* f, bool allow_zombie); |
jrose@1424 | 621 | bool detect_scavenge_root_oops(); |
jrose@1424 | 622 | void verify_scavenge_root_oops() PRODUCT_RETURN; |
jrose@1424 | 623 | |
jrose@1424 | 624 | bool test_set_oops_do_mark(); |
jrose@1424 | 625 | static void oops_do_marking_prologue(); |
jrose@1424 | 626 | static void oops_do_marking_epilogue(); |
jrose@1424 | 627 | static bool oops_do_marking_is_active() { return _oops_do_mark_nmethods != NULL; } |
johnc@3689 | 628 | bool test_oops_do_mark() { return _oops_do_mark_link != NULL; } |
duke@435 | 629 | |
duke@435 | 630 | // ScopeDesc for an instruction |
duke@435 | 631 | ScopeDesc* scope_desc_at(address pc); |
duke@435 | 632 | |
duke@435 | 633 | private: |
duke@435 | 634 | ScopeDesc* scope_desc_in(address begin, address end); |
duke@435 | 635 | |
twisti@1639 | 636 | address* orig_pc_addr(const frame* fr) { return (address*) ((address)fr->unextended_sp() + _orig_pc_offset); } |
duke@435 | 637 | |
duke@435 | 638 | PcDesc* find_pc_desc_internal(address pc, bool approximate); |
duke@435 | 639 | |
duke@435 | 640 | PcDesc* find_pc_desc(address pc, bool approximate) { |
duke@435 | 641 | PcDesc* desc = _pc_desc_cache.last_pc_desc(); |
twisti@2103 | 642 | if (desc != NULL && desc->pc_offset() == pc - code_begin()) { |
duke@435 | 643 | return desc; |
duke@435 | 644 | } |
duke@435 | 645 | return find_pc_desc_internal(pc, approximate); |
duke@435 | 646 | } |
duke@435 | 647 | |
duke@435 | 648 | public: |
duke@435 | 649 | // ScopeDesc retrieval operation |
duke@435 | 650 | PcDesc* pc_desc_at(address pc) { return find_pc_desc(pc, false); } |
duke@435 | 651 | // pc_desc_near returns the first PcDesc at or after the given pc. |
duke@435 | 652 | PcDesc* pc_desc_near(address pc) { return find_pc_desc(pc, true); } |
duke@435 | 653 | |
duke@435 | 654 | public: |
duke@435 | 655 | // copying of debugging information |
duke@435 | 656 | void copy_scopes_pcs(PcDesc* pcs, int count); |
duke@435 | 657 | void copy_scopes_data(address buffer, int size); |
duke@435 | 658 | |
twisti@1639 | 659 | // Deopt |
twisti@1639 | 660 | // Returns true if the PC is one we would expect if the frame is being deopted. |
twisti@1639 | 661 | bool is_deopt_pc (address pc) { return is_deopt_entry(pc) || is_deopt_mh_entry(pc); } |
twisti@1639 | 662 | bool is_deopt_entry (address pc) { return pc == deopt_handler_begin(); } |
twisti@1639 | 663 | bool is_deopt_mh_entry(address pc) { return pc == deopt_mh_handler_begin(); } |
duke@435 | 664 | // Accessor/mutator for the original pc of a frame before a frame was deopted. |
duke@435 | 665 | address get_original_pc(const frame* fr) { return *orig_pc_addr(fr); } |
duke@435 | 666 | void set_original_pc(const frame* fr, address pc) { *orig_pc_addr(fr) = pc; } |
duke@435 | 667 | |
twisti@1639 | 668 | static address get_deopt_original_pc(const frame* fr); |
twisti@1639 | 669 | |
twisti@1570 | 670 | // MethodHandle |
twisti@1570 | 671 | bool is_method_handle_return(address return_pc); |
twisti@1570 | 672 | |
duke@435 | 673 | // jvmti support: |
duke@435 | 674 | void post_compiled_method_load_event(); |
never@1971 | 675 | jmethodID get_and_cache_jmethod_id(); |
duke@435 | 676 | |
duke@435 | 677 | // verify operations |
duke@435 | 678 | void verify(); |
duke@435 | 679 | void verify_scopes(); |
duke@435 | 680 | void verify_interrupt_point(address interrupt_point); |
duke@435 | 681 | |
duke@435 | 682 | // printing support |
jrose@535 | 683 | void print() const; |
jrose@535 | 684 | void print_code(); |
duke@435 | 685 | void print_relocations() PRODUCT_RETURN; |
duke@435 | 686 | void print_pcs() PRODUCT_RETURN; |
duke@435 | 687 | void print_scopes() PRODUCT_RETURN; |
duke@435 | 688 | void print_dependencies() PRODUCT_RETURN; |
duke@435 | 689 | void print_value_on(outputStream* st) const PRODUCT_RETURN; |
duke@435 | 690 | void print_calls(outputStream* st) PRODUCT_RETURN; |
duke@435 | 691 | void print_handler_table() PRODUCT_RETURN; |
duke@435 | 692 | void print_nul_chk_table() PRODUCT_RETURN; |
jrose@535 | 693 | void print_nmethod(bool print_code); |
duke@435 | 694 | |
bobv@2036 | 695 | // need to re-define this from CodeBlob else the overload hides it |
bobv@2036 | 696 | virtual void print_on(outputStream* st) const { CodeBlob::print_on(st); } |
twisti@2687 | 697 | void print_on(outputStream* st, const char* msg) const; |
duke@435 | 698 | |
duke@435 | 699 | // Logging |
duke@435 | 700 | void log_identity(xmlStream* log) const; |
duke@435 | 701 | void log_new_nmethod() const; |
never@1544 | 702 | void log_state_change() const; |
duke@435 | 703 | |
jrose@1590 | 704 | // Prints block-level comments, including nmethod specific block labels: |
kvn@4107 | 705 | virtual void print_block_comment(outputStream* stream, address block_begin) const { |
jrose@1590 | 706 | print_nmethod_labels(stream, block_begin); |
jrose@1590 | 707 | CodeBlob::print_block_comment(stream, block_begin); |
jrose@1590 | 708 | } |
kvn@4107 | 709 | void print_nmethod_labels(outputStream* stream, address block_begin) const; |
jrose@1590 | 710 | |
duke@435 | 711 | // Prints a comment for one native instruction (reloc info, pc desc) |
jrose@535 | 712 | void print_code_comment_on(outputStream* st, int column, address begin, address end); |
duke@435 | 713 | static void print_statistics() PRODUCT_RETURN; |
duke@435 | 714 | |
duke@435 | 715 | // Compiler task identification. Note that all OSR methods |
duke@435 | 716 | // are numbered in an independent sequence if CICountOSR is true, |
duke@435 | 717 | // and native method wrappers are also numbered independently if |
duke@435 | 718 | // CICountNative is true. |
duke@435 | 719 | int compile_id() const { return _compile_id; } |
duke@435 | 720 | const char* compile_kind() const; |
duke@435 | 721 | |
duke@435 | 722 | // For debugging |
duke@435 | 723 | // CompiledIC* IC_at(char* p) const; |
duke@435 | 724 | // PrimitiveIC* primitiveIC_at(char* p) const; |
duke@435 | 725 | oop embeddedOop_at(address p); |
duke@435 | 726 | |
duke@435 | 727 | // tells if any of this method's dependencies have been invalidated |
duke@435 | 728 | // (this is expensive!) |
duke@435 | 729 | bool check_all_dependencies(); |
duke@435 | 730 | |
duke@435 | 731 | // tells if this compiled method is dependent on the given changes, |
duke@435 | 732 | // and the changes have invalidated it |
duke@435 | 733 | bool check_dependency_on(DepChange& changes); |
duke@435 | 734 | |
duke@435 | 735 | // Evolution support. Tells if this compiled method is dependent on any of |
duke@435 | 736 | // methods m() of class dependee, such that if m() in dependee is replaced, |
duke@435 | 737 | // this compiled method will have to be deoptimized. |
coleenp@4037 | 738 | bool is_evol_dependent_on(Klass* dependee); |
duke@435 | 739 | |
duke@435 | 740 | // Fast breakpoint support. Tells if this compiled method is |
duke@435 | 741 | // dependent on the given method. Returns true if this nmethod |
duke@435 | 742 | // corresponds to the given method as well. |
coleenp@4037 | 743 | bool is_dependent_on_method(Method* dependee); |
duke@435 | 744 | |
duke@435 | 745 | // is it ok to patch at address? |
duke@435 | 746 | bool is_patchable_at(address instr_address); |
duke@435 | 747 | |
duke@435 | 748 | // UseBiasedLocking support |
kamg@2361 | 749 | ByteSize native_receiver_sp_offset() { |
kamg@2361 | 750 | return _native_receiver_sp_offset; |
duke@435 | 751 | } |
kamg@2361 | 752 | ByteSize native_basic_lock_sp_offset() { |
kamg@2361 | 753 | return _native_basic_lock_sp_offset; |
duke@435 | 754 | } |
duke@435 | 755 | |
duke@435 | 756 | // support for code generation |
duke@435 | 757 | static int verified_entry_point_offset() { return offset_of(nmethod, _verified_entry_point); } |
duke@435 | 758 | static int osr_entry_point_offset() { return offset_of(nmethod, _osr_entry_point); } |
duke@435 | 759 | static int entry_bci_offset() { return offset_of(nmethod, _entry_bci); } |
duke@435 | 760 | |
coleenp@4037 | 761 | // RedefineClasses support. Mark metadata in nmethods as on_stack so that |
coleenp@4037 | 762 | // redefine classes doesn't purge it. |
coleenp@4037 | 763 | static void mark_on_stack(nmethod* nm) { |
coleenp@4037 | 764 | nm->metadata_do(Metadata::mark_on_stack); |
coleenp@4037 | 765 | } |
coleenp@4037 | 766 | void metadata_do(void f(Metadata*)); |
duke@435 | 767 | }; |
duke@435 | 768 | |
dcubed@2624 | 769 | // Locks an nmethod so its code will not get removed and it will not |
dcubed@2624 | 770 | // be made into a zombie, even if it is a not_entrant method. After the |
dcubed@2624 | 771 | // nmethod becomes a zombie, if CompiledMethodUnload event processing |
dcubed@2624 | 772 | // needs to be done, then lock_nmethod() is used directly to keep the |
dcubed@2624 | 773 | // generated code from being reused too early. |
duke@435 | 774 | class nmethodLocker : public StackObj { |
duke@435 | 775 | nmethod* _nm; |
duke@435 | 776 | |
kamg@2511 | 777 | public: |
kamg@2511 | 778 | |
dcubed@2624 | 779 | // note: nm can be NULL |
dcubed@2624 | 780 | // Only JvmtiDeferredEvent::compiled_method_unload_event() |
dcubed@2624 | 781 | // should pass zombie_ok == true. |
dcubed@2624 | 782 | static void lock_nmethod(nmethod* nm, bool zombie_ok = false); |
duke@435 | 783 | static void unlock_nmethod(nmethod* nm); // (ditto) |
duke@435 | 784 | |
duke@435 | 785 | nmethodLocker(address pc); // derive nm from pc |
duke@435 | 786 | nmethodLocker(nmethod *nm) { _nm = nm; lock_nmethod(_nm); } |
duke@435 | 787 | nmethodLocker() { _nm = NULL; } |
duke@435 | 788 | ~nmethodLocker() { unlock_nmethod(_nm); } |
duke@435 | 789 | |
duke@435 | 790 | nmethod* code() { return _nm; } |
duke@435 | 791 | void set_code(nmethod* new_nm) { |
duke@435 | 792 | unlock_nmethod(_nm); // note: This works even if _nm==new_nm. |
duke@435 | 793 | _nm = new_nm; |
duke@435 | 794 | lock_nmethod(_nm); |
duke@435 | 795 | } |
duke@435 | 796 | }; |
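
A hedged usage sketch: because nmethodLocker is a StackObj the lock is scoped, and while it is held the nmethod may still become not_entrant but will not be flushed or zombified underneath the caller. The function name is illustrative; only members declared above are used:

// Sketch: pin the nmethod owning a pc while inspecting it.
void sketch_inspect_code_at(address pc) {
  nmethodLocker locker(pc);       // derives the nmethod from pc and locks it
  nmethod* nm = locker.code();    // may be NULL if pc is not inside an nmethod
  if (nm != NULL && nm->is_alive()) {
    nm->print_nmethod(true /* print_code */);
  }
}                                 // destructor unlocks via unlock_nmethod()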
stefank@2314 | 797 | |
stefank@2314 | 798 | #endif // SHARE_VM_CODE_NMETHOD_HPP |