Wed, 02 Feb 2011 14:38:01 -0500
6766644: Redefinition of compiled method fails with assertion "Can not load classes with the Compiler thread"
Summary: Defer posting events from the compiler thread: use service thread
Reviewed-by: coleenp, dholmes, never, dcubed
/*
 * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_CODE_NMETHOD_HPP
#define SHARE_VM_CODE_NMETHOD_HPP

#include "code/codeBlob.hpp"
#include "code/pcDesc.hpp"

// This class is used internally by nmethods, to cache
// exception/pc/handler information.

class ExceptionCache : public CHeapObj {
  friend class VMStructs;
 private:
  static address _unwind_handler;
  enum { cache_size = 16 };
  klassOop _exception_type;
  address  _pc[cache_size];
  address  _handler[cache_size];
  int      _count;
  ExceptionCache* _next;

  address pc_at(int index)                     { assert(index >= 0 && index < count(), ""); return _pc[index]; }
  void    set_pc_at(int index, address a)      { assert(index >= 0 && index < cache_size, ""); _pc[index] = a; }
  address handler_at(int index)                { assert(index >= 0 && index < count(), ""); return _handler[index]; }
  void    set_handler_at(int index, address a) { assert(index >= 0 && index < cache_size, ""); _handler[index] = a; }
  int     count()                              { return _count; }
  void    increment_count()                    { _count++; }

 public:

  ExceptionCache(Handle exception, address pc, address handler);

  klassOop  exception_type()             { return _exception_type; }
  klassOop* exception_type_addr()        { return &_exception_type; }
  ExceptionCache* next()                 { return _next; }
  void      set_next(ExceptionCache *ec) { _next = ec; }

  address match(Handle exception, address pc);
  bool    match_exception_with_space(Handle exception);
  address test_address(address addr);
  bool    add_address_and_handler(address addr, address handler);

  static address unwind_handler() { return _unwind_handler; }
};

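// Illustrative sketch (not part of this header): how the per-nmethod cache chain is
// meant to be consulted and then filled on a miss, in the spirit of
// nmethod::handler_for_exception_and_pc() declared below; resolve_handler_slow() is a
// hypothetical stand-in for the slow-path lookup.
//
//   address cached_handler_sketch(nmethod* nm, Handle exception, address pc) {
//     for (ExceptionCache* ec = nm->exception_cache(); ec != NULL; ec = ec->next()) {
//       address handler = ec->match(exception, pc);               // hit: type and pc seen before
//       if (handler != NULL) return handler;
//     }
//     address handler = resolve_handler_slow(nm, exception, pc);  // hypothetical slow path
//     nm->add_handler_for_exception_and_pc(exception, pc, handler);
//     return handler;
//   }
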
// cache pc descs found in earlier inquiries
class PcDescCache VALUE_OBJ_CLASS_SPEC {
  friend class VMStructs;
 private:
  enum { cache_size = 4 };
  PcDesc* _last_pc_desc;         // most recent pc_desc found
  PcDesc* _pc_descs[cache_size]; // last cache_size pc_descs found
 public:
  PcDescCache() { debug_only(_last_pc_desc = NULL); }
  void    reset_to(PcDesc* initial_pc_desc);
  PcDesc* find_pc_desc(int pc_offset, bool approximate);
  void    add_pc_desc(PcDesc* pc_desc);
  PcDesc* last_pc_desc() { return _last_pc_desc; }
};

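// Illustrative sketch (not part of this header): the lookup pattern this cache is built
// for, mirroring nmethod::find_pc_desc() further below -- try the small cache first,
// fall back to searching the nmethod's PcDesc array, then remember the result.
// pc_desc_lookup_sketch() and the elided search step are assumptions for illustration.
//
//   PcDesc* pc_desc_lookup_sketch(nmethod* nm, PcDescCache* cache, int pc_offset, bool approximate) {
//     PcDesc* pd = cache->find_pc_desc(pc_offset, approximate);  // scans at most cache_size entries
//     if (pd != NULL) return pd;
//     pd = NULL;  // ...search nm->scopes_pcs_begin()..scopes_pcs_end() for pc_offset here...
//     if (pd != NULL) cache->add_pc_desc(pd);                    // keep it for the next query
//     return pd;
//   }
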
// nmethods (native methods) are the compiled code versions of Java methods.
//
// An nmethod contains:
//  - header                 (the nmethod structure)
//  [Relocation]
//  - relocation information
//  - constant part          (doubles, longs and floats used in nmethod)
//  - oop table
//  [Code]
//  - code body
//  - exception handler
//  - stub code
//  [Debugging information]
//  - oop array
//  - data array
//  - pcs
//  [Exception handler table]
//  - handler entry point array
//  [Implicit Null Pointer exception table]
//  - implicit null table array

class Dependencies;
class ExceptionHandlerTable;
class ImplicitExceptionTable;
class AbstractCompiler;
class xmlStream;

class nmethod : public CodeBlob {
  friend class VMStructs;
  friend class NMethodSweeper;
  friend class CodeCache;  // non-perm oops
 private:
  // Shared fields for all nmethods
  methodOop _method;
  int       _entry_bci;    // != InvocationEntryBci if this nmethod is an on-stack replacement method
  jmethodID _jmethod_id;   // Cache of method()->jmethod_id()

  // To support simple linked-list chaining of nmethods:
  nmethod*  _osr_link;           // from instanceKlass::osr_nmethods_head
  nmethod*  _scavenge_root_link; // from CodeCache::scavenge_root_nmethods
  nmethod*  _saved_nmethod_link; // from CodeCache::speculatively_disconnect

  static nmethod* volatile _oops_do_mark_nmethods;
  nmethod*        volatile _oops_do_mark_link;

  AbstractCompiler* _compiler; // The compiler which compiled this nmethod

  // offsets for entry points
  address _entry_point;          // entry point with class check
  address _verified_entry_point; // entry point without class check
  address _osr_entry_point;      // entry point for on stack replacement

  // Offsets for different nmethod parts
  int _exception_offset;
  // All deoptees will resume execution at the location described by
  // this offset.
  int _deoptimize_offset;
  // All deoptees at a MethodHandle call site will resume execution
  // at the location described by this offset.
  int _deoptimize_mh_offset;
  // Offset of the unwind handler if it exists
  int _unwind_handler_offset;

#ifdef HAVE_DTRACE_H
  int _trap_offset;
#endif // def HAVE_DTRACE_H
  int _consts_offset;
  int _stub_offset;
  int _oops_offset;        // offset to where embedded oop table begins (inside data)
  int _scopes_data_offset;
  int _scopes_pcs_offset;
  int _dependencies_offset;
  int _handler_table_offset;
  int _nul_chk_table_offset;
  int _nmethod_end_offset;

  // location in frame (offset for sp) that deopt can store the original
  // pc during a deopt.
  int _orig_pc_offset;

  int _compile_id;         // which compilation made this nmethod
  int _comp_level;         // compilation level

  // protected by CodeCache_lock
  bool _has_flushed_dependencies;   // Used for maintenance of dependencies (CodeCache_lock)
  bool _speculatively_disconnected; // Marked for potential unload

  bool _marked_for_reclamation;     // Used by NMethodSweeper (set only by sweeper)
  bool _marked_for_deoptimization;  // Used for stack deoptimization

  // used by jvmti to track if an unload event has been posted for this nmethod.
  bool _unload_reported;

  // set during construction
  unsigned int _has_unsafe_access:1;         // May fault due to unsafe access.
  unsigned int _has_method_handle_invokes:1; // Does this method have MethodHandle invokes?

  // Protected by Patching_lock
  unsigned char _state;  // {alive, not_entrant, zombie, unloaded}

#ifdef ASSERT
  bool _oops_are_stale;  // indicates that it's no longer safe to access the oops section
#endif

  enum { alive        = 0,
         not_entrant  = 1,  // uncommon trap has happened but activations may still exist
         zombie       = 2,
         unloaded     = 3 };

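  // Informational sketch of the intended lifecycle (the ordering below is an
  // assumption spelled out for readability, not something this enum enforces):
  // a freshly installed nmethod is alive; make_not_entrant() moves it to
  // not_entrant so no new activations start; make_zombie() follows once no
  // activations remain; make_unloaded() is used when GC finds dead oops.
  //
  //   if (nm->is_in_use())      nm->make_not_entrant(); // stop new entries
  //   if (nm->is_not_entrant()) nm->make_zombie();      // once no frames reference it
  //   // a zombie is later flushed (freed) by the sweeper
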
  jbyte _scavenge_root_state;

  NOT_PRODUCT(bool _has_debug_info; )

  // Nmethod Flushing lock (if non-zero, then the nmethod is not removed)
  jint  _lock_count;

  // not_entrant method removal. Each mark_sweep pass will update
  // this mark to the current sweep invocation count if it is seen on the
  // stack. A not_entrant method can be removed when there are no
  // more activations, i.e., when the _stack_traversal_mark is less than
  // the current sweep traversal index.
  long  _stack_traversal_mark;

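  // Illustrative sketch (assumed sweeper-side logic, not defined in this header):
  // stack walks call mark_as_seen_on_stack() on every nmethod they find, and a
  // later sweep only converts a not_entrant method whose mark is stale:
  //
  //   if (nm->is_not_entrant() && nm->can_not_entrant_be_converted()) {
  //     nm->make_zombie();  // no activations observed since the last full stack scan
  //   }
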
  ExceptionCache *_exception_cache;
  PcDescCache     _pc_desc_cache;

  // These are used for compiled synchronized native methods to
  // locate the owner and stack slot for the BasicLock so that we can
  // properly revoke the bias of the owner if necessary. They are
  // needed because there is no debug information for compiled native
  // wrappers and the oop maps are insufficient to allow
  // frame::retrieve_receiver() to work. Currently they are expected
  // to be byte offsets from the Java stack pointer for maximum code
  // sharing between platforms. Note that currently biased locking
  // will never cause Class instances to be biased but this code
  // handles the static synchronized case as well.
  // JVMTI's GetLocalInstance() also uses these offsets to find the receiver
  // for non-static native wrapper frames.
  ByteSize _native_receiver_sp_offset;
  ByteSize _native_basic_lock_sp_offset;

  friend class nmethodLocker;

  // For native wrappers
  nmethod(methodOop method,
          int nmethod_size,
          CodeOffsets* offsets,
          CodeBuffer *code_buffer,
          int frame_size,
          ByteSize basic_lock_owner_sp_offset, /* synchronized natives only */
          ByteSize basic_lock_sp_offset,       /* synchronized natives only */
          OopMapSet* oop_maps);

#ifdef HAVE_DTRACE_H
  // For native wrappers
  nmethod(methodOop method,
          int nmethod_size,
          CodeOffsets* offsets,
          CodeBuffer *code_buffer,
          int frame_size);
#endif // def HAVE_DTRACE_H

  // Creation support
  nmethod(methodOop method,
          int nmethod_size,
          int compile_id,
          int entry_bci,
          CodeOffsets* offsets,
          int orig_pc_offset,
          DebugInformationRecorder *recorder,
          Dependencies* dependencies,
          CodeBuffer *code_buffer,
          int frame_size,
          OopMapSet* oop_maps,
          ExceptionHandlerTable* handler_table,
          ImplicitExceptionTable* nul_chk_table,
          AbstractCompiler* compiler,
          int comp_level);

  // helper methods
  void* operator new(size_t size, int nmethod_size);

  const char* reloc_string_for(u_char* begin, u_char* end);
  // Returns true if this thread changed the state of the nmethod or
  // false if another thread performed the transition.
  bool make_not_entrant_or_zombie(unsigned int state);
  void inc_decompile_count();

  // Used to manipulate the exception cache
  void add_exception_cache_entry(ExceptionCache* new_entry);
  ExceptionCache* exception_cache_entry_for_exception(Handle exception);

  // Inform external interfaces that a compiled method has been unloaded
  void post_compiled_method_unload();

  // Initialize fields to their default values
  void init_defaults();

 public:
  // create nmethod with entry_bci
  static nmethod* new_nmethod(methodHandle method,
                              int compile_id,
                              int entry_bci,
                              CodeOffsets* offsets,
                              int orig_pc_offset,
                              DebugInformationRecorder* recorder,
                              Dependencies* dependencies,
                              CodeBuffer *code_buffer,
                              int frame_size,
                              OopMapSet* oop_maps,
                              ExceptionHandlerTable* handler_table,
                              ImplicitExceptionTable* nul_chk_table,
                              AbstractCompiler* compiler,
                              int comp_level);

  static nmethod* new_native_nmethod(methodHandle method,
                                     CodeBuffer *code_buffer,
                                     int vep_offset,
                                     int frame_complete,
                                     int frame_size,
                                     ByteSize receiver_sp_offset,
                                     ByteSize basic_lock_sp_offset,
                                     OopMapSet* oop_maps);

#ifdef HAVE_DTRACE_H
  // The method we generate for a dtrace probe has to look
  // like an nmethod as far as the rest of the system is concerned
  // which is somewhat unfortunate.
  static nmethod* new_dtrace_nmethod(methodHandle method,
                                     CodeBuffer *code_buffer,
                                     int vep_offset,
                                     int trap_offset,
                                     int frame_complete,
                                     int frame_size);

  int trap_offset() const      { return _trap_offset; }
  address trap_address() const { return insts_begin() + _trap_offset; }

#endif // def HAVE_DTRACE_H

  // accessors
  methodOop method() const           { return _method; }
  AbstractCompiler* compiler() const { return _compiler; }

#ifndef PRODUCT
  bool has_debug_info() const        { return _has_debug_info; }
  void set_has_debug_info(bool f)    { _has_debug_info = f; }
#endif // NOT PRODUCT

  // type info
  bool is_nmethod() const       { return true; }
  bool is_java_method() const   { return !method()->is_native(); }
  bool is_native_method() const { return method()->is_native(); }
  bool is_osr_method() const    { return _entry_bci != InvocationEntryBci; }

  bool is_compiled_by_c1() const;
  bool is_compiled_by_c2() const;
  bool is_compiled_by_shark() const;

  // boundaries for different parts
  address consts_begin          () const { return header_begin() + _consts_offset; }
  address consts_end            () const { return header_begin() + code_offset(); }
  address insts_begin           () const { return header_begin() + code_offset(); }
  address insts_end             () const { return header_begin() + _stub_offset; }
  address stub_begin            () const { return header_begin() + _stub_offset; }
  address stub_end              () const { return header_begin() + _oops_offset; }
  address exception_begin       () const { return header_begin() + _exception_offset; }
  address deopt_handler_begin   () const { return header_begin() + _deoptimize_offset; }
  address deopt_mh_handler_begin() const { return header_begin() + _deoptimize_mh_offset; }
  address unwind_handler_begin  () const { return _unwind_handler_offset != -1 ? (header_begin() + _unwind_handler_offset) : NULL; }
  oop*    oops_begin            () const { return (oop*) (header_begin() + _oops_offset); }
  oop*    oops_end              () const { return (oop*) (header_begin() + _scopes_data_offset); }

  address scopes_data_begin     () const { return header_begin() + _scopes_data_offset; }
  address scopes_data_end       () const { return header_begin() + _scopes_pcs_offset; }
  PcDesc* scopes_pcs_begin      () const { return (PcDesc*)(header_begin() + _scopes_pcs_offset); }
  PcDesc* scopes_pcs_end        () const { return (PcDesc*)(header_begin() + _dependencies_offset); }
  address dependencies_begin    () const { return header_begin() + _dependencies_offset; }
  address dependencies_end      () const { return header_begin() + _handler_table_offset; }
  address handler_table_begin   () const { return header_begin() + _handler_table_offset; }
  address handler_table_end     () const { return header_begin() + _nul_chk_table_offset; }
  address nul_chk_table_begin   () const { return header_begin() + _nul_chk_table_offset; }
  address nul_chk_table_end     () const { return header_begin() + _nmethod_end_offset; }

  // Sizes
  int consts_size       () const { return consts_end       () - consts_begin       (); }
  int insts_size        () const { return insts_end        () - insts_begin        (); }
  int stub_size         () const { return stub_end         () - stub_begin         (); }
  int oops_size         () const { return (address) oops_end() - (address) oops_begin(); }
  int scopes_data_size  () const { return scopes_data_end  () - scopes_data_begin  (); }
  int scopes_pcs_size   () const { return (intptr_t) scopes_pcs_end() - (intptr_t) scopes_pcs_begin(); }
  int dependencies_size () const { return dependencies_end () - dependencies_begin (); }
  int handler_table_size() const { return handler_table_end() - handler_table_begin(); }
  int nul_chk_table_size() const { return nul_chk_table_end() - nul_chk_table_begin(); }

  int total_size        () const;

  // Containment
  bool consts_contains       (address addr) const { return consts_begin       () <= addr && addr < consts_end       (); }
  bool insts_contains        (address addr) const { return insts_begin        () <= addr && addr < insts_end        (); }
  bool stub_contains         (address addr) const { return stub_begin         () <= addr && addr < stub_end         (); }
  bool oops_contains         (oop*    addr) const { return oops_begin         () <= addr && addr < oops_end         (); }
  bool scopes_data_contains  (address addr) const { return scopes_data_begin  () <= addr && addr < scopes_data_end  (); }
  bool scopes_pcs_contains   (PcDesc* addr) const { return scopes_pcs_begin   () <= addr && addr < scopes_pcs_end   (); }
  bool handler_table_contains(address addr) const { return handler_table_begin() <= addr && addr < handler_table_end(); }
  bool nul_chk_table_contains(address addr) const { return nul_chk_table_begin() <= addr && addr < nul_chk_table_end(); }

  // entry points
  address entry_point() const          { return _entry_point;          } // normal entry point
  address verified_entry_point() const { return _verified_entry_point; } // if klass is correct

  // flag accessing and manipulation
  bool is_in_use() const      { return _state == alive; }
  bool is_alive() const       { return _state == alive || _state == not_entrant; }
  bool is_not_entrant() const { return _state == not_entrant; }
  bool is_zombie() const      { return _state == zombie; }
  bool is_unloaded() const    { return _state == unloaded; }

  // Make the nmethod non entrant. The nmethod will continue to be
  // alive. It is used when an uncommon trap happens. Returns true
  // if this thread changed the state of the nmethod or false if
  // another thread performed the transition.
  bool make_not_entrant() { return make_not_entrant_or_zombie(not_entrant); }
  bool make_zombie()      { return make_not_entrant_or_zombie(zombie); }

  // used by jvmti to track if the unload event has been reported
  bool unload_reported()     { return _unload_reported; }
  void set_unload_reported() { _unload_reported = true; }

  bool is_marked_for_deoptimization() const { return _marked_for_deoptimization; }
  void mark_for_deoptimization()            { _marked_for_deoptimization = true; }

  void make_unloaded(BoolObjectClosure* is_alive, oop cause);

  bool has_dependencies()         { return dependencies_size() != 0; }
  void flush_dependencies(BoolObjectClosure* is_alive);
  bool has_flushed_dependencies() { return _has_flushed_dependencies; }
  void set_has_flushed_dependencies() {
    assert(!has_flushed_dependencies(), "should only happen once");
    _has_flushed_dependencies = 1;
  }

  bool is_marked_for_reclamation() const { return _marked_for_reclamation; }
  void mark_for_reclamation()            { _marked_for_reclamation = 1; }

  bool has_unsafe_access() const     { return _has_unsafe_access; }
  void set_has_unsafe_access(bool z) { _has_unsafe_access = z; }

  bool has_method_handle_invokes() const     { return _has_method_handle_invokes; }
  void set_has_method_handle_invokes(bool z) { _has_method_handle_invokes = z; }

  bool is_speculatively_disconnected() const  { return _speculatively_disconnected; }
  void set_speculatively_disconnected(bool z) { _speculatively_disconnected = z; }

  int comp_level() const { return _comp_level; }

  // Support for oops in scopes and relocs:
  // Note: index 0 is reserved for null.
  oop  oop_at(int index) const { return index == 0 ? (oop) NULL : *oop_addr_at(index); }
  oop* oop_addr_at(int index) const {  // for GC
    // relocation indexes are biased by 1 (because 0 is reserved)
    assert(index > 0 && index <= oops_size(), "must be a valid non-zero index");
    assert(!_oops_are_stale, "oops are stale");
    return &oops_begin()[index - 1];
  }

  void copy_oops(GrowableArray<jobject>* oops);

  // Relocation support
 private:
  void fix_oop_relocations(address begin, address end, bool initialize_immediates);
  inline void initialize_immediate_oop(oop* dest, jobject handle);

 public:
  void fix_oop_relocations(address begin, address end) { fix_oop_relocations(begin, end, false); }
  void fix_oop_relocations()                           { fix_oop_relocations(NULL, NULL, false); }

  bool is_at_poll_return(address pc);
  bool is_at_poll_or_poll_return(address pc);

  // Non-perm oop support
  bool on_scavenge_root_list() const { return (_scavenge_root_state & 1) != 0; }
 protected:
  enum { npl_on_list = 0x01, npl_marked = 0x10 };
  void set_on_scavenge_root_list()   { _scavenge_root_state = npl_on_list; }
  void clear_on_scavenge_root_list() { _scavenge_root_state = 0; }
  // assertion-checking and pruning logic uses the bits of _scavenge_root_state
#ifndef PRODUCT
  void set_scavenge_root_marked()    { _scavenge_root_state |= npl_marked; }
  void clear_scavenge_root_marked()  { _scavenge_root_state &= ~npl_marked; }
  bool scavenge_root_not_marked()    { return (_scavenge_root_state & ~npl_on_list) == 0; }
  // N.B. there is no positive marked query, and we only use the not_marked query for asserts.
#endif //PRODUCT
  nmethod* scavenge_root_link() const         { return _scavenge_root_link; }
  void     set_scavenge_root_link(nmethod *n) { _scavenge_root_link = n; }

  nmethod* saved_nmethod_link() const         { return _saved_nmethod_link; }
  void     set_saved_nmethod_link(nmethod *n) { _saved_nmethod_link = n; }

 public:

  // Sweeper support
  long stack_traversal_mark()           { return _stack_traversal_mark; }
  void set_stack_traversal_mark(long l) { _stack_traversal_mark = l; }

  // Exception cache support
  ExceptionCache* exception_cache() const      { return _exception_cache; }
  void set_exception_cache(ExceptionCache *ec) { _exception_cache = ec; }
  address handler_for_exception_and_pc(Handle exception, address pc);
  void add_handler_for_exception_and_pc(Handle exception, address pc, address handler);
  void remove_from_exception_cache(ExceptionCache* ec);

  // implicit exceptions support
  address continuation_for_implicit_exception(address pc);

  // On-stack replacement support
  int  osr_entry_bci() const { assert(_entry_bci != InvocationEntryBci, "wrong kind of nmethod"); return _entry_bci; }
  address osr_entry() const  { assert(_entry_bci != InvocationEntryBci, "wrong kind of nmethod"); return _osr_entry_point; }
  void invalidate_osr_method();
  nmethod* osr_link() const         { return _osr_link; }
  void     set_osr_link(nmethod *n) { _osr_link = n; }

  // tells whether frames described by this nmethod can be deoptimized
  // note: native wrappers cannot be deoptimized.
  bool can_be_deoptimized() const { return is_java_method(); }

  // Inline cache support
  void clear_inline_caches();
  void cleanup_inline_caches();
  bool inlinecache_check_contains(address addr) const {
    return (addr >= code_begin() && addr < verified_entry_point());
  }

  // unlink and deallocate this nmethod
  // Only NMethodSweeper class is expected to use this. NMethodSweeper is not
  // expected to use any other private methods/data in this class.

 protected:
  void flush();

 public:
  // If returning true, it is unsafe to remove this nmethod even though it is a zombie
  // nmethod, since the VM might have a reference to it. Should only be called from a safepoint.
  bool is_locked_by_vm() const { return _lock_count > 0; }

  // See comment at definition of _stack_traversal_mark
  void mark_as_seen_on_stack();
  bool can_not_entrant_be_converted();

  // Evolution support. We make old (discarded) compiled methods point to new methodOops.
  void set_method(methodOop method) { _method = method; }

  // GC support
  void do_unloading(BoolObjectClosure* is_alive, OopClosure* keep_alive,
                    bool unloading_occurred);
  bool can_unload(BoolObjectClosure* is_alive, OopClosure* keep_alive,
                  oop* root, bool unloading_occurred);

  void preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map,
                                     OopClosure* f);
  void oops_do(OopClosure* f) { oops_do(f, false); }
  void oops_do(OopClosure* f, bool do_strong_roots_only);
  bool detect_scavenge_root_oops();
  void verify_scavenge_root_oops() PRODUCT_RETURN;

  bool test_set_oops_do_mark();
  static void oops_do_marking_prologue();
  static void oops_do_marking_epilogue();
  static bool oops_do_marking_is_active() { return _oops_do_mark_nmethods != NULL; }
  DEBUG_ONLY(bool test_oops_do_mark() { return _oops_do_mark_link != NULL; })

  // ScopeDesc for an instruction
  ScopeDesc* scope_desc_at(address pc);

 private:
  ScopeDesc* scope_desc_in(address begin, address end);

  address* orig_pc_addr(const frame* fr) { return (address*) ((address)fr->unextended_sp() + _orig_pc_offset); }

  PcDesc* find_pc_desc_internal(address pc, bool approximate);

  PcDesc* find_pc_desc(address pc, bool approximate) {
    PcDesc* desc = _pc_desc_cache.last_pc_desc();
    if (desc != NULL && desc->pc_offset() == pc - code_begin()) {
      return desc;
    }
    return find_pc_desc_internal(pc, approximate);
  }

 public:
  // ScopeDesc retrieval operation
  PcDesc* pc_desc_at(address pc)   { return find_pc_desc(pc, false); }
  // pc_desc_near returns the first PcDesc at or after the given pc.
  PcDesc* pc_desc_near(address pc) { return find_pc_desc(pc, true); }

 public:
  // copying of debugging information
  void copy_scopes_pcs(PcDesc* pcs, int count);
  void copy_scopes_data(address buffer, int size);

  // Deopt
  // Return true if the PC is one we would expect if the frame is being deopted.
  bool is_deopt_pc      (address pc) { return is_deopt_entry(pc) || is_deopt_mh_entry(pc); }
  bool is_deopt_entry   (address pc) { return pc == deopt_handler_begin(); }
  bool is_deopt_mh_entry(address pc) { return pc == deopt_mh_handler_begin(); }
  // Accessor/mutator for the original pc of a frame before a frame was deopted.
  address get_original_pc(const frame* fr) { return *orig_pc_addr(fr); }
  void    set_original_pc(const frame* fr, address pc) { *orig_pc_addr(fr) = pc; }

  static address get_deopt_original_pc(const frame* fr);

  // MethodHandle
  bool is_method_handle_return(address return_pc);

  // jvmti support:
  void post_compiled_method_load_event();
  jmethodID get_and_cache_jmethod_id();

  // verify operations
  void verify();
  void verify_scopes();
  void verify_interrupt_point(address interrupt_point);

  // print compilation helper
  static void print_compilation(outputStream *st, const char *method_name, const char *title,
                                methodOop method, bool is_blocking, int compile_id, int bci, int comp_level);

  // printing support
  void print() const;
  void print_code();
  void print_relocations()                    PRODUCT_RETURN;
  void print_pcs()                            PRODUCT_RETURN;
  void print_scopes()                         PRODUCT_RETURN;
  void print_dependencies()                   PRODUCT_RETURN;
  void print_value_on(outputStream* st) const PRODUCT_RETURN;
  void print_calls(outputStream* st)          PRODUCT_RETURN;
  void print_handler_table()                  PRODUCT_RETURN;
  void print_nul_chk_table()                  PRODUCT_RETURN;
  void print_nmethod(bool print_code);

  // need to re-define this from CodeBlob else the overload hides it
  virtual void print_on(outputStream* st) const { CodeBlob::print_on(st); }
  void print_on(outputStream* st, const char* title) const;

  // Logging
  void log_identity(xmlStream* log) const;
  void log_new_nmethod() const;
  void log_state_change() const;

  // Prints block-level comments, including nmethod specific block labels:
  virtual void print_block_comment(outputStream* stream, address block_begin) {
    print_nmethod_labels(stream, block_begin);
    CodeBlob::print_block_comment(stream, block_begin);
  }
  void print_nmethod_labels(outputStream* stream, address block_begin);

  // Prints a comment for one native instruction (reloc info, pc desc)
  void print_code_comment_on(outputStream* st, int column, address begin, address end);
  static void print_statistics() PRODUCT_RETURN;

  // Compiler task identification. Note that all OSR methods
  // are numbered in an independent sequence if CICountOSR is true,
  // and native method wrappers are also numbered independently if
  // CICountNative is true.
  int compile_id() const { return _compile_id; }
  const char* compile_kind() const;

  // For debugging
  // CompiledIC*    IC_at(char* p) const;
  // PrimitiveIC*   primitiveIC_at(char* p) const;
  oop embeddedOop_at(address p);

  // tells if any of this method's dependencies have been invalidated
  // (this is expensive!)
  bool check_all_dependencies();

  // tells if this compiled method is dependent on the given changes,
  // and the changes have invalidated it
  bool check_dependency_on(DepChange& changes);

  // Evolution support. Tells if this compiled method is dependent on any of
  // methods m() of class dependee, such that if m() in dependee is replaced,
  // this compiled method will have to be deoptimized.
  bool is_evol_dependent_on(klassOop dependee);

  // Fast breakpoint support. Tells if this compiled method is
  // dependent on the given method. Returns true if this nmethod
  // corresponds to the given method as well.
  bool is_dependent_on_method(methodOop dependee);

  // is it ok to patch at address?
  bool is_patchable_at(address instr_address);

  // UseBiasedLocking support
  ByteSize native_receiver_sp_offset() {
    return _native_receiver_sp_offset;
  }
  ByteSize native_basic_lock_sp_offset() {
    return _native_basic_lock_sp_offset;
  }

  // support for code generation
  static int verified_entry_point_offset() { return offset_of(nmethod, _verified_entry_point); }
  static int osr_entry_point_offset()      { return offset_of(nmethod, _osr_entry_point); }
  static int entry_bci_offset()            { return offset_of(nmethod, _entry_bci); }
};

// Locks an nmethod so its code will not get removed, even if it is a zombie/not_entrant method
class nmethodLocker : public StackObj {
  nmethod* _nm;

 public:

  static void lock_nmethod(nmethod* nm);   // note: nm can be NULL
  static void unlock_nmethod(nmethod* nm); // (ditto)

  nmethodLocker(address pc); // derive nm from pc
  nmethodLocker(nmethod *nm) { _nm = nm; lock_nmethod(_nm); }
  nmethodLocker()            { _nm = NULL; }
  ~nmethodLocker()           { unlock_nmethod(_nm); }

  nmethod* code() { return _nm; }
  void set_code(nmethod* new_nm) {
    unlock_nmethod(_nm);   // note: This works even if _nm==new_nm.
    _nm = new_nm;
    lock_nmethod(_nm);
  }
};

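// Illustrative usage sketch (caller code, not part of this header): keep an nmethod
// from being flushed while it is being examined; the lock is released automatically
// when the locker leaves scope. inspect_nmethod_sketch() is a hypothetical helper.
//
//   void inspect_nmethod_sketch(nmethod* nm) {
//     nmethodLocker locker(nm);  // bumps the flushing lock so the sweeper will not remove nm
//     nm->print_nmethod(false);  // safe to poke at nm while the locker is live
//   }                            // ~nmethodLocker() unlocks
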
#endif // SHARE_VM_CODE_NMETHOD_HPP