Fri, 29 Jan 2010 12:13:05 +0100
6917766: JSR 292 needs its own deopt handler
Summary: We need to introduce a new MH deopt handler so we can easily determine whether a deopt happened at a MH call site or not.
Reviewed-by: never, jrose
/*
 * Copyright 1997-2010 Sun Microsystems, Inc. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

// This class is used internally by nmethods, to cache
// exception/pc/handler information.

class ExceptionCache : public CHeapObj {
  friend class VMStructs;
 private:
  static address _unwind_handler;
  enum { cache_size = 16 };
  klassOop _exception_type;
  address  _pc[cache_size];
  address  _handler[cache_size];
  int      _count;
  ExceptionCache* _next;

  address pc_at(int index)                     { assert(index >= 0 && index < count(), ""); return _pc[index]; }
  void    set_pc_at(int index, address a)      { assert(index >= 0 && index < cache_size, ""); _pc[index] = a; }
  address handler_at(int index)                { assert(index >= 0 && index < count(), ""); return _handler[index]; }
  void    set_handler_at(int index, address a) { assert(index >= 0 && index < cache_size, ""); _handler[index] = a; }
  int     count()                              { return _count; }
  void    increment_count()                    { _count++; }

 public:

  ExceptionCache(Handle exception, address pc, address handler);

  klassOop  exception_type()              { return _exception_type; }
  klassOop* exception_type_addr()         { return &_exception_type; }
  ExceptionCache* next()                  { return _next; }
  void      set_next(ExceptionCache* ec)  { _next = ec; }

  address match(Handle exception, address pc);
  bool    match_exception_with_space(Handle exception);
  address test_address(address addr);
  bool    add_address_and_handler(address addr, address handler);

  static address unwind_handler() { return _unwind_handler; }
};
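
// Shape of the cache (a descriptive sketch of the fields above): each
// node covers exactly one exception type and maps up to cache_size pcs
// to their handlers; entries for other exception types hang off _next,
// forming a singly linked list per nmethod:
//
//   [_exception_type A | 16 x (pc -> handler)] --_next--> [type B | ...] --> NULL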

// cache pc descs found in earlier inquiries
class PcDescCache VALUE_OBJ_CLASS_SPEC {
  friend class VMStructs;
 private:
  enum { cache_size = 4 };
  PcDesc* _last_pc_desc;         // most recent pc_desc found
  PcDesc* _pc_descs[cache_size]; // last cache_size pc_descs found
 public:
  PcDescCache() { debug_only(_last_pc_desc = NULL); }
  void    reset_to(PcDesc* initial_pc_desc);
  PcDesc* find_pc_desc(int pc_offset, bool approximate);
  void    add_pc_desc(PcDesc* pc_desc);
  PcDesc* last_pc_desc() { return _last_pc_desc; }
};

// nmethods (native methods) are the compiled code versions of Java methods.

struct nmFlags {
  friend class VMStructs;
  unsigned int version:8;                   // version number (0 = first version)
  unsigned int level:4;                     // optimization level
  unsigned int age:4;                       // age (in # of sweep steps)

  unsigned int state:2;                     // {alive, not_entrant, zombie, unloaded}

  unsigned int isUncommonRecompiled:1;      // recompiled because of uncommon trap?
  unsigned int isToBeRecompiled:1;          // to be recompiled as soon as it matures
  unsigned int hasFlushedDependencies:1;    // Used for maintenance of dependencies
  unsigned int markedForReclamation:1;      // Used by NMethodSweeper

  unsigned int has_unsafe_access:1;         // May fault due to unsafe access.
  unsigned int has_method_handle_invokes:1; // Does this method have MethodHandle invokes?

  void clear();
};

// An nmethod contains:
//  - header                 (the nmethod structure)
//  [Relocation]
//  - relocation information
//  - constant part          (doubles, longs and floats used in nmethod)
//  [Code]
//  - code body
//  - exception handler
//  - stub code
//  [Debugging information]
//  - oop array
//  - data array
//  - pcs
//  [Exception handler table]
//  - handler entry point array
//  [Implicit Null Pointer exception table]
//  - implicit null table array

class Dependencies;
class ExceptionHandlerTable;
class ImplicitExceptionTable;
class AbstractCompiler;
class xmlStream;

class nmethod : public CodeBlob {
  friend class VMStructs;
  friend class NMethodSweeper;
  friend class CodeCache;  // non-perm oops
 private:
  // Shared fields for all nmethods
  static int _zombie_instruction_size;

  methodOop _method;
  int       _entry_bci;  // != InvocationEntryBci if this nmethod is an on-stack replacement method

  // To support simple linked-list chaining of nmethods:
  nmethod*  _osr_link;            // from instanceKlass::osr_nmethods_head
  nmethod*  _scavenge_root_link;  // from CodeCache::scavenge_root_nmethods

  static nmethod* volatile _oops_do_mark_nmethods;
  nmethod*        volatile _oops_do_mark_link;

  AbstractCompiler* _compiler;  // The compiler which compiled this nmethod

  // Offsets for different nmethod parts
  int _exception_offset;
  // All deoptees will resume execution at the location described by
  // this offset.
  int _deoptimize_offset;
  // All deoptees at a MethodHandle call site will resume execution at
  // the location described by this offset (see the usage sketch at
  // is_deopt_mh_entry() below).
  int _deoptimize_mh_offset;
#ifdef HAVE_DTRACE_H
  int _trap_offset;
#endif // def HAVE_DTRACE_H
  int _stub_offset;
  int _consts_offset;
  int _scopes_data_offset;
  int _scopes_pcs_offset;
  int _dependencies_offset;
  int _handler_table_offset;
  int _nul_chk_table_offset;
  int _nmethod_end_offset;

  // Location in frame (offset from sp) where deopt can store the
  // original pc during a deopt.
  int _orig_pc_offset;

  int _compile_id;  // which compilation made this nmethod
  int _comp_level;  // compilation level

  // offsets for entry points
  address _entry_point;           // entry point with class check
  address _verified_entry_point;  // entry point without class check
  address _osr_entry_point;       // entry point for on stack replacement

  nmFlags flags;                     // various flags to keep track of nmethod state
  bool    _markedForDeoptimization;  // Used for stack deoptimization
  enum { alive       = 0,
         not_entrant = 1,  // uncommon trap has happened but activations may still exist
         zombie      = 2,
         unloaded    = 3 };

  // used by jvmti to track if an unload event has been posted for this nmethod.
  bool _unload_reported;

  jbyte _scavenge_root_state;

  NOT_PRODUCT(bool _has_debug_info; )

  // Nmethod Flushing lock (if non-zero, then the nmethod is not removed)
  jint _lock_count;

  // not_entrant method removal. Each mark_sweep pass will update
  // this mark to the current sweep invocation count if it is seen on the
  // stack. A not_entrant method can be removed when there are no
  // more activations, i.e., when the _stack_traversal_mark is less than
  // the current sweep traversal index.
  long _stack_traversal_mark;
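
  // A worked form of the removal condition above (an illustrative
  // sketch; `nm` and `current_traversal_index` are hypothetical names,
  // the authoritative test lives in the sweeper via
  // can_not_entrant_be_converted()):
  //
  //   bool no_activations_left =
  //       nm->stack_traversal_mark() < current_traversal_index;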

  ExceptionCache* _exception_cache;
  PcDescCache     _pc_desc_cache;

  // These are only used for compiled synchronized native methods to
  // locate the owner and stack slot for the BasicLock so that we can
  // properly revoke the bias of the owner if necessary. They are
  // needed because there is no debug information for compiled native
  // wrappers and the oop maps are insufficient to allow
  // frame::retrieve_receiver() to work. Currently they are expected
  // to be byte offsets from the Java stack pointer for maximum code
  // sharing between platforms. Note that currently biased locking
  // will never cause Class instances to be biased but this code
  // handles the static synchronized case as well.
  ByteSize _compiled_synchronized_native_basic_lock_owner_sp_offset;
  ByteSize _compiled_synchronized_native_basic_lock_sp_offset;

  friend class nmethodLocker;

  // For native wrappers
  nmethod(methodOop method,
          int nmethod_size,
          CodeOffsets* offsets,
          CodeBuffer* code_buffer,
          int frame_size,
          ByteSize basic_lock_owner_sp_offset, /* synchronized natives only */
          ByteSize basic_lock_sp_offset,       /* synchronized natives only */
          OopMapSet* oop_maps);

#ifdef HAVE_DTRACE_H
  // For native wrappers
  nmethod(methodOop method,
          int nmethod_size,
          CodeOffsets* offsets,
          CodeBuffer* code_buffer,
          int frame_size);
#endif // def HAVE_DTRACE_H

  // Creation support
  nmethod(methodOop method,
          int nmethod_size,
          int compile_id,
          int entry_bci,
          CodeOffsets* offsets,
          int orig_pc_offset,
          DebugInformationRecorder* recorder,
          Dependencies* dependencies,
          CodeBuffer* code_buffer,
          int frame_size,
          OopMapSet* oop_maps,
          ExceptionHandlerTable* handler_table,
          ImplicitExceptionTable* nul_chk_table,
          AbstractCompiler* compiler,
          int comp_level);

  // helper methods
  void* operator new(size_t size, int nmethod_size);

  const char* reloc_string_for(u_char* begin, u_char* end);
  // Returns true if this thread changed the state of the nmethod or
  // false if another thread performed the transition.
  bool make_not_entrant_or_zombie(unsigned int state);
  void inc_decompile_count();

  // used to check that writes to nmFlags are done consistently.
  static void check_safepoint() PRODUCT_RETURN;

  // Used to manipulate the exception cache
  void add_exception_cache_entry(ExceptionCache* new_entry);
  ExceptionCache* exception_cache_entry_for_exception(Handle exception);

  // Inform external interfaces that a compiled method has been unloaded
  inline void post_compiled_method_unload();

 public:
  // create nmethod with entry_bci
  static nmethod* new_nmethod(methodHandle method,
                              int compile_id,
                              int entry_bci,
                              CodeOffsets* offsets,
                              int orig_pc_offset,
                              DebugInformationRecorder* recorder,
                              Dependencies* dependencies,
                              CodeBuffer* code_buffer,
                              int frame_size,
                              OopMapSet* oop_maps,
                              ExceptionHandlerTable* handler_table,
                              ImplicitExceptionTable* nul_chk_table,
                              AbstractCompiler* compiler,
                              int comp_level);

  static nmethod* new_native_nmethod(methodHandle method,
                                     CodeBuffer* code_buffer,
                                     int vep_offset,
                                     int frame_complete,
                                     int frame_size,
                                     ByteSize receiver_sp_offset,
                                     ByteSize basic_lock_sp_offset,
                                     OopMapSet* oop_maps);

#ifdef HAVE_DTRACE_H
  // The method we generate for a dtrace probe has to look
  // like an nmethod as far as the rest of the system is concerned,
  // which is somewhat unfortunate.
  static nmethod* new_dtrace_nmethod(methodHandle method,
                                     CodeBuffer* code_buffer,
                                     int vep_offset,
                                     int trap_offset,
                                     int frame_complete,
                                     int frame_size);

  int     trap_offset() const  { return _trap_offset; }
  address trap_address() const { return code_begin() + _trap_offset; }

#endif // def HAVE_DTRACE_H

  // accessors
  methodOop method() const           { return _method; }
  AbstractCompiler* compiler() const { return _compiler; }

#ifndef PRODUCT
  bool has_debug_info() const     { return _has_debug_info; }
  void set_has_debug_info(bool f) { _has_debug_info = f; }
#endif // NOT PRODUCT

  // type info
  bool is_nmethod() const       { return true; }
  bool is_java_method() const   { return !method()->is_native(); }
  bool is_native_method() const { return method()->is_native(); }
  bool is_osr_method() const    { return _entry_bci != InvocationEntryBci; }

  bool is_compiled_by_c1() const;
  bool is_compiled_by_c2() const;

  // boundaries for different parts
  address code_begin            () const { return _entry_point; }
  address code_end              () const { return header_begin() + _stub_offset          ; }
  address exception_begin       () const { return header_begin() + _exception_offset     ; }
  address deopt_handler_begin   () const { return header_begin() + _deoptimize_offset    ; }
  address deopt_mh_handler_begin() const { return header_begin() + _deoptimize_mh_offset ; }
  address stub_begin            () const { return header_begin() + _stub_offset          ; }
  address stub_end              () const { return header_begin() + _consts_offset        ; }
  address consts_begin          () const { return header_begin() + _consts_offset        ; }
  address consts_end            () const { return header_begin() + _scopes_data_offset   ; }
  address scopes_data_begin     () const { return header_begin() + _scopes_data_offset   ; }
  address scopes_data_end       () const { return header_begin() + _scopes_pcs_offset    ; }
  PcDesc* scopes_pcs_begin      () const { return (PcDesc*)(header_begin() + _scopes_pcs_offset  ); }
  PcDesc* scopes_pcs_end        () const { return (PcDesc*)(header_begin() + _dependencies_offset); }
  address dependencies_begin    () const { return header_begin() + _dependencies_offset  ; }
  address dependencies_end      () const { return header_begin() + _handler_table_offset ; }
  address handler_table_begin   () const { return header_begin() + _handler_table_offset ; }
  address handler_table_end     () const { return header_begin() + _nul_chk_table_offset ; }
  address nul_chk_table_begin   () const { return header_begin() + _nul_chk_table_offset ; }
  address nul_chk_table_end     () const { return header_begin() + _nmethod_end_offset   ; }

  int code_size         () const { return code_end         () - code_begin         (); }
  int stub_size         () const { return stub_end         () - stub_begin         (); }
  int consts_size       () const { return consts_end       () - consts_begin       (); }
  int scopes_data_size  () const { return scopes_data_end  () - scopes_data_begin  (); }
  int scopes_pcs_size   () const { return (intptr_t)scopes_pcs_end() - (intptr_t)scopes_pcs_begin(); }
  int dependencies_size () const { return dependencies_end () - dependencies_begin (); }
  int handler_table_size() const { return handler_table_end() - handler_table_begin(); }
  int nul_chk_table_size() const { return nul_chk_table_end() - nul_chk_table_begin(); }

  int total_size() const;

  bool code_contains         (address addr) const { return code_begin         () <= addr && addr < code_end         (); }
  bool stub_contains         (address addr) const { return stub_begin         () <= addr && addr < stub_end         (); }
  bool consts_contains       (address addr) const { return consts_begin       () <= addr && addr < consts_end       (); }
  bool scopes_data_contains  (address addr) const { return scopes_data_begin  () <= addr && addr < scopes_data_end  (); }
  bool scopes_pcs_contains   (PcDesc* addr) const { return scopes_pcs_begin   () <= addr && addr < scopes_pcs_end   (); }
  bool handler_table_contains(address addr) const { return handler_table_begin() <= addr && addr < handler_table_end(); }
  bool nul_chk_table_contains(address addr) const { return nul_chk_table_begin() <= addr && addr < nul_chk_table_end(); }

  // entry points
  address entry_point() const          { return _entry_point; }          // normal entry point
  address verified_entry_point() const { return _verified_entry_point; } // if klass is correct

  // flag accessing and manipulation
  bool is_in_use() const      { return flags.state == alive; }
  bool is_alive() const       { return flags.state == alive || flags.state == not_entrant; }
  bool is_not_entrant() const { return flags.state == not_entrant; }
  bool is_zombie() const      { return flags.state == zombie; }
  bool is_unloaded() const    { return flags.state == unloaded; }

  // Make the nmethod non-entrant. The nmethod will continue to be
  // alive. It is used when an uncommon trap happens. Returns true
  // if this thread changed the state of the nmethod or false if
  // another thread performed the transition.
  bool make_not_entrant() { return make_not_entrant_or_zombie(not_entrant); }
  bool make_zombie()      { return make_not_entrant_or_zombie(zombie); }

  // used by jvmti to track if the unload event has been reported
  bool unload_reported()     { return _unload_reported; }
  void set_unload_reported() { _unload_reported = true; }

  bool is_marked_for_deoptimization() const { return _markedForDeoptimization; }
  void mark_for_deoptimization()            { _markedForDeoptimization = true; }

  void make_unloaded(BoolObjectClosure* is_alive, oop cause);

  bool has_dependencies()         { return dependencies_size() != 0; }
  void flush_dependencies(BoolObjectClosure* is_alive);
  bool has_flushed_dependencies() { return flags.hasFlushedDependencies; }
  void set_has_flushed_dependencies() {
    check_safepoint();
    assert(!has_flushed_dependencies(), "should only happen once");
    flags.hasFlushedDependencies = 1;
  }

  bool is_marked_for_reclamation() const { return flags.markedForReclamation; }
  void mark_for_reclamation()   { check_safepoint(); flags.markedForReclamation = 1; }
  void unmark_for_reclamation() { check_safepoint(); flags.markedForReclamation = 0; }

  bool has_unsafe_access() const     { return flags.has_unsafe_access; }
  void set_has_unsafe_access(bool z) { flags.has_unsafe_access = z; }

  bool has_method_handle_invokes() const     { return flags.has_method_handle_invokes; }
  void set_has_method_handle_invokes(bool z) { flags.has_method_handle_invokes = z; }

  int  level() const           { return flags.level; }
  void set_level(int newLevel) { check_safepoint(); flags.level = newLevel; }

  int comp_level() const { return _comp_level; }

  int  version() const { return flags.version; }
  void set_version(int v);

  // Non-perm oop support
  bool on_scavenge_root_list() const { return (_scavenge_root_state & 1) != 0; }
 protected:
  enum { npl_on_list = 0x01, npl_marked = 0x10 };
  void set_on_scavenge_root_list()   { _scavenge_root_state = npl_on_list; }
  void clear_on_scavenge_root_list() { _scavenge_root_state = 0; }
  // assertion-checking and pruning logic uses the bits of _scavenge_root_state
#ifndef PRODUCT
  void set_scavenge_root_marked()   { _scavenge_root_state |= npl_marked; }
  void clear_scavenge_root_marked() { _scavenge_root_state &= ~npl_marked; }
  bool scavenge_root_not_marked()   { return (_scavenge_root_state & ~npl_on_list) == 0; }
  // N.B. there is no positive marked query, and we only use the not_marked query for asserts.
#endif // PRODUCT
  nmethod* scavenge_root_link() const     { return _scavenge_root_link; }
  void set_scavenge_root_link(nmethod* n) { _scavenge_root_link = n; }

 public:

  // Sweeper support
  long stack_traversal_mark()           { return _stack_traversal_mark; }
  void set_stack_traversal_mark(long l) { _stack_traversal_mark = l; }

  // Exception cache support
  ExceptionCache* exception_cache() const      { return _exception_cache; }
  void set_exception_cache(ExceptionCache* ec) { _exception_cache = ec; }
  address handler_for_exception_and_pc(Handle exception, address pc);
  void add_handler_for_exception_and_pc(Handle exception, address pc, address handler);
  void remove_from_exception_cache(ExceptionCache* ec);
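
  // How exception dispatch might drive this cache (a hedged sketch;
  // `nm`, `ex` and `faulting_pc` are hypothetical local names, and the
  // authoritative code path lives in the runtime's exception handling):
  //
  //   address handler = nm->handler_for_exception_and_pc(ex, faulting_pc);
  //   if (handler == NULL) {
  //     handler = /* slow path: consult the exception handler table */ NULL;
  //     nm->add_handler_for_exception_and_pc(ex, faulting_pc, handler);
  //   }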

  // implicit exceptions support
  address continuation_for_implicit_exception(address pc);

  // On-stack replacement support
  int osr_entry_bci() const     { assert(_entry_bci != InvocationEntryBci, "wrong kind of nmethod"); return _entry_bci; }
  address osr_entry() const     { assert(_entry_bci != InvocationEntryBci, "wrong kind of nmethod"); return _osr_entry_point; }
  void invalidate_osr_method();
  nmethod* osr_link() const     { return _osr_link; }
  void set_osr_link(nmethod* n) { _osr_link = n; }

  // tells whether frames described by this nmethod can be deoptimized
  // note: native wrappers cannot be deoptimized.
  bool can_be_deoptimized() const { return is_java_method(); }

  // Inline cache support
  void clear_inline_caches();
  void cleanup_inline_caches();
  bool inlinecache_check_contains(address addr) const {
    return (addr >= instructions_begin() && addr < verified_entry_point());
  }

  // unlink and deallocate this nmethod
  // Only the NMethodSweeper class is expected to use this. NMethodSweeper is not
  // expected to use any other private methods/data in this class.

 protected:
  void flush();

 public:
  // If returning true, it is unsafe to remove this nmethod even though it is a zombie
  // nmethod, since the VM might have a reference to it. Should only be called from a safepoint.
  bool is_locked_by_vm() const { return _lock_count > 0; }

  // See comment at definition of _stack_traversal_mark
  void mark_as_seen_on_stack();
  bool can_not_entrant_be_converted();

  // Evolution support. We make old (discarded) compiled methods point to new methodOops.
  void set_method(methodOop method) { _method = method; }

  // GC support
  void do_unloading(BoolObjectClosure* is_alive, OopClosure* keep_alive,
                    bool unloading_occurred);
  bool can_unload(BoolObjectClosure* is_alive, OopClosure* keep_alive,
                  oop* root, bool unloading_occurred);

  void preserve_callee_argument_oops(frame fr, const RegisterMap* reg_map,
                                     OopClosure* f);
  virtual void oops_do(OopClosure* f) { oops_do(f, false); }
  void oops_do(OopClosure* f, bool do_strong_roots_only);
  bool detect_scavenge_root_oops();
  void verify_scavenge_root_oops() PRODUCT_RETURN;

  bool test_set_oops_do_mark();
  static void oops_do_marking_prologue();
  static void oops_do_marking_epilogue();
  static bool oops_do_marking_is_active() { return _oops_do_mark_nmethods != NULL; }
  DEBUG_ONLY(bool test_oops_do_mark() { return _oops_do_mark_link != NULL; })

  // ScopeDesc for an instruction
  ScopeDesc* scope_desc_at(address pc);

 private:
  ScopeDesc* scope_desc_in(address begin, address end);

  address* orig_pc_addr(const frame* fr) { return (address*) ((address)fr->unextended_sp() + _orig_pc_offset); }

  PcDesc* find_pc_desc_internal(address pc, bool approximate);
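
  // Fast path: re-checks the most recently found PcDesc before falling
  // back to the full lookup in find_pc_desc_internal.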
  PcDesc* find_pc_desc(address pc, bool approximate) {
    PcDesc* desc = _pc_desc_cache.last_pc_desc();
    if (desc != NULL && desc->pc_offset() == pc - instructions_begin()) {
      return desc;
    }
    return find_pc_desc_internal(pc, approximate);
  }

 public:
  // ScopeDesc retrieval operation
  PcDesc* pc_desc_at(address pc)   { return find_pc_desc(pc, false); }
  // pc_desc_near returns the first PcDesc at or after the given pc.
  PcDesc* pc_desc_near(address pc) { return find_pc_desc(pc, true); }

 public:
  // copying of debugging information
  void copy_scopes_pcs(PcDesc* pcs, int count);
  void copy_scopes_data(address buffer, int size);

  // Deopt
  // Returns true if the PC is one we would expect if the frame is being deopted.
  bool is_deopt_pc      (address pc) { return is_deopt_entry(pc) || is_deopt_mh_entry(pc); }
  bool is_deopt_entry   (address pc) { return pc == deopt_handler_begin(); }
  bool is_deopt_mh_entry(address pc) { return pc == deopt_mh_handler_begin(); }
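
  // A minimal sketch of how the split handlers let a caller classify a
  // deopt (illustrative only; `nm` is the nmethod owning a frame and
  // `pc` the return address found in that frame, both hypothetical
  // local names):
  //
  //   if (nm->is_deopt_mh_entry(pc)) {
  //     // frame will resume in the MH deopt handler: the deopt
  //     // happened at a MethodHandle call site
  //   } else if (nm->is_deopt_entry(pc)) {
  //     // ordinary deopt at a regular call site
  //   }
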
  // Accessor/mutator for the original pc of a frame before it was deopted.
  address get_original_pc(const frame* fr) { return *orig_pc_addr(fr); }
  void    set_original_pc(const frame* fr, address pc) { *orig_pc_addr(fr) = pc; }

  static address get_deopt_original_pc(const frame* fr);

  // MethodHandle
  bool is_method_handle_return(address return_pc);

  // jvmti support:
  void post_compiled_method_load_event();

  // verify operations
  void verify();
  void verify_scopes();
  void verify_interrupt_point(address interrupt_point);

  // printing support
  void print() const;
  void print_code();
  void print_relocations()                    PRODUCT_RETURN;
  void print_pcs()                            PRODUCT_RETURN;
  void print_scopes()                         PRODUCT_RETURN;
  void print_dependencies()                   PRODUCT_RETURN;
  void print_value_on(outputStream* st) const PRODUCT_RETURN;
  void print_calls(outputStream* st)          PRODUCT_RETURN;
  void print_handler_table()                  PRODUCT_RETURN;
  void print_nul_chk_table()                  PRODUCT_RETURN;
  void print_nmethod(bool print_code);

  void print_on(outputStream* st, const char* title) const;

  // Logging
  void log_identity(xmlStream* log) const;
  void log_new_nmethod() const;
  void log_state_change() const;

  // Prints block-level comments, including nmethod specific block labels:
  virtual void print_block_comment(outputStream* stream, address block_begin) {
    print_nmethod_labels(stream, block_begin);
    CodeBlob::print_block_comment(stream, block_begin);
  }
  void print_nmethod_labels(outputStream* stream, address block_begin);

  // Prints a comment for one native instruction (reloc info, pc desc)
  void print_code_comment_on(outputStream* st, int column, address begin, address end);
  static void print_statistics() PRODUCT_RETURN;

  // Compiler task identification. Note that all OSR methods
  // are numbered in an independent sequence if CICountOSR is true,
  // and native method wrappers are also numbered independently if
  // CICountNative is true.
  int compile_id() const { return _compile_id; }
  const char* compile_kind() const;

  // For debugging
  // CompiledIC*   IC_at(char* p) const;
  // PrimitiveIC*  primitiveIC_at(char* p) const;
  oop embeddedOop_at(address p);

  // tells if any of this method's dependencies have been invalidated
  // (this is expensive!)
  bool check_all_dependencies();

  // tells if this compiled method is dependent on the given changes,
  // and the changes have invalidated it
  bool check_dependency_on(DepChange& changes);

  // Evolution support. Tells if this compiled method is dependent on any of
  // methods m() of class dependee, such that if m() in dependee is replaced,
  // this compiled method will have to be deoptimized.
  bool is_evol_dependent_on(klassOop dependee);

  // Fast breakpoint support. Tells if this compiled method is
  // dependent on the given method. Returns true if this nmethod
  // corresponds to the given method as well.
  bool is_dependent_on_method(methodOop dependee);

  // is it ok to patch at address?
  bool is_patchable_at(address instr_address);

  // UseBiasedLocking support
  ByteSize compiled_synchronized_native_basic_lock_owner_sp_offset() {
    return _compiled_synchronized_native_basic_lock_owner_sp_offset;
  }
  ByteSize compiled_synchronized_native_basic_lock_sp_offset() {
    return _compiled_synchronized_native_basic_lock_sp_offset;
  }

  // support for code generation
  static int verified_entry_point_offset() { return offset_of(nmethod, _verified_entry_point); }
  static int osr_entry_point_offset()      { return offset_of(nmethod, _osr_entry_point); }
  static int entry_bci_offset()            { return offset_of(nmethod, _entry_bci); }

};

// Locks an nmethod so its code will not get removed, even if it is a zombie/not_entrant method
class nmethodLocker : public StackObj {
  nmethod* _nm;

  static void lock_nmethod(nmethod* nm);   // note: nm can be NULL
  static void unlock_nmethod(nmethod* nm); // (ditto)

 public:
  nmethodLocker(address pc); // derive nm from pc
  nmethodLocker(nmethod* nm) { _nm = nm; lock_nmethod(_nm); }
  nmethodLocker() { _nm = NULL; }
  ~nmethodLocker() { unlock_nmethod(_nm); }

  nmethod* code() { return _nm; }
  void set_code(nmethod* new_nm) {
    unlock_nmethod(_nm); // note: This works even if _nm==new_nm.
    _nm = new_nm;
    lock_nmethod(_nm);
  }
};
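
// Typical RAII use (a sketch; `pc` stands for some address known to lie
// inside compiled code):
//
//   {
//     nmethodLocker nml(pc);    // derives and pins the nmethod owning pc
//     nmethod* nm = nml.code();
//     // ... nm cannot be flushed while nml is in scope ...
//   }                           // destructor unlocks the nmethod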