/*
 * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_CODE_NMETHOD_HPP
#define SHARE_VM_CODE_NMETHOD_HPP

#include "code/codeBlob.hpp"
#include "code/pcDesc.hpp"
#include "oops/metadata.hpp"

// This class is used internally by nmethods, to cache
// exception/pc/handler information.

class ExceptionCache : public CHeapObj<mtCode> {
  friend class VMStructs;
 private:
  enum { cache_size = 16 };
  Klass*   _exception_type;
  address  _pc[cache_size];
  address  _handler[cache_size];
  int      _count;
  ExceptionCache* _next;

  address pc_at(int index)                     { assert(index >= 0 && index < count(), ""); return _pc[index]; }
  void    set_pc_at(int index, address a)      { assert(index >= 0 && index < cache_size, ""); _pc[index] = a; }
  address handler_at(int index)                { assert(index >= 0 && index < count(), ""); return _handler[index]; }
  void    set_handler_at(int index, address a) { assert(index >= 0 && index < cache_size, ""); _handler[index] = a; }
  int     count()                              { return _count; }
  void    increment_count()                    { _count++; }

 public:

  ExceptionCache(Handle exception, address pc, address handler);

  Klass*  exception_type()              { return _exception_type; }
  ExceptionCache* next()                { return _next; }
  void    set_next(ExceptionCache *ec)  { _next = ec; }

  address match(Handle exception, address pc);
  bool    match_exception_with_space(Handle exception);
  address test_address(address addr);
  bool    add_address_and_handler(address addr, address handler);
};
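
// Illustrative sketch (not part of the original header): how a caller might
// walk an nmethod's exception cache chain. find_cached_handler() is a
// hypothetical helper; exception_cache(), next(), match(), and
// add_handler_for_exception_and_pc() are the real accessors declared in
// this file.
//
//   address find_cached_handler(nmethod* nm, Handle exception, address pc) {
//     for (ExceptionCache* ec = nm->exception_cache(); ec != NULL; ec = ec->next()) {
//       address handler = ec->match(exception, pc);  // hit: cached (type, pc) pair
//       if (handler != NULL) return handler;
//     }
//     return NULL;  // miss: compute the handler, then cache it with
//                   // nm->add_handler_for_exception_and_pc(exception, pc, handler)
//   }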

// cache pc descs found in earlier inquiries
class PcDescCache VALUE_OBJ_CLASS_SPEC {
  friend class VMStructs;
 private:
  enum { cache_size = 4 };
  PcDesc* _pc_descs[cache_size]; // last cache_size pc_descs found
 public:
  PcDescCache() { debug_only(_pc_descs[0] = NULL); }
  void    reset_to(PcDesc* initial_pc_desc);
  PcDesc* find_pc_desc(int pc_offset, bool approximate);
  void    add_pc_desc(PcDesc* pc_desc);
  PcDesc* last_pc_desc() { return _pc_descs[0]; }
};
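
// Illustrative note (not part of the original header): slot 0 always holds
// the most recently found PcDesc, so last_pc_desc() is a one-entry fast path.
// nmethod::find_pc_desc() below uses exactly that shape:
//
//   PcDesc* desc = _pc_desc_cache.last_pc_desc();
//   if (desc != NULL && desc->pc_offset() == pc - code_begin()) return desc;
//   return find_pc_desc_internal(pc, approximate);  // full search on a miss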

// nmethods (native methods) are the compiled code versions of Java methods.
//
// An nmethod contains:
// - header                 (the nmethod structure)
//  [Relocation]
// - relocation information
// - constant part          (doubles, longs and floats used in nmethod)
// - oop table
//  [Code]
// - code body
// - exception handler
// - stub code
//  [Debugging information]
// - oop array
// - data array
// - pcs
//  [Exception handler table]
// - handler entry point array
//  [Implicit Null Pointer exception table]
// - implicit null table array
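
// Illustrative note (not part of the original header): each part above is
// located purely by a byte offset from header_begin(), and adjacent parts
// share an offset, so for example:
//
//   nm->insts_end() == nm->stub_begin()            // both header_begin() + _stub_offset
//   nm->stub_end()  == (address) nm->oops_begin()  // both use _oops_offset
//
// The "boundaries for different parts" accessors inside the class below
// spell out the full mapping.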

class Dependencies;
class ExceptionHandlerTable;
class ImplicitExceptionTable;
class AbstractCompiler;
class xmlStream;

class nmethod : public CodeBlob {
  friend class VMStructs;
  friend class NMethodSweeper;
  friend class CodeCache;  // scavengable oops
 private:
  // Shared fields for all nmethods
  Method*   _method;
  int       _entry_bci;  // != InvocationEntryBci if this nmethod is an on-stack replacement method
  jmethodID _jmethod_id; // Cache of method()->jmethod_id()

  // To support simple linked-list chaining of nmethods:
  nmethod*  _osr_link;           // from InstanceKlass::osr_nmethods_head
  nmethod*  _scavenge_root_link; // from CodeCache::scavenge_root_nmethods

  static nmethod* volatile _oops_do_mark_nmethods;
  nmethod*        volatile _oops_do_mark_link;

  AbstractCompiler* _compiler;   // The compiler which compiled this nmethod

  // offsets for entry points
  address _entry_point;          // entry point with class check
  address _verified_entry_point; // entry point without class check
  address _osr_entry_point;      // entry point for on stack replacement

  // Offsets for different nmethod parts
  int _exception_offset;
  // All deoptees will resume execution at the location described by
  // this offset.
  int _deoptimize_offset;
  // All deoptees at a MethodHandle call site will resume execution
  // at the location described by this offset.
  int _deoptimize_mh_offset;
  // Offset of the unwind handler if it exists
  int _unwind_handler_offset;

#ifdef HAVE_DTRACE_H
  int _trap_offset;
#endif // def HAVE_DTRACE_H
  int _consts_offset;
  int _stub_offset;
  int _oops_offset;     // offset to where embedded oop table begins (inside data)
  int _metadata_offset; // embedded meta data table
  int _scopes_data_offset;
  int _scopes_pcs_offset;
  int _dependencies_offset;
  int _handler_table_offset;
  int _nul_chk_table_offset;
  int _nmethod_end_offset;

  // location in frame (offset for sp) that deopt can store the original
  // pc during a deopt.
  int _orig_pc_offset;

  int _compile_id;  // which compilation made this nmethod
  int _comp_level;  // compilation level

  // protected by CodeCache_lock
  bool _has_flushed_dependencies;  // Used for maintenance of dependencies (CodeCache_lock)

  bool _marked_for_reclamation;    // Used by NMethodSweeper (set only by sweeper)
  bool _marked_for_deoptimization; // Used for stack deoptimization

  // used by jvmti to track if an unload event has been posted for this nmethod.
  bool _unload_reported;

  // set during construction
  unsigned int _has_unsafe_access:1;         // May fault due to unsafe access.
  unsigned int _has_method_handle_invokes:1; // Has this method MethodHandle invokes?
  unsigned int _lazy_critical_native:1;      // Lazy JNI critical native
  unsigned int _has_wide_vectors:1;          // Preserve wide vectors at safepoints

  // Protected by Patching_lock
  volatile unsigned char _state; // {in_use, not_entrant, zombie, unloaded}

#ifdef ASSERT
  bool _oops_are_stale;  // indicates that it's no longer safe to access the oops section
#endif

  enum { in_use      = 0,   // executable nmethod
         not_entrant = 1,   // marked for deoptimization but activations may still exist,
                            // will be transformed to zombie when all activations are gone
         zombie      = 2,   // no activations exist, nmethod is ready for purge
         unloaded    = 3 }; // there should be no activations, should not be called,
                            // will be transformed to zombie immediately
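
  // State machine implied by the comments above (sketch, not in the original
  // header): make_not_entrant()/make_zombie() below drive the upper path,
  // while make_unloaded() produces the unloaded state.
  //
  //   in_use --> not_entrant --(last activation gone)--> zombie --> purged
  //   in_use --> unloaded ------(immediately)----------> zombie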

  jbyte _scavenge_root_state;

#if INCLUDE_RTM_OPT
  // RTM state at compile time. Used during deoptimization to decide
  // whether to restart collecting RTM locking abort statistics.
  RTMState _rtm_state;
#endif

  // Nmethod Flushing lock. If non-zero, then the nmethod is not removed
  // and is not made into a zombie. However, once the nmethod is made into
  // a zombie, it will be locked one final time if CompiledMethodUnload
  // event processing needs to be done.
  jint _lock_count;

  // not_entrant method removal. Each mark_sweep pass will update
  // this mark to the current sweep invocation count if the method is seen
  // on the stack. A not_entrant method can be removed when there are no
  // more activations, i.e., when _stack_traversal_mark is less than the
  // current sweep traversal index.
  long _stack_traversal_mark;

  // The _hotness_counter indicates the hotness of a method. The higher
  // the value the hotter the method. The hotness counter of an nmethod is
  // set to [(ReservedCodeCacheSize / (1024 * 1024)) * 2] each time the method
  // is active while stack scanning (mark_active_nmethods()). The hotness
  // counter is decreased (by 1) while sweeping.
  int _hotness_counter;
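
  // Worked example (illustrative, assuming ReservedCodeCacheSize is 64M):
  // each time the method is seen during stack scanning the counter is reset
  // to (64M / 1M) * 2 = 128; the sweeper then decrements it by 1 per sweep,
  // so an nmethod not seen on any stack for 128 sweeps counts down to zero.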

  ExceptionCache* _exception_cache;
  PcDescCache     _pc_desc_cache;

  // These are used for compiled synchronized native methods to
  // locate the owner and stack slot for the BasicLock so that we can
  // properly revoke the bias of the owner if necessary. They are
  // needed because there is no debug information for compiled native
  // wrappers and the oop maps are insufficient to allow
  // frame::retrieve_receiver() to work. Currently they are expected
  // to be byte offsets from the Java stack pointer for maximum code
  // sharing between platforms. Note that currently biased locking
  // will never cause Class instances to be biased but this code
  // handles the static synchronized case as well.
  // JVMTI's GetLocalInstance() also uses these offsets to find the receiver
  // for non-static native wrapper frames.
  ByteSize _native_receiver_sp_offset;
  ByteSize _native_basic_lock_sp_offset;

  friend class nmethodLocker;

  // For native wrappers
  nmethod(Method* method,
          int nmethod_size,
          int compile_id,
          CodeOffsets* offsets,
          CodeBuffer* code_buffer,
          int frame_size,
          ByteSize basic_lock_owner_sp_offset, /* synchronized natives only */
          ByteSize basic_lock_sp_offset,       /* synchronized natives only */
          OopMapSet* oop_maps);

#ifdef HAVE_DTRACE_H
  // For native wrappers
  nmethod(Method* method,
          int nmethod_size,
          CodeOffsets* offsets,
          CodeBuffer* code_buffer,
          int frame_size);
#endif // def HAVE_DTRACE_H

  // Creation support
  nmethod(Method* method,
          int nmethod_size,
          int compile_id,
          int entry_bci,
          CodeOffsets* offsets,
          int orig_pc_offset,
          DebugInformationRecorder* recorder,
          Dependencies* dependencies,
          CodeBuffer* code_buffer,
          int frame_size,
          OopMapSet* oop_maps,
          ExceptionHandlerTable* handler_table,
          ImplicitExceptionTable* nul_chk_table,
          AbstractCompiler* compiler,
          int comp_level);

  // helper methods
  void* operator new(size_t size, int nmethod_size) throw();

  const char* reloc_string_for(u_char* begin, u_char* end);
  // Returns true if this thread changed the state of the nmethod or
  // false if another thread performed the transition.
  bool make_not_entrant_or_zombie(unsigned int state);
  void inc_decompile_count();

  // Used to manipulate the exception cache
  void add_exception_cache_entry(ExceptionCache* new_entry);
  ExceptionCache* exception_cache_entry_for_exception(Handle exception);

  // Inform external interfaces that a compiled method has been unloaded
  void post_compiled_method_unload();

  // Initialize fields to their default values
  void init_defaults();

 public:
  // create nmethod with entry_bci
  static nmethod* new_nmethod(methodHandle method,
                              int compile_id,
                              int entry_bci,
                              CodeOffsets* offsets,
                              int orig_pc_offset,
                              DebugInformationRecorder* recorder,
                              Dependencies* dependencies,
                              CodeBuffer* code_buffer,
                              int frame_size,
                              OopMapSet* oop_maps,
                              ExceptionHandlerTable* handler_table,
                              ImplicitExceptionTable* nul_chk_table,
                              AbstractCompiler* compiler,
                              int comp_level);

  static nmethod* new_native_nmethod(methodHandle method,
                                     int compile_id,
                                     CodeBuffer* code_buffer,
                                     int vep_offset,
                                     int frame_complete,
                                     int frame_size,
                                     ByteSize receiver_sp_offset,
                                     ByteSize basic_lock_sp_offset,
                                     OopMapSet* oop_maps);

#ifdef HAVE_DTRACE_H
  // The method we generate for a dtrace probe has to look
  // like an nmethod as far as the rest of the system is concerned,
  // which is somewhat unfortunate.
  static nmethod* new_dtrace_nmethod(methodHandle method,
                                     CodeBuffer* code_buffer,
                                     int vep_offset,
                                     int trap_offset,
                                     int frame_complete,
                                     int frame_size);

  int trap_offset() const      { return _trap_offset; }
  address trap_address() const { return insts_begin() + _trap_offset; }

#endif // def HAVE_DTRACE_H

  // accessors
  Method* method() const                          { return _method; }
  AbstractCompiler* compiler() const              { return _compiler; }

  // type info
  bool is_nmethod() const                         { return true; }
  bool is_java_method() const                     { return !method()->is_native(); }
  bool is_native_method() const                   { return method()->is_native(); }
  bool is_osr_method() const                      { return _entry_bci != InvocationEntryBci; }

  bool is_compiled_by_c1() const;
  bool is_compiled_by_c2() const;
  bool is_compiled_by_shark() const;

  // boundaries for different parts
  address consts_begin          () const          { return header_begin() + _consts_offset        ; }
  address consts_end            () const          { return header_begin() + code_offset()         ; }
  address insts_begin           () const          { return header_begin() + code_offset()         ; }
  address insts_end             () const          { return header_begin() + _stub_offset          ; }
  address stub_begin            () const          { return header_begin() + _stub_offset          ; }
  address stub_end              () const          { return header_begin() + _oops_offset          ; }
  address exception_begin       () const          { return header_begin() + _exception_offset     ; }
  address deopt_handler_begin   () const          { return header_begin() + _deoptimize_offset    ; }
  address deopt_mh_handler_begin() const          { return header_begin() + _deoptimize_mh_offset ; }
  address unwind_handler_begin  () const          { return _unwind_handler_offset != -1 ? (header_begin() + _unwind_handler_offset) : NULL; }
  oop*    oops_begin            () const          { return (oop*) (header_begin() + _oops_offset)     ; }
  oop*    oops_end              () const          { return (oop*) (header_begin() + _metadata_offset) ; }

  Metadata** metadata_begin     () const          { return (Metadata**) (header_begin() + _metadata_offset)    ; }
  Metadata** metadata_end       () const          { return (Metadata**) (header_begin() + _scopes_data_offset) ; }

  address scopes_data_begin     () const          { return header_begin() + _scopes_data_offset   ; }
  address scopes_data_end       () const          { return header_begin() + _scopes_pcs_offset    ; }
  PcDesc* scopes_pcs_begin      () const          { return (PcDesc*) (header_begin() + _scopes_pcs_offset  ); }
  PcDesc* scopes_pcs_end        () const          { return (PcDesc*) (header_begin() + _dependencies_offset); }
  address dependencies_begin    () const          { return header_begin() + _dependencies_offset  ; }
  address dependencies_end      () const          { return header_begin() + _handler_table_offset ; }
  address handler_table_begin   () const          { return header_begin() + _handler_table_offset ; }
  address handler_table_end     () const          { return header_begin() + _nul_chk_table_offset ; }
  address nul_chk_table_begin   () const          { return header_begin() + _nul_chk_table_offset ; }
  address nul_chk_table_end     () const          { return header_begin() + _nmethod_end_offset   ; }

  // Sizes
  int consts_size       () const                  { return            consts_end       () -            consts_begin       (); }
  int insts_size        () const                  { return            insts_end        () -            insts_begin        (); }
  int stub_size         () const                  { return            stub_end         () -            stub_begin         (); }
  int oops_size         () const                  { return (address)  oops_end         () - (address)  oops_begin         (); }
  int metadata_size     () const                  { return (address)  metadata_end     () - (address)  metadata_begin     (); }
  int scopes_data_size  () const                  { return            scopes_data_end  () -            scopes_data_begin  (); }
  int scopes_pcs_size   () const                  { return (intptr_t) scopes_pcs_end   () - (intptr_t) scopes_pcs_begin   (); }
  int dependencies_size () const                  { return            dependencies_end () -            dependencies_begin (); }
  int handler_table_size() const                  { return            handler_table_end() -            handler_table_begin(); }
  int nul_chk_table_size() const                  { return            nul_chk_table_end() -            nul_chk_table_begin(); }

  int total_size        () const;

  void dec_hotness_counter()        { _hotness_counter--; }
  void set_hotness_counter(int val) { _hotness_counter = val; }
  int  hotness_counter() const      { return _hotness_counter; }

  // Containment
  bool consts_contains       (address addr)    const { return consts_begin       () <= addr && addr < consts_end       (); }
  bool insts_contains        (address addr)    const { return insts_begin        () <= addr && addr < insts_end        (); }
  bool stub_contains         (address addr)    const { return stub_begin         () <= addr && addr < stub_end         (); }
  bool oops_contains         (oop* addr)       const { return oops_begin         () <= addr && addr < oops_end         (); }
  bool metadata_contains     (Metadata** addr) const { return metadata_begin     () <= addr && addr < metadata_end     (); }
  bool scopes_data_contains  (address addr)    const { return scopes_data_begin  () <= addr && addr < scopes_data_end  (); }
  bool scopes_pcs_contains   (PcDesc* addr)    const { return scopes_pcs_begin   () <= addr && addr < scopes_pcs_end   (); }
  bool handler_table_contains(address addr)    const { return handler_table_begin() <= addr && addr < handler_table_end(); }
  bool nul_chk_table_contains(address addr)    const { return nul_chk_table_begin() <= addr && addr < nul_chk_table_end(); }

  // entry points
  address entry_point() const          { return _entry_point;          } // normal entry point
  address verified_entry_point() const { return _verified_entry_point; } // if klass is correct

  // flag accessing and manipulation
  bool  is_in_use() const      { return _state == in_use; }
  bool  is_alive() const       { return _state == in_use || _state == not_entrant; }
  bool  is_not_entrant() const { return _state == not_entrant; }
  bool  is_zombie() const      { return _state == zombie; }
  bool  is_unloaded() const    { return _state == unloaded; }

#if INCLUDE_RTM_OPT
  // rtm state accessing and manipulating
  RTMState rtm_state() const         { return _rtm_state; }
  void set_rtm_state(RTMState state) { _rtm_state = state; }
#endif

  // Make the nmethod non-entrant. The nmethod will continue to be
  // alive. It is used when an uncommon trap happens. Returns true
  // if this thread changed the state of the nmethod or false if
  // another thread performed the transition.
  bool make_not_entrant() { return make_not_entrant_or_zombie(not_entrant); }
  bool make_zombie()      { return make_not_entrant_or_zombie(zombie); }

  // used by jvmti to track if the unload event has been reported
  bool unload_reported()     { return _unload_reported; }
  void set_unload_reported() { _unload_reported = true; }

  bool is_marked_for_deoptimization() const { return _marked_for_deoptimization; }
  void mark_for_deoptimization()            { _marked_for_deoptimization = true; }

  void make_unloaded(BoolObjectClosure* is_alive, oop cause);

  bool has_dependencies()             { return dependencies_size() != 0; }
  void flush_dependencies(BoolObjectClosure* is_alive);
  bool has_flushed_dependencies()     { return _has_flushed_dependencies; }
  void set_has_flushed_dependencies() {
    assert(!has_flushed_dependencies(), "should only happen once");
    _has_flushed_dependencies = 1;
  }

  bool is_marked_for_reclamation() const { return _marked_for_reclamation; }
  void mark_for_reclamation()            { _marked_for_reclamation = 1; }

  bool has_unsafe_access() const         { return _has_unsafe_access; }
  void set_has_unsafe_access(bool z)     { _has_unsafe_access = z; }

  bool has_method_handle_invokes() const      { return _has_method_handle_invokes; }
  void set_has_method_handle_invokes(bool z)  { _has_method_handle_invokes = z; }

  bool is_lazy_critical_native() const   { return _lazy_critical_native; }
  void set_lazy_critical_native(bool z)  { _lazy_critical_native = z; }

  bool has_wide_vectors() const          { return _has_wide_vectors; }
  void set_has_wide_vectors(bool z)      { _has_wide_vectors = z; }

  int  comp_level() const                { return _comp_level; }

  // Support for oops in scopes and relocs:
  // Note: index 0 is reserved for null.
  oop  oop_at(int index) const { return index == 0 ? (oop) NULL : *oop_addr_at(index); }
  oop* oop_addr_at(int index) const {  // for GC
    // relocation indexes are biased by 1 (because 0 is reserved)
    assert(index > 0 && index <= oops_size(), "must be a valid non-zero index");
    assert(!_oops_are_stale, "oops are stale");
    return &oops_begin()[index - 1];
  }
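
  // Example (from the code above): oop_at(0) is always NULL, while oop_at(1)
  // returns oops_begin()[0], the first slot of the embedded oop table.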

  // Support for meta data in scopes and relocs:
  // Note: index 0 is reserved for null.
  Metadata*  metadata_at(int index) const { return index == 0 ? NULL : *metadata_addr_at(index); }
  Metadata** metadata_addr_at(int index) const {  // for GC
    // relocation indexes are biased by 1 (because 0 is reserved)
    assert(index > 0 && index <= metadata_size(), "must be a valid non-zero index");
    return &metadata_begin()[index - 1];
  }

  void copy_values(GrowableArray<jobject>* oops);
  void copy_values(GrowableArray<Metadata*>* metadata);

  // Relocation support
 private:
  void fix_oop_relocations(address begin, address end, bool initialize_immediates);
  inline void initialize_immediate_oop(oop* dest, jobject handle);

 public:
  void fix_oop_relocations(address begin, address end) { fix_oop_relocations(begin, end, false); }
  void fix_oop_relocations()                           { fix_oop_relocations(NULL, NULL, false); }
  void verify_oop_relocations();

  bool is_at_poll_return(address pc);
  bool is_at_poll_or_poll_return(address pc);

  // Scavengable oop support
  bool on_scavenge_root_list() const        { return (_scavenge_root_state & 1) != 0; }
 protected:
  enum { sl_on_list = 0x01, sl_marked = 0x10 };
  void set_on_scavenge_root_list()          { _scavenge_root_state = sl_on_list; }
  void clear_on_scavenge_root_list()        { _scavenge_root_state = 0; }
  // assertion-checking and pruning logic uses the bits of _scavenge_root_state
#ifndef PRODUCT
  void set_scavenge_root_marked()           { _scavenge_root_state |= sl_marked; }
  void clear_scavenge_root_marked()         { _scavenge_root_state &= ~sl_marked; }
  bool scavenge_root_not_marked()           { return (_scavenge_root_state &~ sl_on_list) == 0; }
  // N.B. there is no positive marked query, and we only use the not_marked query for asserts.
#endif // PRODUCT
  nmethod* scavenge_root_link() const       { return _scavenge_root_link; }
  void set_scavenge_root_link(nmethod *n)   { _scavenge_root_link = n; }

 public:

  // Sweeper support
  long stack_traversal_mark()           { return _stack_traversal_mark; }
  void set_stack_traversal_mark(long l) { _stack_traversal_mark = l; }

  // Exception cache support
  ExceptionCache* exception_cache() const      { return _exception_cache; }
  void set_exception_cache(ExceptionCache *ec) { _exception_cache = ec; }
  address handler_for_exception_and_pc(Handle exception, address pc);
  void add_handler_for_exception_and_pc(Handle exception, address pc, address handler);
  void remove_from_exception_cache(ExceptionCache* ec);

  // implicit exceptions support
  address continuation_for_implicit_exception(address pc);

  // On-stack replacement support
  int osr_entry_bci() const         { assert(is_osr_method(), "wrong kind of nmethod"); return _entry_bci; }
  address osr_entry() const         { assert(is_osr_method(), "wrong kind of nmethod"); return _osr_entry_point; }
  void invalidate_osr_method();
  nmethod* osr_link() const         { return _osr_link; }
  void set_osr_link(nmethod *n)     { _osr_link = n; }

  // tells whether frames described by this nmethod can be deoptimized
  // note: native wrappers cannot be deoptimized.
  bool can_be_deoptimized() const { return is_java_method(); }

  // Inline cache support
  void clear_inline_caches();
  void cleanup_inline_caches();
  bool inlinecache_check_contains(address addr) const {
    return (addr >= code_begin() && addr < verified_entry_point());
  }

  // Check that all metadata is still alive
  void verify_metadata_loaders(address low_boundary, BoolObjectClosure* is_alive);

  // unlink and deallocate this nmethod
  // Only NMethodSweeper class is expected to use this. NMethodSweeper is not
  // expected to use any other private methods/data in this class.

 protected:
  void flush();

 public:
  // When true is returned, it is unsafe to remove this nmethod even if
  // it is a zombie, since the VM or the ServiceThread might still be
  // using it.
  bool is_locked_by_vm() const { return _lock_count > 0; }

  // See comment at definition of _stack_traversal_mark
  void mark_as_seen_on_stack();
  bool can_not_entrant_be_converted();

  // Evolution support. We make old (discarded) compiled methods point to new Method*s.
  void set_method(Method* method) { _method = method; }

  // GC support
  void do_unloading(BoolObjectClosure* is_alive, bool unloading_occurred);
  bool can_unload(BoolObjectClosure* is_alive, oop* root, bool unloading_occurred);

  void preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map,
                                     OopClosure* f);
  void oops_do(OopClosure* f) { oops_do(f, false); }
  void oops_do(OopClosure* f, bool allow_zombie);
  bool detect_scavenge_root_oops();
  void verify_scavenge_root_oops() PRODUCT_RETURN;

  bool test_set_oops_do_mark();
  static void oops_do_marking_prologue();
  static void oops_do_marking_epilogue();
  static bool oops_do_marking_is_active() { return _oops_do_mark_nmethods != NULL; }
  bool test_oops_do_mark() { return _oops_do_mark_link != NULL; }

  // ScopeDesc for an instruction
  ScopeDesc* scope_desc_at(address pc);

 private:
  ScopeDesc* scope_desc_in(address begin, address end);

  address* orig_pc_addr(const frame* fr) { return (address*) ((address)fr->unextended_sp() + _orig_pc_offset); }

  PcDesc* find_pc_desc_internal(address pc, bool approximate);

  PcDesc* find_pc_desc(address pc, bool approximate) {
    // Fast path: the most recently found PcDesc is often asked for again.
    PcDesc* desc = _pc_desc_cache.last_pc_desc();
    if (desc != NULL && desc->pc_offset() == pc - code_begin()) {
      return desc;
    }
    // Slow path: full search over the PcDesc table.
    return find_pc_desc_internal(pc, approximate);
  }

 public:
  // ScopeDesc retrieval operation
  PcDesc* pc_desc_at(address pc)   { return find_pc_desc(pc, false); }
  // pc_desc_near returns the first PcDesc at or after the given pc.
  PcDesc* pc_desc_near(address pc) { return find_pc_desc(pc, true); }

 public:
  // copying of debugging information
  void copy_scopes_pcs(PcDesc* pcs, int count);
  void copy_scopes_data(address buffer, int size);

  // Deopt
  // Returns true if the PC is one we would expect if the frame is being deopted.
  bool is_deopt_pc      (address pc) { return is_deopt_entry(pc) || is_deopt_mh_entry(pc); }
  bool is_deopt_entry   (address pc) { return pc == deopt_handler_begin(); }
  bool is_deopt_mh_entry(address pc) { return pc == deopt_mh_handler_begin(); }
  // Accessor/mutator for the original pc of a frame before it was deopted.
  address get_original_pc(const frame* fr)          { return *orig_pc_addr(fr); }
  void set_original_pc(const frame* fr, address pc) { *orig_pc_addr(fr) = pc; }

  static address get_deopt_original_pc(const frame* fr);

  // MethodHandle
  bool is_method_handle_return(address return_pc);

  // jvmti support:
  void post_compiled_method_load_event();
  jmethodID get_and_cache_jmethod_id();

  // verify operations
  void verify();
  void verify_scopes();
  void verify_interrupt_point(address interrupt_point);

  // printing support
  void print() const;
  void print_code();
  void print_relocations()                    PRODUCT_RETURN;
  void print_pcs()                            PRODUCT_RETURN;
  void print_scopes()                         PRODUCT_RETURN;
  void print_dependencies()                   PRODUCT_RETURN;
  void print_value_on(outputStream* st) const PRODUCT_RETURN;
  void print_calls(outputStream* st)          PRODUCT_RETURN;
  void print_handler_table()                  PRODUCT_RETURN;
  void print_nul_chk_table()                  PRODUCT_RETURN;
  void print_nmethod(bool print_code);

  // need to re-define this from CodeBlob else the overload hides it
  virtual void print_on(outputStream* st) const { CodeBlob::print_on(st); }
  void print_on(outputStream* st, const char* msg) const;

  // Logging
  void log_identity(xmlStream* log) const;
  void log_new_nmethod() const;
  void log_state_change() const;

  // Prints block-level comments, including nmethod specific block labels:
  virtual void print_block_comment(outputStream* stream, address block_begin) const {
    print_nmethod_labels(stream, block_begin);
    CodeBlob::print_block_comment(stream, block_begin);
  }
  void print_nmethod_labels(outputStream* stream, address block_begin) const;

  // Prints a comment for one native instruction (reloc info, pc desc)
  void print_code_comment_on(outputStream* st, int column, address begin, address end);
  static void print_statistics() PRODUCT_RETURN;

  // Compiler task identification. Note that all OSR methods
  // are numbered in an independent sequence if CICountOSR is true,
  // and native method wrappers are also numbered independently if
  // CICountNative is true.
  int compile_id() const { return _compile_id; }
  const char* compile_kind() const;

  // For debugging
  // CompiledIC*    IC_at(char* p) const;
  // PrimitiveIC*   primitiveIC_at(char* p) const;
  oop embeddedOop_at(address p);

  // tells if any of this method's dependencies have been invalidated
  // (this is expensive!)
  bool check_all_dependencies();

  // tells if this compiled method is dependent on the given changes,
  // and the changes have invalidated it
  bool check_dependency_on(DepChange& changes);

  // Evolution support. Tells if this compiled method is dependent on any of
  // methods m() of class dependee, such that if m() in dependee is replaced,
  // this compiled method will have to be deoptimized.
  bool is_evol_dependent_on(Klass* dependee);

  // Fast breakpoint support. Tells if this compiled method is
  // dependent on the given method. Returns true if this nmethod
  // corresponds to the given method as well.
  bool is_dependent_on_method(Method* dependee);

  // is it ok to patch at address?
  bool is_patchable_at(address instr_address);

  // UseBiasedLocking support
  ByteSize native_receiver_sp_offset() {
    return _native_receiver_sp_offset;
  }
  ByteSize native_basic_lock_sp_offset() {
    return _native_basic_lock_sp_offset;
  }

  // support for code generation
  static int verified_entry_point_offset() { return offset_of(nmethod, _verified_entry_point); }
  static int osr_entry_point_offset()      { return offset_of(nmethod, _osr_entry_point); }
  static int entry_bci_offset()            { return offset_of(nmethod, _entry_bci); }

  // RedefineClasses support. Mark metadata in nmethods as on_stack so that
  // redefine classes doesn't purge it.
  static void mark_on_stack(nmethod* nm) {
    nm->metadata_do(Metadata::mark_on_stack);
  }
  void metadata_do(void f(Metadata*));
};

// Locks an nmethod so its code will not get removed and it will not
// be made into a zombie, even if it is a not_entrant method. After the
// nmethod becomes a zombie, if CompiledMethodUnload event processing
// needs to be done, then lock_nmethod() is used directly to keep the
// generated code from being reused too early.
class nmethodLocker : public StackObj {
  nmethod* _nm;

 public:

  // note: nm can be NULL
  // Only JvmtiDeferredEvent::compiled_method_unload_event()
  // should pass zombie_ok == true.
  static void lock_nmethod(nmethod* nm, bool zombie_ok = false);
  static void unlock_nmethod(nmethod* nm); // (ditto)

  nmethodLocker(address pc); // derive nm from pc
  nmethodLocker(nmethod *nm) { _nm = nm; lock_nmethod(_nm); }
  nmethodLocker() { _nm = NULL; }
  ~nmethodLocker() { unlock_nmethod(_nm); }

  nmethod* code() { return _nm; }
  void set_code(nmethod* new_nm) {
    unlock_nmethod(_nm);  // note: This works even if _nm==new_nm.
    _nm = new_nm;
    lock_nmethod(_nm);
  }
};
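
// Usage sketch (illustrative, not part of the original header): nmethodLocker
// is a StackObj, so pinning an nmethod for the duration of a scope is just:
//
//   {
//     nmethodLocker nml(nm);  // constructor calls lock_nmethod(nm), so nm
//                             // cannot be flushed or zombified in this scope
//     // ... use nm's code and data safely here ...
//   }                         // destructor calls unlock_nmethod(nm)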

#endif // SHARE_VM_CODE_NMETHOD_HPP