Mon, 11 Feb 2013 14:06:22 -0500
8007320: NPG: move method annotations
Summary: allocate method annotations and attach to ConstMethod if present
Reviewed-by: dcubed, jiangli, sspitsyn, iklam
1 /*
2 * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
25 #ifndef SHARE_VM_OOPS_METHODOOP_HPP
26 #define SHARE_VM_OOPS_METHODOOP_HPP
28 #include "classfile/vmSymbols.hpp"
29 #include "code/compressedStream.hpp"
30 #include "compiler/oopMap.hpp"
31 #include "interpreter/invocationCounter.hpp"
32 #include "oops/annotations.hpp"
33 #include "oops/constantPool.hpp"
34 #include "oops/instanceKlass.hpp"
35 #include "oops/oop.hpp"
36 #include "oops/typeArrayOop.hpp"
37 #include "utilities/accessFlags.hpp"
38 #include "utilities/growableArray.hpp"
40 // A Method* represents a Java method.
41 //
42 // Memory layout (each line represents a word). Note that most applications load thousands of methods,
43 // so keeping the size of this structure small has a big impact on footprint.
44 //
45 // We put all oops and method_size first for better gc cache locality.
46 //
47 // The actual bytecodes are inlined after the end of the Method struct.
48 //
49 // There are bits in the access_flags telling whether inlined tables are present.
50 // Note that accessing the line number and local variable tables is not performance critical at all.
51 // Accessing the checked exceptions table is used by reflection, so we put that last to make access
52 // to it fast.
53 //
54 // The line number table is compressed and inlined following the byte codes. It is found as the first
55 // byte following the byte codes. The checked exceptions table and the local variable table are inlined
56 // after the line number table, and indexed from the end of the method. We do not compress the checked
57 // exceptions table since the average length is less than 2, and do not bother to compress the local
58 // variable table either since it is mostly absent.
59 //
60 // Note that native_function and signature_handler have to be at fixed offsets (required by the interpreter)
61 //
62 // |------------------------------------------------------|
63 // | header |
64 // | klass |
65 // |------------------------------------------------------|
66 // | ConstMethod* (oop) |
67 // |------------------------------------------------------|
68 // | methodData (oop) |
69 // | interp_invocation_count |
70 // |------------------------------------------------------|
71 // | access_flags |
72 // | vtable_index |
73 // |------------------------------------------------------|
74 // | result_index (C++ interpreter only) |
75 // |------------------------------------------------------|
76 // | method_size | intrinsic_id| flags |
77 // |------------------------------------------------------|
78 // | throwout_count | num_breakpoints |
79 // |------------------------------------------------------|
80 // | invocation_counter |
81 // | backedge_counter |
82 // |------------------------------------------------------|
83 // | prev_time (tiered only, 64 bit wide) |
84 // | |
85 // |------------------------------------------------------|
86 // | rate (tiered) |
87 // |------------------------------------------------------|
88 // | code (pointer) |
89 // | i2i (pointer) |
90 // | adapter (pointer) |
91 // | from_compiled_entry (pointer) |
92 // | from_interpreted_entry (pointer) |
93 // |------------------------------------------------------|
94 // | native_function (present only if native) |
95 // | signature_handler (present only if native) |
96 // |------------------------------------------------------|
99 class CheckedExceptionElement;
100 class LocalVariableTableElement;
101 class AdapterHandlerEntry;
102 class MethodData;
103 class ConstMethod;
104 class InlineTableSizes;
105 class KlassSizeStats;
107 class Method : public Metadata {
108 friend class VMStructs;
109 private:
110 ConstMethod* _constMethod; // Method read-only data.
111 MethodData* _method_data;
112 int _interpreter_invocation_count; // Count of times invoked (reused as prev_event_count in tiered)
113 AccessFlags _access_flags; // Access flags
114 int _vtable_index; // vtable index of this method (see VtableIndexFlag)
115 // note: can have vtables with >2**16 elements (because of inheritance)
116 #ifdef CC_INTERP
117 int _result_index; // C++ interpreter needs for converting results to/from stack
118 #endif
119 u2 _method_size; // size of this object
120 u1 _intrinsic_id; // vmSymbols::intrinsic_id (0 == _none)
121 u1 _jfr_towrite : 1, // Flags
122 _force_inline : 1,
123 _hidden : 1,
124 _dont_inline : 1,
125 : 4;
126 u2 _interpreter_throwout_count; // Count of times method was exited via exception while interpreting
127 u2 _number_of_breakpoints; // fullspeed debugging support
128 InvocationCounter _invocation_counter; // Incremented before each activation of the method - used to trigger frequency-based optimizations
129 InvocationCounter _backedge_counter; // Incremented before each backedge taken - used to trigger frequency-based optimizations
131 #ifdef TIERED
132 float _rate; // Events (invocation and backedge counter increments) per millisecond
133 jlong _prev_time; // Previous time the rate was acquired
134 #endif
136 #ifndef PRODUCT
137 int _compiled_invocation_count; // Number of nmethod invocations so far (for perf. debugging)
138 #endif
139 // Entry point for calling both from and to the interpreter.
140 address _i2i_entry; // All-args-on-stack calling convention
141 // Adapter blob (i2c/c2i) for this Method*. Set once when method is linked.
142 AdapterHandlerEntry* _adapter;
143 // Entry point for calling from compiled code, to compiled code if it exists
144 // or else the interpreter.
145 volatile address _from_compiled_entry; // Cache of: _code ? _code->entry_point() : _adapter->c2i_entry()
146 // The entry point for calling both from and to compiled code is
147 // "_code->entry_point()". Because of tiered compilation and de-opt, this
148 // field can come and go. It can transition from NULL to not-null at any
149 // time (whenever a compile completes). It can transition from not-null to
150 // NULL only at safepoints (because of a de-opt).
151 nmethod* volatile _code; // Points to the corresponding piece of native code
152 volatile address _from_interpreted_entry; // Cache of _code ? _adapter->i2c_entry() : _i2i_entry
154 // Constructor
155 Method(ConstMethod* xconst, AccessFlags access_flags, int size);
156 public:
158 static Method* allocate(ClassLoaderData* loader_data,
159 int byte_code_size,
160 AccessFlags access_flags,
161 InlineTableSizes* sizes,
162 ConstMethod::MethodType method_type,
163 TRAPS);
165 // CDS and vtbl checking can create an empty Method to get vtbl pointer.
166 Method(){}
168 // The Method vtable is restored by this call when the Method is in the
169 // shared archive. See patch_klass_vtables() in metaspaceShared.cpp for
170 // all the gory details. SA, dtrace and pstack helpers distinguish metadata
171 // by their vtable.
172 void restore_vtable() { guarantee(is_method(), "vtable restored by this call"); }
173 bool is_method() const volatile { return true; }
175 // accessors for instance variables
177 ConstMethod* constMethod() const { return _constMethod; }
178 void set_constMethod(ConstMethod* xconst) { _constMethod = xconst; }
181 static address make_adapters(methodHandle mh, TRAPS);
182 volatile address from_compiled_entry() const { return (address)OrderAccess::load_ptr_acquire(&_from_compiled_entry); }
183 volatile address from_interpreted_entry() const{ return (address)OrderAccess::load_ptr_acquire(&_from_interpreted_entry); }
185 // access flag
186 AccessFlags access_flags() const { return _access_flags; }
187 void set_access_flags(AccessFlags flags) { _access_flags = flags; }
189 // name
190 Symbol* name() const { return constants()->symbol_at(name_index()); }
191 int name_index() const { return constMethod()->name_index(); }
192 void set_name_index(int index) { constMethod()->set_name_index(index); }
194 // signature
195 Symbol* signature() const { return constants()->symbol_at(signature_index()); }
196 int signature_index() const { return constMethod()->signature_index(); }
197 void set_signature_index(int index) { constMethod()->set_signature_index(index); }
199 // generics support
200 Symbol* generic_signature() const { int idx = generic_signature_index(); return ((idx != 0) ? constants()->symbol_at(idx) : (Symbol*)NULL); }
201 int generic_signature_index() const { return constMethod()->generic_signature_index(); }
202 void set_generic_signature_index(int index) { constMethod()->set_generic_signature_index(index); }
204 // annotations support
205 AnnotationArray* annotations() const {
206 return constMethod()->method_annotations();
207 }
208 AnnotationArray* parameter_annotations() const {
209 return constMethod()->parameter_annotations();
210 }
211 AnnotationArray* annotation_default() const {
212 return constMethod()->default_annotations();
213 }
214 AnnotationArray* type_annotations() const {
215 return constMethod()->type_annotations();
216 }
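// [Editorial sketch, not part of the original changeset.] Per the changeset
// summary, the annotation arrays now live on ConstMethod and are allocated only
// if present, so each accessor above may return NULL. A hypothetical helper
// illustrating the null-check pattern:
//
static bool has_any_method_annotations(Method* m) {
  return m->annotations()           != NULL ||
         m->parameter_annotations() != NULL ||
         m->annotation_default()    != NULL ||
         m->type_annotations()      != NULL;
}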
218 #ifdef CC_INTERP
219 void set_result_index(BasicType type);
220 int result_index() { return _result_index; }
221 #endif
223 // Helper routine: get klass name + "." + method name + signature as
224 // C string, for the purpose of providing more useful NoSuchMethodErrors
225 // and fatal error handling. The string is allocated in resource
226 // area if a buffer is not provided by the caller.
227 char* name_and_sig_as_C_string() const;
228 char* name_and_sig_as_C_string(char* buf, int size) const;
230 // Static routines for situations where we don't have a Method*
231 static char* name_and_sig_as_C_string(Klass* klass, Symbol* method_name, Symbol* signature);
232 static char* name_and_sig_as_C_string(Klass* klass, Symbol* method_name, Symbol* signature, char* buf, int size);
234 Bytecodes::Code java_code_at(int bci) const {
235 return Bytecodes::java_code_at(this, bcp_from(bci));
236 }
237 Bytecodes::Code code_at(int bci) const {
238 return Bytecodes::code_at(this, bcp_from(bci));
239 }
241 // JVMTI breakpoints
242 Bytecodes::Code orig_bytecode_at(int bci) const;
243 void set_orig_bytecode_at(int bci, Bytecodes::Code code);
244 void set_breakpoint(int bci);
245 void clear_breakpoint(int bci);
246 void clear_all_breakpoints();
247 // Tracking number of breakpoints, for fullspeed debugging.
248 // Only mutated by VM thread.
249 u2 number_of_breakpoints() const { return _number_of_breakpoints; }
250 void incr_number_of_breakpoints() { ++_number_of_breakpoints; }
251 void decr_number_of_breakpoints() { --_number_of_breakpoints; }
252 // Initialization only
253 void clear_number_of_breakpoints() { _number_of_breakpoints = 0; }
255 // index into InstanceKlass methods() array
256 // note: also used by jfr
257 u2 method_idnum() const { return constMethod()->method_idnum(); }
258 void set_method_idnum(u2 idnum) { constMethod()->set_method_idnum(idnum); }
260 // code size
261 int code_size() const { return constMethod()->code_size(); }
263 // method size
264 int method_size() const { return _method_size; }
265 void set_method_size(int size) {
266 assert(0 <= size && size < (1 << 16), "invalid method size");
267 _method_size = size;
268 }
270 // constant pool for Klass* holding this method
271 ConstantPool* constants() const { return constMethod()->constants(); }
272 void set_constants(ConstantPool* c) { constMethod()->set_constants(c); }
274 // max stack
275 // return original max stack size for method verification
276 int verifier_max_stack() const { return constMethod()->max_stack(); }
277 int max_stack() const { return constMethod()->max_stack() + extra_stack_entries(); }
278 void set_max_stack(int size) { constMethod()->set_max_stack(size); }
280 // max locals
281 int max_locals() const { return constMethod()->max_locals(); }
282 void set_max_locals(int size) { constMethod()->set_max_locals(size); }
284 int highest_comp_level() const;
285 void set_highest_comp_level(int level);
286 int highest_osr_comp_level() const;
287 void set_highest_osr_comp_level(int level);
289 // Count of times method was exited via exception while interpreting
290 void interpreter_throwout_increment() {
291 if (_interpreter_throwout_count < 65534) {
292 _interpreter_throwout_count++;
293 }
294 }
296 int interpreter_throwout_count() const { return _interpreter_throwout_count; }
297 void set_interpreter_throwout_count(int count) { _interpreter_throwout_count = count; }
299 // size of parameters
300 int size_of_parameters() const { return constMethod()->size_of_parameters(); }
301 void set_size_of_parameters(int size) { constMethod()->set_size_of_parameters(size); }
303 bool has_stackmap_table() const {
304 return constMethod()->has_stackmap_table();
305 }
307 Array<u1>* stackmap_data() const {
308 return constMethod()->stackmap_data();
309 }
311 void set_stackmap_data(Array<u1>* sd) {
312 constMethod()->set_stackmap_data(sd);
313 }
315 // exception handler table
316 bool has_exception_handler() const
317 { return constMethod()->has_exception_handler(); }
318 int exception_table_length() const
319 { return constMethod()->exception_table_length(); }
320 ExceptionTableElement* exception_table_start() const
321 { return constMethod()->exception_table_start(); }
323 // Finds the first entry point bci of an exception handler for an
324 // exception of klass ex_klass thrown at throw_bci. A value of NULL
325 // for ex_klass indicates that the exception klass is not known; in
326 // this case it matches any constraint class. Returns -1 if the
327 // exception cannot be handled in this method. The handler
328 // constraint classes are loaded if necessary. Note that this may
329 // throw an exception if loading of the constraint classes causes
330 // an IllegalAccessError (bugid 4307310) or an OutOfMemoryError.
331 // If an exception is thrown, returns the bci of the
332 // exception handler which caused the exception to be thrown, which
333 // is needed for proper retries. See, for example,
334 // InterpreterRuntime::exception_handler_for_exception.
335 static int fast_exception_handler_bci_for(methodHandle mh, KlassHandle ex_klass, int throw_bci, TRAPS);
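// [Editorial sketch, not part of the original changeset.] A hypothetical caller
// following the contract described above: constraint-class loading may leave a
// pending exception, and -1 means the exception is not handled in this method.
//
static bool handles_exception_locally(methodHandle mh, KlassHandle ex_klass,
                                      int throw_bci, TRAPS) {
  int handler_bci = Method::fast_exception_handler_bci_for(mh, ex_klass, throw_bci, THREAD);
  if (HAS_PENDING_EXCEPTION) {
    // Loading a handler constraint class itself threw (e.g. OutOfMemoryError);
    // leave the new exception pending for the caller to deal with.
    return false;
  }
  return handler_bci != -1;   // -1: unwind, no handler in this method
}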
337 // method data access
338 MethodData* method_data() const {
339 return _method_data;
340 }
341 void set_method_data(MethodData* data) {
342 _method_data = data;
343 }
345 // invocation counter
346 InvocationCounter* invocation_counter() { return &_invocation_counter; }
347 InvocationCounter* backedge_counter() { return &_backedge_counter; }
349 #ifdef TIERED
350 // We are reusing interpreter_invocation_count as a holder for the previous event count!
351 // We can do that since interpreter_invocation_count is not used in tiered.
352 int prev_event_count() const { return _interpreter_invocation_count; }
353 void set_prev_event_count(int count) { _interpreter_invocation_count = count; }
354 jlong prev_time() const { return _prev_time; }
355 void set_prev_time(jlong time) { _prev_time = time; }
356 float rate() const { return _rate; }
357 void set_rate(float rate) { _rate = rate; }
358 #endif
360 int invocation_count();
361 int backedge_count();
363 bool was_executed_more_than(int n);
364 bool was_never_executed() { return !was_executed_more_than(0); }
366 static void build_interpreter_method_data(methodHandle method, TRAPS);
368 int interpreter_invocation_count() {
369 if (TieredCompilation) return invocation_count();
370 else return _interpreter_invocation_count;
371 }
372 void set_interpreter_invocation_count(int count) { _interpreter_invocation_count = count; }
373 int increment_interpreter_invocation_count() {
374 if (TieredCompilation) ShouldNotReachHere();
375 return ++_interpreter_invocation_count;
376 }
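// [Editorial sketch, not part of the original changeset.] A hypothetical
// hotness check built only on the counter accessors above; the threshold is
// made up for illustration and is not a real VM flag.
//
static bool exceeds_simple_threshold(Method* m) {
  const int threshold = 10000;   // illustrative only
  return m->was_executed_more_than(threshold) || m->backedge_count() >= threshold;
}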
378 #ifndef PRODUCT
379 int compiled_invocation_count() const { return _compiled_invocation_count; }
380 void set_compiled_invocation_count(int count) { _compiled_invocation_count = count; }
381 #endif // not PRODUCT
383 // Clear (non-shared space) pointers which could not be relevant
384 // if this (shared) method were mapped into another JVM.
385 void remove_unshareable_info();
387 // nmethod/verified compiler entry
388 address verified_code_entry();
389 bool check_code() const; // Not inline to avoid circular ref
390 nmethod* volatile code() const { assert( check_code(), "" ); return (nmethod *)OrderAccess::load_ptr_acquire(&_code); }
391 void clear_code(); // Clear out any compiled code
392 static void set_code(methodHandle mh, nmethod* code);
393 void set_adapter_entry(AdapterHandlerEntry* adapter) { _adapter = adapter; }
394 address get_i2c_entry();
395 address get_c2i_entry();
396 address get_c2i_unverified_entry();
397 AdapterHandlerEntry* adapter() { return _adapter; }
398 // setup entry points
399 void link_method(methodHandle method, TRAPS);
400 // clear entry points. Used by sharing code
401 void unlink_method();
403 // vtable index
404 enum VtableIndexFlag {
405 // Valid vtable indexes are non-negative (>= 0).
406 // These few negative values are used as sentinels.
407 highest_unused_vtable_index_value = -5,
408 invalid_vtable_index = -4, // distinct from any valid vtable index
409 garbage_vtable_index = -3, // not yet linked; no vtable layout yet
410 nonvirtual_vtable_index = -2 // there is no need for vtable dispatch
411 // 6330203 Note: Do not use -1, which was overloaded with many meanings.
412 };
413 DEBUG_ONLY(bool valid_vtable_index() const { return _vtable_index >= nonvirtual_vtable_index; })
414 int vtable_index() const { assert(valid_vtable_index(), "");
415 return _vtable_index; }
416 void set_vtable_index(int index) { _vtable_index = index; }
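// [Editorial sketch, not part of the original changeset.] The negative
// sentinels above let callers decide between vtable dispatch and static
// binding; a hypothetical check:
//
static bool needs_vtable_dispatch(Method* m) {
  int idx = m->vtable_index();   // asserts valid_vtable_index() in debug builds
  return idx >= 0;               // negative sentinels (e.g. nonvirtual_vtable_index) => statically bound
}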
418 // interpreter entry
419 address interpreter_entry() const { return _i2i_entry; }
420 // Only used when first initialized so we can set _i2i_entry and _from_interpreted_entry
421 void set_interpreter_entry(address entry) { _i2i_entry = entry; _from_interpreted_entry = entry; }
423 // native function (used for native methods only)
424 enum {
425 native_bind_event_is_interesting = true
426 };
427 address native_function() const { return *(native_function_addr()); }
428 address critical_native_function();
430 // Must specify a real function (not NULL).
431 // Use clear_native_function() to unregister.
432 void set_native_function(address function, bool post_event_flag);
433 bool has_native_function() const;
434 void clear_native_function();
436 // signature handler (used for native methods only)
437 address signature_handler() const { return *(signature_handler_addr()); }
438 void set_signature_handler(address handler);
440 // Interpreter oopmap support
441 void mask_for(int bci, InterpreterOopMap* mask);
443 #ifndef PRODUCT
444 // operations on invocation counter
445 void print_invocation_count();
446 #endif
448 // byte codes
449 void set_code(address code) { return constMethod()->set_code(code); }
450 address code_base() const { return constMethod()->code_base(); }
451 bool contains(address bcp) const { return constMethod()->contains(bcp); }
453 // prints byte codes
454 void print_codes() const { print_codes_on(tty); }
455 void print_codes_on(outputStream* st) const PRODUCT_RETURN;
456 void print_codes_on(int from, int to, outputStream* st) const PRODUCT_RETURN;
458 // method parameters
459 int method_parameters_length() const
460 { return constMethod()->method_parameters_length(); }
461 MethodParametersElement* method_parameters_start() const
462 { return constMethod()->method_parameters_start(); }
464 // checked exceptions
465 int checked_exceptions_length() const
466 { return constMethod()->checked_exceptions_length(); }
467 CheckedExceptionElement* checked_exceptions_start() const
468 { return constMethod()->checked_exceptions_start(); }
470 // localvariable table
471 bool has_localvariable_table() const
472 { return constMethod()->has_localvariable_table(); }
473 int localvariable_table_length() const
474 { return constMethod()->localvariable_table_length(); }
475 LocalVariableTableElement* localvariable_table_start() const
476 { return constMethod()->localvariable_table_start(); }
478 bool has_linenumber_table() const
479 { return constMethod()->has_linenumber_table(); }
480 u_char* compressed_linenumber_table() const
481 { return constMethod()->compressed_linenumber_table(); }
483 // method holder (the Klass* holding this method)
484 InstanceKlass* method_holder() const { return constants()->pool_holder(); }
486 void compute_size_of_parameters(Thread *thread); // word size of parameters (receiver if any + arguments)
487 Symbol* klass_name() const; // returns the name of the method holder
488 BasicType result_type() const; // type of the method result
489 int result_type_index() const; // type index of the method result
490 bool is_returning_oop() const { BasicType r = result_type(); return (r == T_OBJECT || r == T_ARRAY); }
491 bool is_returning_fp() const { BasicType r = result_type(); return (r == T_FLOAT || r == T_DOUBLE); }
493 // Checked exceptions thrown by this method (resolved to mirrors)
494 objArrayHandle resolved_checked_exceptions(TRAPS) { return resolved_checked_exceptions_impl(this, THREAD); }
496 // Access flags
497 bool is_public() const { return access_flags().is_public(); }
498 bool is_private() const { return access_flags().is_private(); }
499 bool is_protected() const { return access_flags().is_protected(); }
500 bool is_package_private() const { return !is_public() && !is_private() && !is_protected(); }
501 bool is_static() const { return access_flags().is_static(); }
502 bool is_final() const { return access_flags().is_final(); }
503 bool is_synchronized() const { return access_flags().is_synchronized();}
504 bool is_native() const { return access_flags().is_native(); }
505 bool is_abstract() const { return access_flags().is_abstract(); }
506 bool is_strict() const { return access_flags().is_strict(); }
507 bool is_synthetic() const { return access_flags().is_synthetic(); }
509 // returns true if the method contains only a return operation
510 bool is_empty_method() const;
512 // returns true if this is a vanilla constructor
513 bool is_vanilla_constructor() const;
515 // checks method and its method holder
516 bool is_final_method() const;
517 bool is_strict_method() const;
519 // true if method needs no dynamic dispatch (final and/or no vtable entry)
520 bool can_be_statically_bound() const;
522 // returns true if the method has any backward branches.
523 bool has_loops() {
524 return access_flags().loops_flag_init() ? access_flags().has_loops() : compute_has_loops_flag();
525 };
527 bool compute_has_loops_flag();
529 bool has_jsrs() {
530 return access_flags().has_jsrs();
531 };
532 void set_has_jsrs() {
533 _access_flags.set_has_jsrs();
534 }
536 // returns true if the method has any monitors.
537 bool has_monitors() const { return is_synchronized() || access_flags().has_monitor_bytecodes(); }
538 bool has_monitor_bytecodes() const { return access_flags().has_monitor_bytecodes(); }
540 void set_has_monitor_bytecodes() { _access_flags.set_has_monitor_bytecodes(); }
542 // monitor matching. This returns a conservative estimate of whether the monitorenter/monitorexit bytecodes
543 // properly nest in the method. It might return false, even though they actually nest properly, since the information
544 // has not been computed yet.
545 bool guaranteed_monitor_matching() const { return access_flags().is_monitor_matching(); }
546 void set_guaranteed_monitor_matching() { _access_flags.set_monitor_matching(); }
548 // returns true if the method is an accessor function (setter/getter).
549 bool is_accessor() const;
551 // returns true if the method is an initializer (<init> or <clinit>).
552 bool is_initializer() const;
554 // returns true if the method is static OR if the classfile version < 51
555 bool has_valid_initializer_flags() const;
557 // returns true if the method name is <clinit> and the method has
558 // valid static initializer flags.
559 bool is_static_initializer() const;
561 // compiled code support
562 // NOTE: code() is inherently racy as deopt can be clearing code
563 // simultaneously. Use with caution.
564 bool has_compiled_code() const { return code() != NULL; }
566 // sizing
567 static int header_size() { return sizeof(Method)/HeapWordSize; }
568 static int size(bool is_native);
569 int size() const { return method_size(); }
570 #if INCLUDE_SERVICES
571 void collect_statistics(KlassSizeStats *sz) const;
572 #endif
574 // interpreter support
575 static ByteSize const_offset() { return byte_offset_of(Method, _constMethod ); }
576 static ByteSize access_flags_offset() { return byte_offset_of(Method, _access_flags ); }
577 #ifdef CC_INTERP
578 static ByteSize result_index_offset() { return byte_offset_of(Method, _result_index ); }
579 #endif /* CC_INTERP */
580 static ByteSize from_compiled_offset() { return byte_offset_of(Method, _from_compiled_entry); }
581 static ByteSize code_offset() { return byte_offset_of(Method, _code); }
582 static ByteSize invocation_counter_offset() { return byte_offset_of(Method, _invocation_counter); }
583 static ByteSize backedge_counter_offset() { return byte_offset_of(Method, _backedge_counter); }
584 static ByteSize method_data_offset() {
585 return byte_offset_of(Method, _method_data);
586 }
587 static ByteSize interpreter_invocation_counter_offset() { return byte_offset_of(Method, _interpreter_invocation_count); }
588 #ifndef PRODUCT
589 static ByteSize compiled_invocation_counter_offset() { return byte_offset_of(Method, _compiled_invocation_count); }
590 #endif // not PRODUCT
591 static ByteSize native_function_offset() { return in_ByteSize(sizeof(Method)); }
592 static ByteSize from_interpreted_offset() { return byte_offset_of(Method, _from_interpreted_entry ); }
593 static ByteSize interpreter_entry_offset() { return byte_offset_of(Method, _i2i_entry ); }
594 static ByteSize signature_handler_offset() { return in_ByteSize(sizeof(Method) + wordSize); }
596 // for code generation
597 static int method_data_offset_in_bytes() { return offset_of(Method, _method_data); }
598 static int interpreter_invocation_counter_offset_in_bytes()
599 { return offset_of(Method, _interpreter_invocation_count); }
600 static int intrinsic_id_offset_in_bytes() { return offset_of(Method, _intrinsic_id); }
601 static int intrinsic_id_size_in_bytes() { return sizeof(u1); }
603 // Static methods that are used to implement member methods where an exposed this pointer
604 // is needed due to possible GCs
605 static objArrayHandle resolved_checked_exceptions_impl(Method* this_oop, TRAPS);
607 // Returns the byte code index from the byte code pointer
608 int bci_from(address bcp) const;
609 address bcp_from(int bci) const;
610 int validate_bci_from_bcx(intptr_t bcx) const;
612 // Returns the line number for a bci if debugging information for the method is provided;
613 // -1 is returned otherwise.
614 int line_number_from_bci(int bci) const;
616 // Reflection support
617 bool is_overridden_in(Klass* k) const;
619 // JSR 292 support
620 bool is_method_handle_intrinsic() const; // MethodHandles::is_signature_polymorphic_intrinsic(intrinsic_id)
621 bool is_compiled_lambda_form() const; // intrinsic_id() == vmIntrinsics::_compiledLambdaForm
622 bool has_member_arg() const; // intrinsic_id() == vmIntrinsics::_linkToSpecial, etc.
623 static methodHandle make_method_handle_intrinsic(vmIntrinsics::ID iid, // _invokeBasic, _linkToVirtual
624 Symbol* signature, //anything at all
625 TRAPS);
626 static Klass* check_non_bcp_klass(Klass* klass);
627 // these operate only on invoke methods:
628 // presize interpreter frames for extra interpreter stack entries, if needed
629 // method handles want to be able to push a few extra values (e.g., a bound receiver), and
630 // invokedynamic sometimes needs to push a bootstrap method, call site, and arglist,
631 // all without checking for a stack overflow
632 static int extra_stack_entries() { return EnableInvokeDynamic ? 2 : 0; }
633 static int extra_stack_words(); // = extra_stack_entries() * Interpreter::stackElementSize()
635 // RedefineClasses() support:
636 bool is_old() const { return access_flags().is_old(); }
637 void set_is_old() { _access_flags.set_is_old(); }
638 bool is_obsolete() const { return access_flags().is_obsolete(); }
639 void set_is_obsolete() { _access_flags.set_is_obsolete(); }
640 bool on_stack() const { return access_flags().on_stack(); }
641 void set_on_stack(const bool value);
643 // see the definition in method.cpp for the gory details
644 bool should_not_be_cached() const;
646 // JVMTI Native method prefixing support:
647 bool is_prefixed_native() const { return access_flags().is_prefixed_native(); }
648 void set_is_prefixed_native() { _access_flags.set_is_prefixed_native(); }
650 // Rewriting support
651 static methodHandle clone_with_new_data(methodHandle m, u_char* new_code, int new_code_length,
652 u_char* new_compressed_linenumber_table, int new_compressed_linenumber_size, TRAPS);
654 // jmethodID handling
655 // Because the useful life-span of a jmethodID cannot be determined,
656 // once created they are never reclaimed. The methods to which they refer,
657 // however, can be GC'ed away if the class is unloaded or if the method is
658 // made obsolete or deleted -- in these cases, the jmethodID
659 // refers to NULL (as is the case for any weak reference).
660 static jmethodID make_jmethod_id(ClassLoaderData* loader_data, Method* mh);
661 static void destroy_jmethod_id(ClassLoaderData* loader_data, jmethodID mid);
663 // Use resolve_jmethod_id() in situations where the caller is expected
664 // to provide a valid jmethodID; the only sanity checks are in asserts;
665 // result guaranteed not to be NULL.
666 inline static Method* resolve_jmethod_id(jmethodID mid) {
667 assert(mid != NULL, "JNI method id should not be null");
668 return *((Method**)mid);
669 }
671 // Use checked_resolve_jmethod_id() in situations where the caller
672 // should provide a valid jmethodID, but might not. NULL is returned
673 // when the jmethodID does not refer to a valid method.
674 static Method* checked_resolve_jmethod_id(jmethodID mid);
676 static void change_method_associated_with_jmethod_id(jmethodID old_jmid_ptr, Method* new_method);
677 static bool is_method_id(jmethodID mid);
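// [Editorial sketch, not part of the original changeset.] A hypothetical
// helper for a boundary where the jmethodID may be stale or bogus, combining
// the checked APIs above:
//
static Method* method_from_jmethod_id_or_null(jmethodID mid) {
  if (mid == NULL || !Method::is_method_id(mid)) {
    return NULL;   // not an id this VM handed out
  }
  // May still return NULL if the method was unloaded or made obsolete.
  return Method::checked_resolve_jmethod_id(mid);
}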
679 // Clear methods
680 static void clear_jmethod_ids(ClassLoaderData* loader_data);
681 static void print_jmethod_ids(ClassLoaderData* loader_data, outputStream* out) PRODUCT_RETURN;
683 // Get this method's jmethodID -- allocate if it doesn't exist
684 jmethodID jmethod_id() { methodHandle this_h(this);
685 return InstanceKlass::get_jmethod_id(method_holder(), this_h); }
687 // Lookup the jmethodID for this method. Return NULL if not found.
688 // NOTE that this function can be called from a signal handler
689 // (see AsyncGetCallTrace support for Forte Analyzer) and this
690 // needs to be async-safe. No allocation should be done and
691 // so handles are not used to avoid deadlock.
692 jmethodID find_jmethod_id_or_null() { return method_holder()->jmethod_id_or_null(this); }
694 // JNI static invoke cached itable index accessors
695 int cached_itable_index() { return method_holder()->cached_itable_index(method_idnum()); }
696 void set_cached_itable_index(int index) { method_holder()->set_cached_itable_index(method_idnum(), index); }
698 // Support for inlining of intrinsic methods
699 vmIntrinsics::ID intrinsic_id() const { return (vmIntrinsics::ID) _intrinsic_id; }
700 void set_intrinsic_id(vmIntrinsics::ID id) { _intrinsic_id = (u1) id; }
702 // Helper routines for intrinsic_id() and vmIntrinsics::method().
703 void init_intrinsic_id(); // updates from _none if a match
704 static vmSymbols::SID klass_id_for_intrinsics(Klass* holder);
706 bool jfr_towrite() { return _jfr_towrite; }
707 void set_jfr_towrite(bool towrite) { _jfr_towrite = towrite; }
709 bool force_inline() { return _force_inline; }
710 void set_force_inline(bool x) { _force_inline = x; }
711 bool dont_inline() { return _dont_inline; }
712 void set_dont_inline(bool x) { _dont_inline = x; }
713 bool is_hidden() { return _hidden; }
714 void set_hidden(bool x) { _hidden = x; }
715 ConstMethod::MethodType method_type() const {
716 return _constMethod->method_type();
717 }
718 bool is_overpass() const { return method_type() == ConstMethod::OVERPASS; }
720 // On-stack replacement support
721 bool has_osr_nmethod(int level, bool match_level) {
722 return method_holder()->lookup_osr_nmethod(this, InvocationEntryBci, level, match_level) != NULL;
723 }
725 nmethod* lookup_osr_nmethod_for(int bci, int level, bool match_level) {
726 return method_holder()->lookup_osr_nmethod(this, bci, level, match_level);
727 }
729 // Inline cache support
730 void cleanup_inline_caches();
732 // Find if klass for method is loaded
733 bool is_klass_loaded_by_klass_index(int klass_index) const;
734 bool is_klass_loaded(int refinfo_index, bool must_be_resolved = false) const;
736 // Indicates whether compilation failed earlier for this method, or
737 // whether it is not compilable for another reason like having a
738 // breakpoint set in it.
739 bool is_not_compilable(int comp_level = CompLevel_any) const;
740 void set_not_compilable(int comp_level = CompLevel_all, bool report = true, const char* reason = NULL);
741 void set_not_compilable_quietly(int comp_level = CompLevel_all) {
742 set_not_compilable(comp_level, false);
743 }
744 bool is_not_osr_compilable(int comp_level = CompLevel_any) const;
745 void set_not_osr_compilable(int comp_level = CompLevel_all, bool report = true, const char* reason = NULL);
746 void set_not_osr_compilable_quietly(int comp_level = CompLevel_all) {
747 set_not_osr_compilable(comp_level, false);
748 }
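// [Editorial sketch, not part of the original changeset.] A hypothetical
// policy check showing the per-level compilability flags and the quiet
// setters; CompLevel_full_optimization is assumed to be the usual C2 level
// constant from the surrounding HotSpot headers.
//
static bool may_submit_for_c2(Method* m) {
  if (m->is_not_compilable(CompLevel_full_optimization)) {
    return false;                       // already ruled out at this level
  }
  if (m->number_of_breakpoints() > 0) {
    // e.g. a debugging session is active; rule the method out without a report.
    m->set_not_compilable_quietly(CompLevel_full_optimization);
    return false;
  }
  return true;
}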
750 private:
751 void print_made_not_compilable(int comp_level, bool is_osr, bool report, const char* reason);
753 public:
754 bool is_not_c1_compilable() const { return access_flags().is_not_c1_compilable(); }
755 void set_not_c1_compilable() { _access_flags.set_not_c1_compilable(); }
756 bool is_not_c2_compilable() const { return access_flags().is_not_c2_compilable(); }
757 void set_not_c2_compilable() { _access_flags.set_not_c2_compilable(); }
759 bool is_not_c1_osr_compilable() const { return is_not_c1_compilable(); } // don't waste an accessFlags bit
760 void set_not_c1_osr_compilable() { set_not_c1_compilable(); } // don't waste an accessFlags bit
761 bool is_not_c2_osr_compilable() const { return access_flags().is_not_c2_osr_compilable(); }
762 void set_not_c2_osr_compilable() { _access_flags.set_not_c2_osr_compilable(); }
764 // Background compilation support
765 bool queued_for_compilation() const { return access_flags().queued_for_compilation(); }
766 void set_queued_for_compilation() { _access_flags.set_queued_for_compilation(); }
767 void clear_queued_for_compilation() { _access_flags.clear_queued_for_compilation(); }
769 // Resolve all classes in signature, return 'true' if successful
770 static bool load_signature_classes(methodHandle m, TRAPS);
772 // Returns true if not all classes referenced in the signature, including the return type, have been loaded
773 static bool has_unloaded_classes_in_signature(methodHandle m, TRAPS);
775 // Printing
776 void print_short_name(outputStream* st = tty); // prints as klassname::methodname; exposed so field engineers can debug the VM
777 #if INCLUDE_JVMTI
778 void print_name(outputStream* st = tty); // prints as "virtual void foo(int)"; exposed for TraceRedefineClasses
779 #else
780 void print_name(outputStream* st = tty) PRODUCT_RETURN; // prints as "virtual void foo(int)"
781 #endif
783 // Helper routine used for method sorting
784 static void sort_methods(Array<Method*>* methods, bool idempotent = false);
786 // Deallocation function for redefine classes or if an error occurs
787 void deallocate_contents(ClassLoaderData* loader_data);
789 // Printing
790 #ifndef PRODUCT
791 void print_on(outputStream* st) const;
792 #endif
793 void print_value_on(outputStream* st) const;
795 const char* internal_name() const { return "{method}"; }
797 // Check for valid method pointer
798 bool is_valid_method() const;
800 // Verify
801 void verify() { verify_on(tty); }
802 void verify_on(outputStream* st);
804 private:
806 // Inlined elements
807 address* native_function_addr() const { assert(is_native(), "must be native"); return (address*) (this+1); }
808 address* signature_handler_addr() const { return native_function_addr() + 1; }
809 };
812 // Utility class for compressing line number tables
814 class CompressedLineNumberWriteStream: public CompressedWriteStream {
815 private:
816 int _bci;
817 int _line;
818 public:
819 // Constructor
820 CompressedLineNumberWriteStream(int initial_size) : CompressedWriteStream(initial_size), _bci(0), _line(0) {}
821 CompressedLineNumberWriteStream(u_char* buffer, int initial_size) : CompressedWriteStream(buffer, initial_size), _bci(0), _line(0) {}
823 // Write (bci, line number) pair to stream
824 void write_pair_regular(int bci_delta, int line_delta);
826 inline void write_pair_inline(int bci, int line) {
827 int bci_delta = bci - _bci;
828 int line_delta = line - _line;
829 _bci = bci;
830 _line = line;
831 // Skip (0,0) deltas - they do not add information and conflict with terminator.
832 if (bci_delta == 0 && line_delta == 0) return;
833 // Check if the bci delta fits in 5 unsigned bits and the line delta in 3.
834 if (((bci_delta & ~0x1F) == 0) && ((line_delta & ~0x7) == 0)) {
835 // Compress into single byte.
836 jubyte value = ((jubyte) bci_delta << 3) | (jubyte) line_delta;
837 // Check that value doesn't match escape character.
838 if (value != 0xFF) {
839 write_byte(value);
840 return;
841 }
842 }
843 write_pair_regular(bci_delta, line_delta);
844 }
846 // Windows AMD64 + Apr 2005 PSDK with /O2 generates bad code for write_pair.
847 // Disabling optimization doesn't work for methods in header files
848 // so we force it to call through the non-optimized version in the .cpp.
849 // It's gross, but it's the only way we can ensure that all callers are
850 // fixed. _MSC_VER is defined by the Windows compiler.
851 #if defined(_M_AMD64) && _MSC_VER >= 1400
852 void write_pair(int bci, int line);
853 #else
854 void write_pair(int bci, int line) { write_pair_inline(bci, line); }
855 #endif
857 // Write end-of-stream marker
858 void write_terminator() { write_byte(0); }
859 };
862 // Utility class for decompressing line number tables
864 class CompressedLineNumberReadStream: public CompressedReadStream {
865 private:
866 int _bci;
867 int _line;
868 public:
869 // Constructor
870 CompressedLineNumberReadStream(u_char* buffer);
871 // Read (bci, line number) pair from stream. Returns false at end-of-stream.
872 bool read_pair();
873 // Accessing bci and line number (after calling read_pair)
874 int bci() const { return _bci; }
875 int line() const { return _line; }
876 };
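// [Editorial sketch, not part of the original changeset.] A round-trip through
// the two stream classes above. When both deltas fit (bci delta in 5 unsigned
// bits, line delta in 3), the writer packs them into one byte, e.g. deltas
// (5, 1) become (5 << 3) | 1 = 0x29; larger deltas fall back to
// write_pair_regular(). Assumes buffer() is inherited from CompressedStream.
//
static void line_table_round_trip() {
  CompressedLineNumberWriteStream out(64);   // 64-byte initial buffer
  out.write_pair(0, 10);                     // line delta 10 > 7: regular encoding
  out.write_pair(5, 11);                     // deltas (5, 1): packed into one byte
  out.write_pair(40, 20);                    // bci delta 35 > 31: regular encoding
  out.write_terminator();

  CompressedLineNumberReadStream in(out.buffer());
  while (in.read_pair()) {
    tty->print_cr("bci %d -> line %d", in.bci(), in.line());
  }
}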
879 /// Fast Breakpoints.
881 // If this structure gets more complicated (because bpts get numerous),
882 // move it into its own header.
884 // There is presently no provision for concurrent access
885 // to breakpoint lists, which is only OK for JVMTI because
886 // breakpoints are written only at safepoints, and are read
887 // concurrently only outside of safepoints.
889 class BreakpointInfo : public CHeapObj<mtClass> {
890 friend class VMStructs;
891 private:
892 Bytecodes::Code _orig_bytecode;
893 int _bci;
894 u2 _name_index; // of method
895 u2 _signature_index; // of method
896 BreakpointInfo* _next; // simple storage allocation
898 public:
899 BreakpointInfo(Method* m, int bci);
901 // accessors
902 Bytecodes::Code orig_bytecode() { return _orig_bytecode; }
903 void set_orig_bytecode(Bytecodes::Code code) { _orig_bytecode = code; }
904 int bci() { return _bci; }
906 BreakpointInfo* next() const { return _next; }
907 void set_next(BreakpointInfo* n) { _next = n; }
909 // helpers used by searchers
910 bool match(const Method* m, int bci) {
911 return bci == _bci && match(m);
912 }
914 bool match(const Method* m) {
915 return _name_index == m->name_index() &&
916 _signature_index == m->signature_index();
917 }
919 void set(Method* method);
920 void clear(Method* method);
921 };
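// [Editorial sketch, not part of the original changeset.] A hypothetical walk
// of a breakpoint list (head obtained from the method holder's JVMTI data) to
// recover the bytecode that a planted breakpoint replaced; this is essentially
// what orig_bytecode_at() does.
//
static Bytecodes::Code original_code_at(BreakpointInfo* head, Method* m, int bci,
                                        Bytecodes::Code code_if_unset) {
  for (BreakpointInfo* bp = head; bp != NULL; bp = bp->next()) {
    if (bp->match(m, bci)) {
      return bp->orig_bytecode();   // bytecode displaced by the _breakpoint instruction
    }
  }
  return code_if_unset;             // no breakpoint planted at this bci
}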
923 // Utility class for accessing exception handlers
924 class ExceptionTable : public StackObj {
925 private:
926 ExceptionTableElement* _table;
927 u2 _length;
929 public:
930 ExceptionTable(Method* m) {
931 if (m->has_exception_handler()) {
932 _table = m->exception_table_start();
933 _length = m->exception_table_length();
934 } else {
935 _table = NULL;
936 _length = 0;
937 }
938 }
940 int length() const {
941 return _length;
942 }
944 u2 start_pc(int idx) const {
945 assert(idx < _length, "out of bounds");
946 return _table[idx].start_pc;
947 }
949 void set_start_pc(int idx, u2 value) {
950 assert(idx < _length, "out of bounds");
951 _table[idx].start_pc = value;
952 }
954 u2 end_pc(int idx) const {
955 assert(idx < _length, "out of bounds");
956 return _table[idx].end_pc;
957 }
959 void set_end_pc(int idx, u2 value) {
960 assert(idx < _length, "out of bounds");
961 _table[idx].end_pc = value;
962 }
964 u2 handler_pc(int idx) const {
965 assert(idx < _length, "out of bounds");
966 return _table[idx].handler_pc;
967 }
969 void set_handler_pc(int idx, u2 value) {
970 assert(idx < _length, "out of bounds");
971 _table[idx].handler_pc = value;
972 }
974 u2 catch_type_index(int idx) const {
975 assert(idx < _length, "out of bounds");
976 return _table[idx].catch_type_index;
977 }
979 void set_catch_type_index(int idx, u2 value) {
980 assert(idx < _length, "out of bounds");
981 _table[idx].catch_type_index = value;
982 }
983 };
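// [Editorial sketch, not part of the original changeset.] A hypothetical dump
// routine showing the accessor pattern; an ExceptionTable built from a method
// without handlers simply reports length() == 0.
//
static void print_exception_table(Method* m) {
  ExceptionTable table(m);
  for (int i = 0; i < table.length(); i++) {
    tty->print_cr("  [%d, %d) -> handler at %d, catch_type_index %d",
                  table.start_pc(i), table.end_pc(i),
                  table.handler_pc(i), table.catch_type_index(i));
  }
}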
985 #endif // SHARE_VM_OOPS_METHODOOP_HPP