Tue, 17 Mar 2015 01:56:32 -0700
8042796: jvmtiRedefineClasses.cpp: guarantee(false) failed: OLD and/or OBSOLETE method(s) found
Summary: Relax the guarantee for deleted methods
Reviewed-by: dcubed, coleenp
/*
 * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_OOPS_METHODOOP_HPP
#define SHARE_VM_OOPS_METHODOOP_HPP

#include "classfile/vmSymbols.hpp"
#include "code/compressedStream.hpp"
#include "compiler/oopMap.hpp"
#include "interpreter/invocationCounter.hpp"
#include "oops/annotations.hpp"
#include "oops/constantPool.hpp"
#include "oops/methodCounters.hpp"
#include "oops/instanceKlass.hpp"
#include "oops/oop.hpp"
#include "oops/typeArrayOop.hpp"
#include "utilities/accessFlags.hpp"
#include "utilities/growableArray.hpp"

// A Method* represents a Java method.
//
// Memory layout (each line represents a word). Note that most applications load thousands of methods,
// so keeping the size of this structure small has a big impact on footprint.
//
// We put all oops and method_size first for better gc cache locality.
//
// The actual bytecodes are inlined after the end of the Method struct.
//
// There are bits in the access_flags telling whether inlined tables are present.
// Note that accessing the line number and local variable tables is not performance critical at all.
// Accessing the checked exceptions table is used by reflection, so we put that last to make access
// to it fast.
//
// The line number table is compressed and inlined following the byte codes. It is found as the first
// byte following the byte codes. The checked exceptions table and the local variable table are inlined
// after the line number table, and indexed from the end of the method. We do not compress the checked
// exceptions table since the average length is less than 2, and do not bother to compress the local
// variable table either since it is mostly absent.
//
// Note that native_function and signature_handler have to be at fixed offsets (required by the interpreter)
//
// |------------------------------------------------------|
// | header                                               |
// | klass                                                |
// |------------------------------------------------------|
// | ConstMethod*            (oop)                        |
// |------------------------------------------------------|
// | methodData              (oop)                        |
// | methodCounters                                       |
// |------------------------------------------------------|
// | access_flags                                         |
// | vtable_index                                         |
// |------------------------------------------------------|
// | result_index (C++ interpreter only)                  |
// |------------------------------------------------------|
// | method_size | intrinsic_id| flags                    |
// |------------------------------------------------------|
// | code                    (pointer)                    |
// | i2i                     (pointer)                    |
// | adapter                 (pointer)                    |
// | from_compiled_entry     (pointer)                    |
// | from_interpreted_entry  (pointer)                    |
// |------------------------------------------------------|
// | native_function   (present only if native)           |
// | signature_handler (present only if native)           |
// |------------------------------------------------------|
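
// Illustrative sketch (an assumption for clarity, not VM code): because the
// two native-only words sit at fixed offsets directly after the Method
// object, they can be reached with plain pointer arithmetic, which is what
// native_function_addr() and signature_handler_addr() below rely on:
//
//   Method* m = ...;                       // some native method
//   address* nf_slot = (address*)(m + 1);  // native_function word
//   address* sh_slot = nf_slot + 1;        // signature_handler word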

class CheckedExceptionElement;
class LocalVariableTableElement;
class AdapterHandlerEntry;
class MethodData;
class MethodCounters;
class ConstMethod;
class InlineTableSizes;
class KlassSizeStats;

class Method : public Metadata {
 friend class VMStructs;
 private:
  ConstMethod*      _constMethod;    // Method read-only data.
  MethodData*       _method_data;
  MethodCounters*   _method_counters;
  AccessFlags       _access_flags;   // Access flags
  int               _vtable_index;   // vtable index of this method (see VtableIndexFlag)
                                     // note: can have vtables with >2**16 elements (because of inheritance)
#ifdef CC_INTERP
  int               _result_index;   // C++ interpreter needs for converting results to/from stack
#endif
  u2                _method_size;    // size of this object
  u1                _intrinsic_id;   // vmSymbols::intrinsic_id (0 == _none)
  u1                _jfr_towrite      : 1, // Flags
                    _caller_sensitive : 1,
                    _force_inline     : 1,
                    _hidden           : 1,
                    _dont_inline      : 1,
                                      : 3;

#ifndef PRODUCT
  int               _compiled_invocation_count;  // Number of nmethod invocations so far (for perf. debugging)
#endif
  // Entry point for calling both from and to the interpreter.
  address _i2i_entry;            // All-args-on-stack calling convention
  // Adapter blob (i2c/c2i) for this Method*. Set once when method is linked.
  AdapterHandlerEntry* _adapter;
  // Entry point for calling from compiled code, to compiled code if it exists
  // or else the interpreter.
  volatile address _from_compiled_entry; // Cache of: _code ? _code->entry_point() : _adapter->c2i_entry()
  // The entry point for calling both from and to compiled code is
  // "_code->entry_point()". Because of tiered compilation and de-opt, this
  // field can come and go. It can transition from NULL to not-null at any
  // time (whenever a compile completes). It can transition from not-null to
  // NULL only at safepoints (because of a de-opt).
  nmethod* volatile _code;               // Points to the corresponding piece of native code
  volatile address _from_interpreted_entry; // Cache of _code ? _adapter->i2c_entry() : _i2i_entry

  // Constructor
  Method(ConstMethod* xconst, AccessFlags access_flags, int size);
 public:

  static Method* allocate(ClassLoaderData* loader_data,
                          int byte_code_size,
                          AccessFlags access_flags,
                          InlineTableSizes* sizes,
                          ConstMethod::MethodType method_type,
                          TRAPS);

  // CDS and vtbl checking can create an empty Method to get vtbl pointer.
  Method(){}

  // The Method vtable is restored by this call when the Method is in the
  // shared archive. See patch_klass_vtables() in metaspaceShared.cpp for
  // all the gory details. SA, dtrace and pstack helpers distinguish metadata
  // by their vtable.
  void restore_vtable() { guarantee(is_method(), "vtable restored by this call"); }
  bool is_method() const volatile { return true; }

  void restore_unshareable_info(TRAPS);

  // accessors for instance variables

  ConstMethod* constMethod() const { return _constMethod; }
  void set_constMethod(ConstMethod* xconst) { _constMethod = xconst; }

  static address make_adapters(methodHandle mh, TRAPS);
  volatile address from_compiled_entry() const { return (address)OrderAccess::load_ptr_acquire(&_from_compiled_entry); }
  volatile address from_interpreted_entry() const { return (address)OrderAccess::load_ptr_acquire(&_from_interpreted_entry); }

  // access flag
  AccessFlags access_flags() const { return _access_flags; }
  void set_access_flags(AccessFlags flags) { _access_flags = flags; }

  // name
  Symbol* name() const { return constants()->symbol_at(name_index()); }
  int name_index() const { return constMethod()->name_index(); }
  void set_name_index(int index) { constMethod()->set_name_index(index); }

  // signature
  Symbol* signature() const { return constants()->symbol_at(signature_index()); }
  int signature_index() const { return constMethod()->signature_index(); }
  void set_signature_index(int index) { constMethod()->set_signature_index(index); }

  // generics support
  Symbol* generic_signature() const { int idx = generic_signature_index(); return ((idx != 0) ? constants()->symbol_at(idx) : (Symbol*)NULL); }
  int generic_signature_index() const { return constMethod()->generic_signature_index(); }
  void set_generic_signature_index(int index) { constMethod()->set_generic_signature_index(index); }

  // annotations support
  AnnotationArray* annotations() const {
    return constMethod()->method_annotations();
  }
  AnnotationArray* parameter_annotations() const {
    return constMethod()->parameter_annotations();
  }
  AnnotationArray* annotation_default() const {
    return constMethod()->default_annotations();
  }
  AnnotationArray* type_annotations() const {
    return constMethod()->type_annotations();
  }

#ifdef CC_INTERP
  void set_result_index(BasicType type);
  int result_index() { return _result_index; }
#endif

  // Helper routine: get klass name + "." + method name + signature as
  // C string, for the purpose of providing more useful NoSuchMethodErrors
  // and fatal error handling. The string is allocated in resource
  // area if a buffer is not provided by the caller.
  char* name_and_sig_as_C_string() const;
  char* name_and_sig_as_C_string(char* buf, int size) const;

  // Static routines for the situations where we don't have a Method*
  static char* name_and_sig_as_C_string(Klass* klass, Symbol* method_name, Symbol* signature);
  static char* name_and_sig_as_C_string(Klass* klass, Symbol* method_name, Symbol* signature, char* buf, int size);

  Bytecodes::Code java_code_at(int bci) const {
    return Bytecodes::java_code_at(this, bcp_from(bci));
  }
  Bytecodes::Code code_at(int bci) const {
    return Bytecodes::code_at(this, bcp_from(bci));
  }

  // JVMTI breakpoints
  Bytecodes::Code orig_bytecode_at(int bci) const;
  void set_orig_bytecode_at(int bci, Bytecodes::Code code);
  void set_breakpoint(int bci);
  void clear_breakpoint(int bci);
  void clear_all_breakpoints();
  // Tracking number of breakpoints, for fullspeed debugging.
  // Only mutated by VM thread.
  u2 number_of_breakpoints() const {
    MethodCounters* mcs = method_counters();
    if (mcs == NULL) {
      return 0;
    } else {
      return mcs->number_of_breakpoints();
    }
  }
  void incr_number_of_breakpoints(TRAPS) {
    MethodCounters* mcs = get_method_counters(CHECK);
    if (mcs != NULL) {
      mcs->incr_number_of_breakpoints();
    }
  }
  void decr_number_of_breakpoints(TRAPS) {
    MethodCounters* mcs = get_method_counters(CHECK);
    if (mcs != NULL) {
      mcs->decr_number_of_breakpoints();
    }
  }
  // Initialization only
  void clear_number_of_breakpoints() {
    MethodCounters* mcs = method_counters();
    if (mcs != NULL) {
      mcs->clear_number_of_breakpoints();
    }
  }

  // index into InstanceKlass methods() array
  // note: also used by jfr
  u2 method_idnum() const { return constMethod()->method_idnum(); }
  void set_method_idnum(u2 idnum) { constMethod()->set_method_idnum(idnum); }

  // code size
  int code_size() const { return constMethod()->code_size(); }

  // method size
  int method_size() const { return _method_size; }
  void set_method_size(int size) {
    assert(0 <= size && size < (1 << 16), "invalid method size");
    _method_size = size;
  }

  // constant pool for Klass* holding this method
  ConstantPool* constants() const { return constMethod()->constants(); }
  void set_constants(ConstantPool* c) { constMethod()->set_constants(c); }

  // max stack
  // return original max stack size for method verification
  int verifier_max_stack() const { return constMethod()->max_stack(); }
  int max_stack() const { return constMethod()->max_stack() + extra_stack_entries(); }
  void set_max_stack(int size) { constMethod()->set_max_stack(size); }

  // max locals
  int max_locals() const { return constMethod()->max_locals(); }
  void set_max_locals(int size) { constMethod()->set_max_locals(size); }

  int highest_comp_level() const;
  void set_highest_comp_level(int level);
  int highest_osr_comp_level() const;
  void set_highest_osr_comp_level(int level);

  // Count of times method was exited via exception while interpreting
  void interpreter_throwout_increment(TRAPS) {
    MethodCounters* mcs = get_method_counters(CHECK);
    if (mcs != NULL) {
      mcs->interpreter_throwout_increment();
    }
  }

  int interpreter_throwout_count() const {
    MethodCounters* mcs = method_counters();
    if (mcs == NULL) {
      return 0;
    } else {
      return mcs->interpreter_throwout_count();
    }
  }

  // size of parameters
  int size_of_parameters() const { return constMethod()->size_of_parameters(); }
  void set_size_of_parameters(int size) { constMethod()->set_size_of_parameters(size); }

  bool has_stackmap_table() const {
    return constMethod()->has_stackmap_table();
  }

  Array<u1>* stackmap_data() const {
    return constMethod()->stackmap_data();
  }

  void set_stackmap_data(Array<u1>* sd) {
    constMethod()->set_stackmap_data(sd);
  }

  // exception handler table
  bool has_exception_handler() const
                             { return constMethod()->has_exception_handler(); }
  int exception_table_length() const
                             { return constMethod()->exception_table_length(); }
  ExceptionTableElement* exception_table_start() const
                             { return constMethod()->exception_table_start(); }

  // Finds the first entry point bci of an exception handler for an
  // exception of klass ex_klass thrown at throw_bci. A value of NULL
  // for ex_klass indicates that the exception klass is not known; in
  // this case it matches any constraint class. Returns -1 if the
  // exception cannot be handled in this method. The handler
  // constraint classes are loaded if necessary. Note that this may
  // throw an exception if loading of the constraint classes causes
  // an IllegalAccessError (bugid 4307310) or an OutOfMemoryError.
  // If an exception is thrown, returns the bci of the
  // exception handler which caused the exception to be thrown, which
  // is needed for proper retries. See, for example,
  // InterpreterRuntime::exception_handler_for_exception.
  static int fast_exception_handler_bci_for(methodHandle mh, KlassHandle ex_klass, int throw_bci, TRAPS);

  // method data access
  MethodData* method_data() const {
    return _method_data;
  }

  void set_method_data(MethodData* data) {
    // The store into method must be released. On platforms without
    // total store order (TSO) the reference may become visible before
    // the initialization of data otherwise.
    OrderAccess::release_store_ptr((volatile void *)&_method_data, data);
  }

  MethodCounters* method_counters() const {
    return _method_counters;
  }

  void clear_method_counters() {
    _method_counters = NULL;
  }

  bool init_method_counters(MethodCounters* counters) {
    // Try to install a pointer to MethodCounters, return true on success.
    return Atomic::cmpxchg_ptr(counters, (volatile void*)&_method_counters, NULL) == NULL;
  }
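
  // Lazy-initialization sketch (illustrative, not VM code; 'm' and the
  // allocation site are assumptions): two threads may race to install
  // counters, and only the CAS winner's object is kept, so the loser should
  // discard its own allocation and re-read the field:
  //
  //   MethodCounters* mcs = /* newly allocated counters */;
  //   if (!m->init_method_counters(mcs)) {
  //     // lost the race: another thread installed counters first;
  //     // free mcs and use m->method_counters() instead
  //   }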

#ifdef TIERED
  // We are reusing interpreter_invocation_count as a holder for the previous event count!
  // We can do that since interpreter_invocation_count is not used in tiered.
  int prev_event_count() const {
    if (method_counters() == NULL) {
      return 0;
    } else {
      return method_counters()->interpreter_invocation_count();
    }
  }
  void set_prev_event_count(int count) {
    MethodCounters* mcs = method_counters();
    if (mcs != NULL) {
      mcs->set_interpreter_invocation_count(count);
    }
  }
  jlong prev_time() const {
    MethodCounters* mcs = method_counters();
    return mcs == NULL ? 0 : mcs->prev_time();
  }
  void set_prev_time(jlong time) {
    MethodCounters* mcs = method_counters();
    if (mcs != NULL) {
      mcs->set_prev_time(time);
    }
  }
  float rate() const {
    MethodCounters* mcs = method_counters();
    return mcs == NULL ? 0 : mcs->rate();
  }
  void set_rate(float rate) {
    MethodCounters* mcs = method_counters();
    if (mcs != NULL) {
      mcs->set_rate(rate);
    }
  }
#endif

  int invocation_count();
  int backedge_count();

  bool was_executed_more_than(int n);
  bool was_never_executed() { return !was_executed_more_than(0); }

  static void build_interpreter_method_data(methodHandle method, TRAPS);

  static MethodCounters* build_method_counters(Method* m, TRAPS);

  int interpreter_invocation_count() {
    if (TieredCompilation) {
      return invocation_count();
    } else {
      MethodCounters* mcs = method_counters();
      return (mcs == NULL) ? 0 : mcs->interpreter_invocation_count();
    }
  }
  int increment_interpreter_invocation_count(TRAPS) {
    if (TieredCompilation) ShouldNotReachHere();
    MethodCounters* mcs = get_method_counters(CHECK_0);
    return (mcs == NULL) ? 0 : mcs->increment_interpreter_invocation_count();
  }

#ifndef PRODUCT
  int compiled_invocation_count() const { return _compiled_invocation_count; }
  void set_compiled_invocation_count(int count) { _compiled_invocation_count = count; }
#endif // not PRODUCT

  // Clear (non-shared space) pointers which would not be relevant
  // if this (shared) method were mapped into another JVM.
  void remove_unshareable_info();

  // nmethod/verified compiler entry
  address verified_code_entry();
  bool check_code() const; // Not inline to avoid circular ref
  nmethod* volatile code() const { assert(check_code(), ""); return (nmethod*)OrderAccess::load_ptr_acquire(&_code); }
  void clear_code(); // Clear out any compiled code
  static void set_code(methodHandle mh, nmethod* code);
  void set_adapter_entry(AdapterHandlerEntry* adapter) { _adapter = adapter; }
  address get_i2c_entry();
  address get_c2i_entry();
  address get_c2i_unverified_entry();
  AdapterHandlerEntry* adapter() { return _adapter; }
  // setup entry points
  void link_method(methodHandle method, TRAPS);
  // clear entry points. Used by sharing code
  void unlink_method();

  // vtable index
  enum VtableIndexFlag {
    // Valid vtable indexes are non-negative (>= 0).
    // These few negative values are used as sentinels.
    itable_index_max        = -10, // first itable index, growing downward
    pending_itable_index    = -9,  // itable index will be assigned
    invalid_vtable_index    = -4,  // distinct from any valid vtable index
    garbage_vtable_index    = -3,  // not yet linked; no vtable layout yet
    nonvirtual_vtable_index = -2   // there is no need for vtable dispatch
    // 6330203 Note: Do not use -1, which was overloaded with many meanings.
  };
  DEBUG_ONLY(bool valid_vtable_index() const { return _vtable_index >= nonvirtual_vtable_index; })
  bool has_vtable_index() const { return _vtable_index >= 0; }
  int vtable_index() const { return _vtable_index; }
  void set_vtable_index(int index) { _vtable_index = index; }
  DEBUG_ONLY(bool valid_itable_index() const { return _vtable_index <= pending_itable_index; })
  bool has_itable_index() const { return _vtable_index <= itable_index_max; }
  int itable_index() const { assert(valid_itable_index(), "");
                             return itable_index_max - _vtable_index; }
  void set_itable_index(int index) { _vtable_index = itable_index_max - index; assert(valid_itable_index(), ""); }
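
  // Worked example of the encoding above: with itable_index_max == -10,
  // set_itable_index(3) stores _vtable_index = -10 - 3 = -13; then
  // has_itable_index() holds (since -13 <= -10) and itable_index()
  // recovers -10 - (-13) = 3.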

  // interpreter entry
  address interpreter_entry() const { return _i2i_entry; }
  // Only used during initial setup, so we can set both _i2i_entry and _from_interpreted_entry
  void set_interpreter_entry(address entry) { _i2i_entry = entry; _from_interpreted_entry = entry; }

  // native function (used for native methods only)
  enum {
    native_bind_event_is_interesting = true
  };
  address native_function() const { return *(native_function_addr()); }
  address critical_native_function();

  // Must specify a real function (not NULL).
  // Use clear_native_function() to unregister.
  void set_native_function(address function, bool post_event_flag);
  bool has_native_function() const;
  void clear_native_function();

  // signature handler (used for native methods only)
  address signature_handler() const { return *(signature_handler_addr()); }
  void set_signature_handler(address handler);

  // Interpreter oopmap support
  void mask_for(int bci, InterpreterOopMap* mask);

#ifndef PRODUCT
  // operations on invocation counter
  void print_invocation_count();
#endif

  // byte codes
  void set_code(address code) { return constMethod()->set_code(code); }
  address code_base() const { return constMethod()->code_base(); }
  bool contains(address bcp) const { return constMethod()->contains(bcp); }

  // prints byte codes
  void print_codes() const { print_codes_on(tty); }
  void print_codes_on(outputStream* st) const PRODUCT_RETURN;
  void print_codes_on(int from, int to, outputStream* st) const PRODUCT_RETURN;

  // method parameters
  bool has_method_parameters() const
                         { return constMethod()->has_method_parameters(); }
  int method_parameters_length() const
                         { return constMethod()->method_parameters_length(); }
  MethodParametersElement* method_parameters_start() const
                         { return constMethod()->method_parameters_start(); }

  // checked exceptions
  int checked_exceptions_length() const
                         { return constMethod()->checked_exceptions_length(); }
  CheckedExceptionElement* checked_exceptions_start() const
                         { return constMethod()->checked_exceptions_start(); }

  // localvariable table
  bool has_localvariable_table() const
                         { return constMethod()->has_localvariable_table(); }
  int localvariable_table_length() const
                         { return constMethod()->localvariable_table_length(); }
  LocalVariableTableElement* localvariable_table_start() const
                         { return constMethod()->localvariable_table_start(); }

  bool has_linenumber_table() const
                         { return constMethod()->has_linenumber_table(); }
  u_char* compressed_linenumber_table() const
                         { return constMethod()->compressed_linenumber_table(); }

  // method holder (the Klass* holding this method)
  InstanceKlass* method_holder() const { return constants()->pool_holder(); }

  void compute_size_of_parameters(Thread* thread); // word size of parameters (receiver if any + arguments)
  Symbol* klass_name() const;    // returns the name of the method holder
  BasicType result_type() const; // type of the method result
  int result_type_index() const; // type index of the method result
  bool is_returning_oop() const { BasicType r = result_type(); return (r == T_OBJECT || r == T_ARRAY); }
  bool is_returning_fp() const { BasicType r = result_type(); return (r == T_FLOAT || r == T_DOUBLE); }

  // Checked exceptions thrown by this method (resolved to mirrors)
  objArrayHandle resolved_checked_exceptions(TRAPS) { return resolved_checked_exceptions_impl(this, THREAD); }

  // Access flags
  bool is_public() const { return access_flags().is_public(); }
  bool is_private() const { return access_flags().is_private(); }
  bool is_protected() const { return access_flags().is_protected(); }
  bool is_package_private() const { return !is_public() && !is_private() && !is_protected(); }
  bool is_static() const { return access_flags().is_static(); }
  bool is_final() const { return access_flags().is_final(); }
  bool is_synchronized() const { return access_flags().is_synchronized(); }
  bool is_native() const { return access_flags().is_native(); }
  bool is_abstract() const { return access_flags().is_abstract(); }
  bool is_strict() const { return access_flags().is_strict(); }
  bool is_synthetic() const { return access_flags().is_synthetic(); }

  // returns true if the method contains only a return operation
  bool is_empty_method() const;

  // returns true if this is a vanilla constructor
  bool is_vanilla_constructor() const;

  // checks method and its method holder
  bool is_final_method() const;
  bool is_final_method(AccessFlags class_access_flags) const;
  bool is_default_method() const;

  // true if method needs no dynamic dispatch (final and/or no vtable entry)
  bool can_be_statically_bound() const;
  bool can_be_statically_bound(AccessFlags class_access_flags) const;

  // returns true if the method has any backward branches.
  bool has_loops() {
    return access_flags().loops_flag_init() ? access_flags().has_loops() : compute_has_loops_flag();
  }

  bool compute_has_loops_flag();

  bool has_jsrs() {
    return access_flags().has_jsrs();
  }
  void set_has_jsrs() {
    _access_flags.set_has_jsrs();
  }

  // returns true if the method has any monitors.
  bool has_monitors() const { return is_synchronized() || access_flags().has_monitor_bytecodes(); }
  bool has_monitor_bytecodes() const { return access_flags().has_monitor_bytecodes(); }

  void set_has_monitor_bytecodes() { _access_flags.set_has_monitor_bytecodes(); }

  // monitor matching. This returns a conservative estimate of whether the monitorenter/monitorexit bytecodes
  // properly nest in the method. It might return false, even though they actually nest properly, since the info
  // has not been computed yet.
  bool guaranteed_monitor_matching() const { return access_flags().is_monitor_matching(); }
  void set_guaranteed_monitor_matching() { _access_flags.set_monitor_matching(); }

  // returns true if the method is an accessor function (setter/getter).
  bool is_accessor() const;

  // returns true if the method does nothing but return a constant of primitive type
  bool is_constant_getter() const;

  // returns true if the method is an initializer (<init> or <clinit>).
  bool is_initializer() const;

  // returns true if the method is static OR if the classfile version < 51
  bool has_valid_initializer_flags() const;

  // returns true if the method name is <clinit> and the method has
  // valid static initializer flags.
  bool is_static_initializer() const;

  // compiled code support
  // NOTE: code() is inherently racy as deopt can be clearing code
  // simultaneously. Use with caution.
  bool has_compiled_code() const { return code() != NULL; }

  // sizing
  static int header_size() { return sizeof(Method)/HeapWordSize; }
  static int size(bool is_native);
  int size() const { return method_size(); }
#if INCLUDE_SERVICES
  void collect_statistics(KlassSizeStats *sz) const;
#endif

  // interpreter support
  static ByteSize const_offset() { return byte_offset_of(Method, _constMethod); }
  static ByteSize access_flags_offset() { return byte_offset_of(Method, _access_flags); }
#ifdef CC_INTERP
  static ByteSize result_index_offset() { return byte_offset_of(Method, _result_index); }
#endif /* CC_INTERP */
  static ByteSize from_compiled_offset() { return byte_offset_of(Method, _from_compiled_entry); }
  static ByteSize code_offset() { return byte_offset_of(Method, _code); }
  static ByteSize method_data_offset() {
    return byte_offset_of(Method, _method_data);
  }
  static ByteSize method_counters_offset() {
    return byte_offset_of(Method, _method_counters);
  }
#ifndef PRODUCT
  static ByteSize compiled_invocation_counter_offset() { return byte_offset_of(Method, _compiled_invocation_count); }
#endif // not PRODUCT
  static ByteSize native_function_offset() { return in_ByteSize(sizeof(Method)); }
  static ByteSize from_interpreted_offset() { return byte_offset_of(Method, _from_interpreted_entry); }
  static ByteSize interpreter_entry_offset() { return byte_offset_of(Method, _i2i_entry); }
  static ByteSize signature_handler_offset() { return in_ByteSize(sizeof(Method) + wordSize); }

  // for code generation
  static int method_data_offset_in_bytes() { return offset_of(Method, _method_data); }
  static int intrinsic_id_offset_in_bytes() { return offset_of(Method, _intrinsic_id); }
  static int intrinsic_id_size_in_bytes() { return sizeof(u1); }

  // Static methods that are used to implement member methods where an exposed this pointer
  // is needed due to possible GCs
  static objArrayHandle resolved_checked_exceptions_impl(Method* this_oop, TRAPS);

  // Returns the byte code index from the byte code pointer
  int bci_from(address bcp) const;
  address bcp_from(int bci) const;
  int validate_bci_from_bcx(intptr_t bcx) const;

  // Returns the line number for a bci if debugging information for the method is provided,
  // -1 is returned otherwise.
  int line_number_from_bci(int bci) const;

  // Reflection support
  bool is_overridden_in(Klass* k) const;

  // Stack walking support
  bool is_ignored_by_security_stack_walk() const;

  // JSR 292 support
  bool is_method_handle_intrinsic() const; // MethodHandles::is_signature_polymorphic_intrinsic(intrinsic_id)
  bool is_compiled_lambda_form() const;    // intrinsic_id() == vmIntrinsics::_compiledLambdaForm
  bool has_member_arg() const;             // intrinsic_id() == vmIntrinsics::_linkToSpecial, etc.
  static methodHandle make_method_handle_intrinsic(vmIntrinsics::ID iid, // _invokeBasic, _linkToVirtual
                                                   Symbol* signature,    // anything at all
                                                   TRAPS);
  static Klass* check_non_bcp_klass(Klass* klass);

  // How many extra stack entries for invokedynamic when it's enabled
  static const int extra_stack_entries_for_jsr292 = 1;

  // this operates only on invoke methods:
  // presize interpreter frames for extra interpreter stack entries, if needed
  // Account for the extra appendix argument for invokehandle/invokedynamic
  static int extra_stack_entries() { return EnableInvokeDynamic ? extra_stack_entries_for_jsr292 : 0; }
  static int extra_stack_words(); // = extra_stack_entries() * Interpreter::stackElementSize
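
  // Worked example: with EnableInvokeDynamic on, a method whose class file
  // declares max_stack == 4 reports max_stack() == 5 (see max_stack() above),
  // reserving one slot for the appendix argument of invokehandle/invokedynamic.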

  // RedefineClasses() support:
  bool is_old() const { return access_flags().is_old(); }
  void set_is_old() { _access_flags.set_is_old(); }
  bool is_obsolete() const { return access_flags().is_obsolete(); }
  void set_is_obsolete() { _access_flags.set_is_obsolete(); }
  bool is_deleted() const { return access_flags().is_deleted(); }
  void set_is_deleted() { _access_flags.set_is_deleted(); }
  bool on_stack() const { return access_flags().on_stack(); }
  void set_on_stack(const bool value);
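
  // Informal glossary for the flags above: an "old" method belongs to a
  // previous version of a redefined class; it is additionally "obsolete"
  // when its bytecodes are not equivalent (not EMCP) to those of the new
  // version, and "deleted" when the new class version has no matching
  // method at all. See jvmtiRedefineClasses.cpp for the authoritative
  // state transitions.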

  // see the definition in method.cpp for the gory details
  bool should_not_be_cached() const;

  // JVMTI Native method prefixing support:
  bool is_prefixed_native() const { return access_flags().is_prefixed_native(); }
  void set_is_prefixed_native() { _access_flags.set_is_prefixed_native(); }

  // Rewriting support
  static methodHandle clone_with_new_data(methodHandle m, u_char* new_code, int new_code_length,
                                          u_char* new_compressed_linenumber_table, int new_compressed_linenumber_size, TRAPS);

  // jmethodID handling
  // Because the useful life-span of a jmethodID cannot be determined,
  // once created they are never reclaimed. The methods to which they refer,
  // however, can be GC'ed away if the class is unloaded or if the method is
  // made obsolete or deleted -- in these cases, the jmethodID
  // refers to NULL (as is the case for any weak reference).
  static jmethodID make_jmethod_id(ClassLoaderData* loader_data, Method* mh);
  static void destroy_jmethod_id(ClassLoaderData* loader_data, jmethodID mid);

  // Use resolve_jmethod_id() in situations where the caller is expected
  // to provide a valid jmethodID; the only sanity checks are in asserts;
  // result guaranteed not to be NULL.
  inline static Method* resolve_jmethod_id(jmethodID mid) {
    assert(mid != NULL, "JNI method id should not be null");
    return *((Method**)mid);
  }

  // Use checked_resolve_jmethod_id() in situations where the caller
  // should provide a valid jmethodID, but might not. NULL is returned
  // when the jmethodID does not refer to a valid method.
  static Method* checked_resolve_jmethod_id(jmethodID mid);
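
  // Sketch of the safe lookup path (illustrative only): a jmethodID captured
  // before its class was redefined or unloaded may no longer refer to a live
  // method, so callers that cannot vouch for the id should do:
  //
  //   Method* m = Method::checked_resolve_jmethod_id(mid);
  //   if (m == NULL) {
  //     // method was unloaded, deleted or made obsolete
  //   }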

  static void change_method_associated_with_jmethod_id(jmethodID old_jmid_ptr, Method* new_method);
  static bool is_method_id(jmethodID mid);

  // Clear methods
  static void clear_jmethod_ids(ClassLoaderData* loader_data);
  static void print_jmethod_ids(ClassLoaderData* loader_data, outputStream* out) PRODUCT_RETURN;

  // Get this method's jmethodID -- allocate if it doesn't exist
  jmethodID jmethod_id() { methodHandle this_h(this);
                           return InstanceKlass::get_jmethod_id(method_holder(), this_h); }

  // Lookup the jmethodID for this method. Return NULL if not found.
  // NOTE that this function can be called from a signal handler
  // (see AsyncGetCallTrace support for Forte Analyzer) and this
  // needs to be async-safe. No allocation should be done and
  // so handles are not used to avoid deadlock.
  jmethodID find_jmethod_id_or_null() { return method_holder()->jmethod_id_or_null(this); }

  // Support for inlining of intrinsic methods
  vmIntrinsics::ID intrinsic_id() const { return (vmIntrinsics::ID) _intrinsic_id; }
  void set_intrinsic_id(vmIntrinsics::ID id) { _intrinsic_id = (u1) id; }

  // Helper routines for intrinsic_id() and vmIntrinsics::method().
  void init_intrinsic_id(); // updates from _none if a match
  static vmSymbols::SID klass_id_for_intrinsics(Klass* holder);

  bool jfr_towrite() { return _jfr_towrite; }
  void set_jfr_towrite(bool x) { _jfr_towrite = x; }
  bool caller_sensitive() { return _caller_sensitive; }
  void set_caller_sensitive(bool x) { _caller_sensitive = x; }
  bool force_inline() { return _force_inline; }
  void set_force_inline(bool x) { _force_inline = x; }
  bool dont_inline() { return _dont_inline; }
  void set_dont_inline(bool x) { _dont_inline = x; }
  bool is_hidden() { return _hidden; }
  void set_hidden(bool x) { _hidden = x; }
  ConstMethod::MethodType method_type() const {
    return _constMethod->method_type();
  }
  bool is_overpass() const { return method_type() == ConstMethod::OVERPASS; }

  // On-stack replacement support
  bool has_osr_nmethod(int level, bool match_level) {
    return method_holder()->lookup_osr_nmethod(this, InvocationEntryBci, level, match_level) != NULL;
  }

  int mark_osr_nmethods() {
    return method_holder()->mark_osr_nmethods(this);
  }

  nmethod* lookup_osr_nmethod_for(int bci, int level, bool match_level) {
    return method_holder()->lookup_osr_nmethod(this, bci, level, match_level);
  }

  // Inline cache support
  void cleanup_inline_caches();

  // Find if klass for method is loaded
  bool is_klass_loaded_by_klass_index(int klass_index) const;
  bool is_klass_loaded(int refinfo_index, bool must_be_resolved = false) const;

  // Indicates whether compilation failed earlier for this method, or
  // whether it is not compilable for another reason like having a
  // breakpoint set in it.
  bool is_not_compilable(int comp_level = CompLevel_any) const;
  void set_not_compilable(int comp_level = CompLevel_all, bool report = true, const char* reason = NULL);
  void set_not_compilable_quietly(int comp_level = CompLevel_all) {
    set_not_compilable(comp_level, false);
  }
  bool is_not_osr_compilable(int comp_level = CompLevel_any) const;
  void set_not_osr_compilable(int comp_level = CompLevel_all, bool report = true, const char* reason = NULL);
  void set_not_osr_compilable_quietly(int comp_level = CompLevel_all) {
    set_not_osr_compilable(comp_level, false);
  }
  bool is_always_compilable() const;

 private:
  void print_made_not_compilable(int comp_level, bool is_osr, bool report, const char* reason);

 public:
  MethodCounters* get_method_counters(TRAPS) {
    if (_method_counters == NULL) {
      build_method_counters(this, CHECK_AND_CLEAR_NULL);
    }
    return _method_counters;
  }

  bool is_not_c1_compilable() const { return access_flags().is_not_c1_compilable(); }
  void set_not_c1_compilable() { _access_flags.set_not_c1_compilable(); }
  void clear_not_c1_compilable() { _access_flags.clear_not_c1_compilable(); }
  bool is_not_c2_compilable() const { return access_flags().is_not_c2_compilable(); }
  void set_not_c2_compilable() { _access_flags.set_not_c2_compilable(); }
  void clear_not_c2_compilable() { _access_flags.clear_not_c2_compilable(); }

  bool is_not_c1_osr_compilable() const { return is_not_c1_compilable(); } // don't waste an accessFlags bit
  void set_not_c1_osr_compilable() { set_not_c1_compilable(); }            // don't waste an accessFlags bit
  void clear_not_c1_osr_compilable() { clear_not_c1_compilable(); }        // don't waste an accessFlags bit
  bool is_not_c2_osr_compilable() const { return access_flags().is_not_c2_osr_compilable(); }
  void set_not_c2_osr_compilable() { _access_flags.set_not_c2_osr_compilable(); }
  void clear_not_c2_osr_compilable() { _access_flags.clear_not_c2_osr_compilable(); }

  // Background compilation support
  bool queued_for_compilation() const { return access_flags().queued_for_compilation(); }
  void set_queued_for_compilation() { _access_flags.set_queued_for_compilation(); }
  void clear_queued_for_compilation() { _access_flags.clear_queued_for_compilation(); }

  // Resolve all classes in signature, return 'true' if successful
  static bool load_signature_classes(methodHandle m, TRAPS);

  // Returns true if not all classes referenced in the signature, including the return type, have been loaded
  static bool has_unloaded_classes_in_signature(methodHandle m, TRAPS);

  // Printing
  void print_short_name(outputStream* st = tty); // prints as klassname::methodname; Exposed so field engineers can debug VM
#if INCLUDE_JVMTI
  void print_name(outputStream* st = tty); // prints as "virtual void foo(int)"; exposed for TraceRedefineClasses
#else
  void print_name(outputStream* st = tty) PRODUCT_RETURN; // prints as "virtual void foo(int)"
#endif

  // Helper routine used for method sorting
  static void sort_methods(Array<Method*>* methods, bool idempotent = false, bool set_idnums = true);

  // Deallocation function for redefine classes or if an error occurs
  void deallocate_contents(ClassLoaderData* loader_data);

  // Printing
#ifndef PRODUCT
  void print_on(outputStream* st) const;
#endif
  void print_value_on(outputStream* st) const;

  const char* internal_name() const { return "{method}"; }

  // Check for valid method pointer
  static bool has_method_vptr(const void* ptr);
  bool is_valid_method() const;

  // Verify
  void verify() { verify_on(tty); }
  void verify_on(outputStream* st);

 private:

  // Inlined elements
  address* native_function_addr() const { assert(is_native(), "must be native"); return (address*) (this+1); }
  address* signature_handler_addr() const { return native_function_addr() + 1; }
};


// Utility class for compressing line number tables

class CompressedLineNumberWriteStream: public CompressedWriteStream {
 private:
  int _bci;
  int _line;
 public:
  // Constructor
  CompressedLineNumberWriteStream(int initial_size) : CompressedWriteStream(initial_size), _bci(0), _line(0) {}
  CompressedLineNumberWriteStream(u_char* buffer, int initial_size) : CompressedWriteStream(buffer, initial_size), _bci(0), _line(0) {}

  // Write (bci, line number) pair to stream
  void write_pair_regular(int bci_delta, int line_delta);

  inline void write_pair_inline(int bci, int line) {
    int bci_delta = bci - _bci;
    int line_delta = line - _line;
    _bci = bci;
    _line = line;
    // Skip (0,0) deltas - they do not add information and conflict with terminator.
    if (bci_delta == 0 && line_delta == 0) return;
    // Check if bci is 5-bit and line number 3-bit unsigned.
    if (((bci_delta & ~0x1F) == 0) && ((line_delta & ~0x7) == 0)) {
      // Compress into single byte.
      jubyte value = ((jubyte) bci_delta << 3) | (jubyte) line_delta;
      // Check that value doesn't match escape character.
      if (value != 0xFF) {
        write_byte(value);
        return;
      }
    }
    write_pair_regular(bci_delta, line_delta);
  }
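
  // Worked example of the single-byte encoding: bci_delta == 4 and
  // line_delta == 2 both fit their fields, so the pair is written as
  // (4 << 3) | 2 == 0x22. A pair such as bci_delta == 31, line_delta == 7
  // would produce 0xFF, which collides with the escape value and therefore
  // falls through to write_pair_regular().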

  // Windows AMD64 + Apr 2005 PSDK with /O2 generates bad code for write_pair.
  // Disabling optimization doesn't work for methods in header files
  // so we force it to call through the non-optimized version in the .cpp.
  // It's gross, but it's the only way we can ensure that all callers are
  // fixed. _MSC_VER is defined by the Windows compiler.
#if defined(_M_AMD64) && _MSC_VER >= 1400
  void write_pair(int bci, int line);
#else
  void write_pair(int bci, int line) { write_pair_inline(bci, line); }
#endif

  // Write end-of-stream marker
  void write_terminator() { write_byte(0); }
};


// Utility class for decompressing line number tables

class CompressedLineNumberReadStream: public CompressedReadStream {
 private:
  int _bci;
  int _line;
 public:
  // Constructor
  CompressedLineNumberReadStream(u_char* buffer);
  // Read (bci, line number) pair from stream. Returns false at end-of-stream.
  bool read_pair();
  // Accessing bci and line number (after calling read_pair)
  int bci() const { return _bci; }
  int line() const { return _line; }
};
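
// Decode-loop sketch (illustrative): walking a method's line number table
// with the reader above, assuming the method has one:
//
//   if (method->has_linenumber_table()) {
//     CompressedLineNumberReadStream stream(method->compressed_linenumber_table());
//     while (stream.read_pair()) {
//       tty->print_cr("bci %d -> line %d", stream.bci(), stream.line());
//     }
//   }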


/// Fast Breakpoints.

// If this structure gets more complicated (because bpts get numerous),
// move it into its own header.

// There is presently no provision for concurrent access
// to breakpoint lists, which is only OK for JVMTI because
// breakpoints are written only at safepoints, and are read
// concurrently only outside of safepoints.

class BreakpointInfo : public CHeapObj<mtClass> {
  friend class VMStructs;
 private:
  Bytecodes::Code _orig_bytecode;
  int _bci;
  u2 _name_index;        // of method
  u2 _signature_index;   // of method
  BreakpointInfo* _next; // simple storage allocation

 public:
  BreakpointInfo(Method* m, int bci);

  // accessors
  Bytecodes::Code orig_bytecode() { return _orig_bytecode; }
  void set_orig_bytecode(Bytecodes::Code code) { _orig_bytecode = code; }
  int bci() { return _bci; }

  BreakpointInfo* next() const { return _next; }
  void set_next(BreakpointInfo* n) { _next = n; }

  // helps for searchers
  bool match(const Method* m, int bci) {
    return bci == _bci && match(m);
  }

  bool match(const Method* m) {
    return _name_index == m->name_index() &&
           _signature_index == m->signature_index();
  }

  void set(Method* method);
  void clear(Method* method);
};

// Utility class for accessing exception handlers
class ExceptionTable : public StackObj {
 private:
  ExceptionTableElement* _table;
  u2 _length;

 public:
  ExceptionTable(const Method* m) {
    if (m->has_exception_handler()) {
      _table = m->exception_table_start();
      _length = m->exception_table_length();
    } else {
      _table = NULL;
      _length = 0;
    }
  }

  int length() const {
    return _length;
  }

  u2 start_pc(int idx) const {
    assert(idx < _length, "out of bounds");
    return _table[idx].start_pc;
  }

  void set_start_pc(int idx, u2 value) {
    assert(idx < _length, "out of bounds");
    _table[idx].start_pc = value;
  }

  u2 end_pc(int idx) const {
    assert(idx < _length, "out of bounds");
    return _table[idx].end_pc;
  }

  void set_end_pc(int idx, u2 value) {
    assert(idx < _length, "out of bounds");
    _table[idx].end_pc = value;
  }

  u2 handler_pc(int idx) const {
    assert(idx < _length, "out of bounds");
    return _table[idx].handler_pc;
  }

  void set_handler_pc(int idx, u2 value) {
    assert(idx < _length, "out of bounds");
    _table[idx].handler_pc = value;
  }

  u2 catch_type_index(int idx) const {
    assert(idx < _length, "out of bounds");
    return _table[idx].catch_type_index;
  }

  void set_catch_type_index(int idx, u2 value) {
    assert(idx < _length, "out of bounds");
    _table[idx].catch_type_index = value;
  }
};
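
// Usage sketch (illustrative; 'method' and 'bci' are assumed to come from
// the caller): scanning a method's handlers for one covering a given bci.
// A catch_type_index of 0 denotes a catch-all entry:
//
//   ExceptionTable table(method);
//   for (int i = 0; i < table.length(); i++) {
//     if (table.start_pc(i) <= bci && bci < table.end_pc(i)) {
//       // candidate handler at table.handler_pc(i)
//     }
//   }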

#endif // SHARE_VM_OOPS_METHODOOP_HPP