Mon, 07 Oct 2013 12:20:28 -0400
8009130: Lambda: Fix access controls, loader constraints.
Summary: New default methods list with inherited superinterface methods
Reviewed-by: minqi, sspitsyn, coleenp
1 /*
2 * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
25 #ifndef SHARE_VM_OOPS_METHODOOP_HPP
26 #define SHARE_VM_OOPS_METHODOOP_HPP
28 #include "classfile/vmSymbols.hpp"
29 #include "code/compressedStream.hpp"
30 #include "compiler/oopMap.hpp"
31 #include "interpreter/invocationCounter.hpp"
32 #include "oops/annotations.hpp"
33 #include "oops/constantPool.hpp"
34 #include "oops/methodCounters.hpp"
35 #include "oops/instanceKlass.hpp"
36 #include "oops/oop.hpp"
37 #include "oops/typeArrayOop.hpp"
38 #include "utilities/accessFlags.hpp"
39 #include "utilities/growableArray.hpp"
41 // A Method* represents a Java method.
42 //
43 // Memory layout (each line represents a word). Note that most applications load thousands of methods,
44 // so keeping the size of this structure small has a big impact on footprint.
45 //
46 // We put all oops and method_size first for better gc cache locality.
47 //
48 // The actual bytecodes are inlined after the end of the Method struct.
49 //
50 // There are bits in the access_flags telling whether inlined tables are present.
51 // Note that accessing the line number and local variable tables is not performance critical at all.
52 // The checked exceptions table is used by reflection, so we put that last to make access
53 // to it fast.
54 //
55 // The line number table is compressed and inlined following the byte codes. It is found as the first
56 // byte following the byte codes. The checked exceptions table and the local variable table are inlined
57 // after the line number table, and indexed from the end of the method. We do not compress the checked
58 // exceptions table since the average length is less than 2, and do not bother to compress the local
59 // variable table either since it is mostly absent.
60 //
61 // Note that native_function and signature_handler have to be at fixed offsets (required by the interpreter)
62 //
63 // |------------------------------------------------------|
64 // | header |
65 // | klass |
66 // |------------------------------------------------------|
67 // | ConstMethod* (oop) |
68 // |------------------------------------------------------|
69 // | methodData (oop) |
70 // | methodCounters |
71 // |------------------------------------------------------|
72 // | access_flags |
73 // | vtable_index |
74 // |------------------------------------------------------|
75 // | result_index (C++ interpreter only) |
76 // |------------------------------------------------------|
77 // | method_size | intrinsic_id| flags |
78 // |------------------------------------------------------|
79 // | code (pointer) |
80 // | i2i (pointer) |
81 // | adapter (pointer) |
82 // | from_compiled_entry (pointer) |
83 // | from_interpreted_entry (pointer) |
84 // |------------------------------------------------------|
85 // | native_function (present only if native) |
86 // | signature_handler (present only if native) |
87 // |------------------------------------------------------|
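// Illustrative note (a sketch, not part of the declared API; 'm' is a hypothetical
// Method*): for a native method the two trailing words are addressed relative to the
// end of the fixed-size object,
//
//   address* nf_addr = (address*) (m + 1);  // what native_function_addr() computes
//   address* sh_addr = nf_addr + 1;         // what signature_handler_addr() computes
//
// which is why native_function_offset() below is sizeof(Method) and
// signature_handler_offset() is sizeof(Method) + wordSize.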
90 class CheckedExceptionElement;
91 class LocalVariableTableElement;
92 class AdapterHandlerEntry;
93 class MethodData;
94 class MethodCounters;
95 class ConstMethod;
96 class InlineTableSizes;
97 class KlassSizeStats;
99 class Method : public Metadata {
100 friend class VMStructs;
101 private:
102 ConstMethod* _constMethod; // Method read-only data.
103 MethodData* _method_data;
104 MethodCounters* _method_counters;
105 AccessFlags _access_flags; // Access flags
106 int _vtable_index; // vtable index of this method (see VtableIndexFlag)
107 // note: can have vtables with >2**16 elements (because of inheritance)
108 #ifdef CC_INTERP
109 int _result_index; // needed by the C++ interpreter for converting results to/from the stack
110 #endif
111 u2 _method_size; // size of this object
112 u1 _intrinsic_id; // vmSymbols::intrinsic_id (0 == _none)
113 u1 _jfr_towrite : 1, // Flags
114 _caller_sensitive : 1,
115 _force_inline : 1,
116 _hidden : 1,
117 _dont_inline : 1,
118 : 3;
120 #ifndef PRODUCT
121 int _compiled_invocation_count; // Number of nmethod invocations so far (for perf. debugging)
122 #endif
123 // Entry point for calling both from and to the interpreter.
124 address _i2i_entry; // All-args-on-stack calling convention
125 // Adapter blob (i2c/c2i) for this Method*. Set once when method is linked.
126 AdapterHandlerEntry* _adapter;
127 // Entry point for calling from compiled code, to compiled code if it exists
128 // or else the interpreter.
129 volatile address _from_compiled_entry; // Cache of: _code ? _code->entry_point() : _adapter->c2i_entry()
130 // The entry point for calling both from and to compiled code is
131 // "_code->entry_point()". Because of tiered compilation and de-opt, this
132 // field can come and go. It can transition from NULL to not-null at any
133 // time (whenever a compile completes). It can transition from not-null to
134 // NULL only at safepoints (because of a de-opt).
135 nmethod* volatile _code; // Points to the corresponding piece of native code
136 volatile address _from_interpreted_entry; // Cache of _code ? _adapter->i2c_entry() : _i2i_entry
138 // Constructor
139 Method(ConstMethod* xconst, AccessFlags access_flags, int size);
140 public:
142 static Method* allocate(ClassLoaderData* loader_data,
143 int byte_code_size,
144 AccessFlags access_flags,
145 InlineTableSizes* sizes,
146 ConstMethod::MethodType method_type,
147 TRAPS);
149 // CDS and vtbl checking can create an empty Method to get vtbl pointer.
150 Method(){}
152 // The Method vtable is restored by this call when the Method is in the
153 // shared archive. See patch_klass_vtables() in metaspaceShared.cpp for
154 // all the gory details. SA, dtrace and pstack helpers distinguish metadata
155 // by their vtable.
156 void restore_vtable() { guarantee(is_method(), "vtable restored by this call"); }
157 bool is_method() const volatile { return true; }
159 // accessors for instance variables
161 ConstMethod* constMethod() const { return _constMethod; }
162 void set_constMethod(ConstMethod* xconst) { _constMethod = xconst; }
165 static address make_adapters(methodHandle mh, TRAPS);
166 volatile address from_compiled_entry() const { return (address)OrderAccess::load_ptr_acquire(&_from_compiled_entry); }
167 volatile address from_interpreted_entry() const { return (address)OrderAccess::load_ptr_acquire(&_from_interpreted_entry); }
169 // access flag
170 AccessFlags access_flags() const { return _access_flags; }
171 void set_access_flags(AccessFlags flags) { _access_flags = flags; }
173 // name
174 Symbol* name() const { return constants()->symbol_at(name_index()); }
175 int name_index() const { return constMethod()->name_index(); }
176 void set_name_index(int index) { constMethod()->set_name_index(index); }
178 // signature
179 Symbol* signature() const { return constants()->symbol_at(signature_index()); }
180 int signature_index() const { return constMethod()->signature_index(); }
181 void set_signature_index(int index) { constMethod()->set_signature_index(index); }
183 // generics support
184 Symbol* generic_signature() const { int idx = generic_signature_index(); return ((idx != 0) ? constants()->symbol_at(idx) : (Symbol*)NULL); }
185 int generic_signature_index() const { return constMethod()->generic_signature_index(); }
186 void set_generic_signature_index(int index) { constMethod()->set_generic_signature_index(index); }
188 // annotations support
189 AnnotationArray* annotations() const {
190 return constMethod()->method_annotations();
191 }
192 AnnotationArray* parameter_annotations() const {
193 return constMethod()->parameter_annotations();
194 }
195 AnnotationArray* annotation_default() const {
196 return constMethod()->default_annotations();
197 }
198 AnnotationArray* type_annotations() const {
199 return constMethod()->type_annotations();
200 }
202 #ifdef CC_INTERP
203 void set_result_index(BasicType type);
204 int result_index() { return _result_index; }
205 #endif
207 // Helper routine: get klass name + "." + method name + signature as
208 // C string, for the purpose of providing more useful NoSuchMethodErrors
209 // and fatal error handling. The string is allocated in resource
210 // area if a buffer is not provided by the caller.
211 char* name_and_sig_as_C_string() const;
212 char* name_and_sig_as_C_string(char* buf, int size) const;
214 // Static routines for situations where we don't have a Method*
215 static char* name_and_sig_as_C_string(Klass* klass, Symbol* method_name, Symbol* signature);
216 static char* name_and_sig_as_C_string(Klass* klass, Symbol* method_name, Symbol* signature, char* buf, int size);
218 Bytecodes::Code java_code_at(int bci) const {
219 return Bytecodes::java_code_at(this, bcp_from(bci));
220 }
221 Bytecodes::Code code_at(int bci) const {
222 return Bytecodes::code_at(this, bcp_from(bci));
223 }
225 // JVMTI breakpoints
226 Bytecodes::Code orig_bytecode_at(int bci) const;
227 void set_orig_bytecode_at(int bci, Bytecodes::Code code);
228 void set_breakpoint(int bci);
229 void clear_breakpoint(int bci);
230 void clear_all_breakpoints();
231 // Tracking number of breakpoints, for fullspeed debugging.
232 // Only mutated by VM thread.
233 u2 number_of_breakpoints() const {
234 if (method_counters() == NULL) {
235 return 0;
236 } else {
237 return method_counters()->number_of_breakpoints();
238 }
239 }
240 void incr_number_of_breakpoints(TRAPS) {
241 MethodCounters* mcs = get_method_counters(CHECK);
242 if (mcs != NULL) {
243 mcs->incr_number_of_breakpoints();
244 }
245 }
246 void decr_number_of_breakpoints(TRAPS) {
247 MethodCounters* mcs = get_method_counters(CHECK);
248 if (mcs != NULL) {
249 mcs->decr_number_of_breakpoints();
250 }
251 }
252 // Initialization only
253 void clear_number_of_breakpoints() {
254 if (method_counters() != NULL) {
255 method_counters()->clear_number_of_breakpoints();
256 }
257 }
259 // index into InstanceKlass methods() array
260 // note: also used by jfr
261 u2 method_idnum() const { return constMethod()->method_idnum(); }
262 void set_method_idnum(u2 idnum) { constMethod()->set_method_idnum(idnum); }
264 // code size
265 int code_size() const { return constMethod()->code_size(); }
267 // method size
268 int method_size() const { return _method_size; }
269 void set_method_size(int size) {
270 assert(0 <= size && size < (1 << 16), "invalid method size");
271 _method_size = size;
272 }
274 // constant pool for Klass* holding this method
275 ConstantPool* constants() const { return constMethod()->constants(); }
276 void set_constants(ConstantPool* c) { constMethod()->set_constants(c); }
278 // max stack
279 // return original max stack size for method verification
280 int verifier_max_stack() const { return constMethod()->max_stack(); }
281 int max_stack() const { return constMethod()->max_stack() + extra_stack_entries(); }
282 void set_max_stack(int size) { constMethod()->set_max_stack(size); }
284 // max locals
285 int max_locals() const { return constMethod()->max_locals(); }
286 void set_max_locals(int size) { constMethod()->set_max_locals(size); }
288 int highest_comp_level() const;
289 void set_highest_comp_level(int level);
290 int highest_osr_comp_level() const;
291 void set_highest_osr_comp_level(int level);
293 // Count of times method was exited via exception while interpreting
294 void interpreter_throwout_increment(TRAPS) {
295 MethodCounters* mcs = get_method_counters(CHECK);
296 if (mcs != NULL) {
297 mcs->interpreter_throwout_increment();
298 }
299 }
301 int interpreter_throwout_count() const {
302 if (method_counters() == NULL) {
303 return 0;
304 } else {
305 return method_counters()->interpreter_throwout_count();
306 }
307 }
309 // size of parameters
310 int size_of_parameters() const { return constMethod()->size_of_parameters(); }
311 void set_size_of_parameters(int size) { constMethod()->set_size_of_parameters(size); }
313 bool has_stackmap_table() const {
314 return constMethod()->has_stackmap_table();
315 }
317 Array<u1>* stackmap_data() const {
318 return constMethod()->stackmap_data();
319 }
321 void set_stackmap_data(Array<u1>* sd) {
322 constMethod()->set_stackmap_data(sd);
323 }
325 // exception handler table
326 bool has_exception_handler() const
327 { return constMethod()->has_exception_handler(); }
328 int exception_table_length() const
329 { return constMethod()->exception_table_length(); }
330 ExceptionTableElement* exception_table_start() const
331 { return constMethod()->exception_table_start(); }
333 // Finds the first entry point bci of an exception handler for an
334 // exception of klass ex_klass thrown at throw_bci. A value of NULL
335 // for ex_klass indicates that the exception klass is not known; in
336 // this case it matches any constraint class. Returns -1 if the
337 // exception cannot be handled in this method. The handler
338 // constraint classes are loaded if necessary. Note that this may
339 // throw an exception if loading of the constraint classes causes
340 // an IllegalAccessError (bugid 4307310) or an OutOfMemoryError.
341 // If an exception is thrown, returns the bci of the
342 // exception handler which caused the exception to be thrown, which
343 // is needed for proper retries. See, for example,
344 // InterpreterRuntime::exception_handler_for_exception.
345 static int fast_exception_handler_bci_for(methodHandle mh, KlassHandle ex_klass, int throw_bci, TRAPS);
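// A minimal caller sketch (hypothetical variable names; the canonical caller is
// InterpreterRuntime::exception_handler_for_exception):
//
//   methodHandle mh(THREAD, method);
//   KlassHandle  ex_klass(THREAD, exception->klass());   // or KlassHandle() if the klass is unknown
//   int handler_bci = Method::fast_exception_handler_bci_for(mh, ex_klass, throw_bci, CHECK_0);
//   if (handler_bci == -1) {
//     // not handled in this method; unwind to the caller
//   }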
347 // method data access
348 MethodData* method_data() const {
349 return _method_data;
350 }
352 void set_method_data(MethodData* data) {
353 _method_data = data;
354 }
356 MethodCounters* method_counters() const {
357 return _method_counters;
358 }
361 void set_method_counters(MethodCounters* counters) {
362 _method_counters = counters;
363 }
365 #ifdef TIERED
366 // We are reusing interpreter_invocation_count as a holder for the previous event count!
367 // We can do that since interpreter_invocation_count is not used in tiered.
368 int prev_event_count() const {
369 if (method_counters() == NULL) {
370 return 0;
371 } else {
372 return method_counters()->interpreter_invocation_count();
373 }
374 }
375 void set_prev_event_count(int count, TRAPS) {
376 MethodCounters* mcs = get_method_counters(CHECK);
377 if (mcs != NULL) {
378 mcs->set_interpreter_invocation_count(count);
379 }
380 }
381 jlong prev_time() const {
382 return method_counters() == NULL ? 0 : method_counters()->prev_time();
383 }
384 void set_prev_time(jlong time, TRAPS) {
385 MethodCounters* mcs = get_method_counters(CHECK);
386 if (mcs != NULL) {
387 mcs->set_prev_time(time);
388 }
389 }
390 float rate() const {
391 return method_counters() == NULL ? 0 : method_counters()->rate();
392 }
393 void set_rate(float rate, TRAPS) {
394 MethodCounters* mcs = get_method_counters(CHECK);
395 if (mcs != NULL) {
396 mcs->set_rate(rate);
397 }
398 }
399 #endif
401 int invocation_count();
402 int backedge_count();
404 bool was_executed_more_than(int n);
405 bool was_never_executed() { return !was_executed_more_than(0); }
407 static void build_interpreter_method_data(methodHandle method, TRAPS);
409 static MethodCounters* build_method_counters(Method* m, TRAPS);
411 int interpreter_invocation_count() {
412 if (TieredCompilation) return invocation_count();
413 else return (method_counters() == NULL) ? 0 :
414 method_counters()->interpreter_invocation_count();
415 }
416 int increment_interpreter_invocation_count(TRAPS) {
417 if (TieredCompilation) ShouldNotReachHere();
418 MethodCounters* mcs = get_method_counters(CHECK_0);
419 return (mcs == NULL) ? 0 : mcs->increment_interpreter_invocation_count();
420 }
422 #ifndef PRODUCT
423 int compiled_invocation_count() const { return _compiled_invocation_count; }
424 void set_compiled_invocation_count(int count) { _compiled_invocation_count = count; }
425 #endif // not PRODUCT
427 // Clear (non-shared space) pointers which would not be relevant
428 // if this (shared) method were mapped into another JVM.
429 void remove_unshareable_info();
431 // nmethod/verified compiler entry
432 address verified_code_entry();
433 bool check_code() const; // Not inline to avoid circular ref
434 nmethod* volatile code() const { assert( check_code(), "" ); return (nmethod *)OrderAccess::load_ptr_acquire(&_code); }
435 void clear_code(); // Clear out any compiled code
436 static void set_code(methodHandle mh, nmethod* code);
437 void set_adapter_entry(AdapterHandlerEntry* adapter) { _adapter = adapter; }
438 address get_i2c_entry();
439 address get_c2i_entry();
440 address get_c2i_unverified_entry();
441 AdapterHandlerEntry* adapter() { return _adapter; }
442 // setup entry points
443 void link_method(methodHandle method, TRAPS);
444 // clear entry points. Used by sharing code
445 void unlink_method();
447 // vtable index
448 enum VtableIndexFlag {
449 // Valid vtable indexes are non-negative (>= 0).
450 // These few negative values are used as sentinels.
451 itable_index_max = -10, // first itable index, growing downward
452 pending_itable_index = -9, // itable index will be assigned
453 invalid_vtable_index = -4, // distinct from any valid vtable index
454 garbage_vtable_index = -3, // not yet linked; no vtable layout yet
455 nonvirtual_vtable_index = -2 // there is no need for vtable dispatch
456 // 6330203 Note: Do not use -1, which was overloaded with many meanings.
457 };
458 DEBUG_ONLY(bool valid_vtable_index() const { return _vtable_index >= nonvirtual_vtable_index; })
459 bool has_vtable_index() const { return _vtable_index >= 0; }
460 int vtable_index() const { return _vtable_index; }
461 void set_vtable_index(int index) { _vtable_index = index; }
462 DEBUG_ONLY(bool valid_itable_index() const { return _vtable_index <= pending_itable_index; })
463 bool has_itable_index() const { return _vtable_index <= itable_index_max; }
464 int itable_index() const { assert(valid_itable_index(), "");
465 return itable_index_max - _vtable_index; }
466 void set_itable_index(int index) { _vtable_index = itable_index_max - index; assert(valid_itable_index(), ""); }
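// Worked example of the encoding above (illustrative only): with itable_index_max == -10,
// set_itable_index(0) stores _vtable_index == -10 and set_itable_index(3) stores
// _vtable_index == -13; itable_index() then recovers the original value as
// itable_index_max - _vtable_index == -10 - (-13) == 3.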
468 // interpreter entry
469 address interpreter_entry() const { return _i2i_entry; }
470 // Only used during initial setup so we can set _i2i_entry and _from_interpreted_entry
471 void set_interpreter_entry(address entry) { _i2i_entry = entry; _from_interpreted_entry = entry; }
473 // native function (used for native methods only)
474 enum {
475 native_bind_event_is_interesting = true
476 };
477 address native_function() const { return *(native_function_addr()); }
478 address critical_native_function();
480 // Must specify a real function (not NULL).
481 // Use clear_native_function() to unregister.
482 void set_native_function(address function, bool post_event_flag);
483 bool has_native_function() const;
484 void clear_native_function();
486 // signature handler (used for native methods only)
487 address signature_handler() const { return *(signature_handler_addr()); }
488 void set_signature_handler(address handler);
490 // Interpreter oopmap support
491 void mask_for(int bci, InterpreterOopMap* mask);
493 #ifndef PRODUCT
494 // operations on invocation counter
495 void print_invocation_count();
496 #endif
498 // byte codes
499 void set_code(address code) { return constMethod()->set_code(code); }
500 address code_base() const { return constMethod()->code_base(); }
501 bool contains(address bcp) const { return constMethod()->contains(bcp); }
503 // prints byte codes
504 void print_codes() const { print_codes_on(tty); }
505 void print_codes_on(outputStream* st) const PRODUCT_RETURN;
506 void print_codes_on(int from, int to, outputStream* st) const PRODUCT_RETURN;
508 // method parameters
509 bool has_method_parameters() const
510 { return constMethod()->has_method_parameters(); }
511 int method_parameters_length() const
512 { return constMethod()->method_parameters_length(); }
513 MethodParametersElement* method_parameters_start() const
514 { return constMethod()->method_parameters_start(); }
516 // checked exceptions
517 int checked_exceptions_length() const
518 { return constMethod()->checked_exceptions_length(); }
519 CheckedExceptionElement* checked_exceptions_start() const
520 { return constMethod()->checked_exceptions_start(); }
522 // localvariable table
523 bool has_localvariable_table() const
524 { return constMethod()->has_localvariable_table(); }
525 int localvariable_table_length() const
526 { return constMethod()->localvariable_table_length(); }
527 LocalVariableTableElement* localvariable_table_start() const
528 { return constMethod()->localvariable_table_start(); }
530 bool has_linenumber_table() const
531 { return constMethod()->has_linenumber_table(); }
532 u_char* compressed_linenumber_table() const
533 { return constMethod()->compressed_linenumber_table(); }
535 // method holder (the Klass* holding this method)
536 InstanceKlass* method_holder() const { return constants()->pool_holder(); }
538 void compute_size_of_parameters(Thread *thread); // word size of parameters (receiver if any + arguments)
539 Symbol* klass_name() const; // returns the name of the method holder
540 BasicType result_type() const; // type of the method result
541 int result_type_index() const; // type index of the method result
542 bool is_returning_oop() const { BasicType r = result_type(); return (r == T_OBJECT || r == T_ARRAY); }
543 bool is_returning_fp() const { BasicType r = result_type(); return (r == T_FLOAT || r == T_DOUBLE); }
545 // Checked exceptions thrown by this method (resolved to mirrors)
546 objArrayHandle resolved_checked_exceptions(TRAPS) { return resolved_checked_exceptions_impl(this, THREAD); }
548 // Access flags
549 bool is_public() const { return access_flags().is_public(); }
550 bool is_private() const { return access_flags().is_private(); }
551 bool is_protected() const { return access_flags().is_protected(); }
552 bool is_package_private() const { return !is_public() && !is_private() && !is_protected(); }
553 bool is_static() const { return access_flags().is_static(); }
554 bool is_final() const { return access_flags().is_final(); }
555 bool is_synchronized() const { return access_flags().is_synchronized();}
556 bool is_native() const { return access_flags().is_native(); }
557 bool is_abstract() const { return access_flags().is_abstract(); }
558 bool is_strict() const { return access_flags().is_strict(); }
559 bool is_synthetic() const { return access_flags().is_synthetic(); }
561 // returns true if the method contains only a return operation
562 bool is_empty_method() const;
564 // returns true if this is a vanilla constructor
565 bool is_vanilla_constructor() const;
567 // checks method and its method holder
568 bool is_final_method() const;
569 bool is_final_method(AccessFlags class_access_flags) const;
570 bool is_default_method() const;
572 // true if method needs no dynamic dispatch (final and/or no vtable entry)
573 bool can_be_statically_bound() const;
574 bool can_be_statically_bound(AccessFlags class_access_flags) const;
576 // returns true if the method has any backward branches.
577 bool has_loops() {
578 return access_flags().loops_flag_init() ? access_flags().has_loops() : compute_has_loops_flag();
579 };
581 bool compute_has_loops_flag();
583 bool has_jsrs() {
584 return access_flags().has_jsrs();
585 };
586 void set_has_jsrs() {
587 _access_flags.set_has_jsrs();
588 }
590 // returns true if the method has any monitors.
591 bool has_monitors() const { return is_synchronized() || access_flags().has_monitor_bytecodes(); }
592 bool has_monitor_bytecodes() const { return access_flags().has_monitor_bytecodes(); }
594 void set_has_monitor_bytecodes() { _access_flags.set_has_monitor_bytecodes(); }
596 // monitor matching. This returns a conservative estimate of whether the monitorenter/monitorexit bytecodes
597 // properly nest in the method. It might return false, even though they actually nest properly, since the info
598 // has not been computed yet.
599 bool guaranteed_monitor_matching() const { return access_flags().is_monitor_matching(); }
600 void set_guaranteed_monitor_matching() { _access_flags.set_monitor_matching(); }
602 // returns true if the method is an accessor function (setter/getter).
603 bool is_accessor() const;
605 // returns true if the method is an initializer (<init> or <clinit>).
606 bool is_initializer() const;
608 // returns true if the method is static OR if the classfile version < 51
609 bool has_valid_initializer_flags() const;
611 // returns true if the method name is <clinit> and the method has
612 // valid static initializer flags.
613 bool is_static_initializer() const;
615 // compiled code support
616 // NOTE: code() is inherently racy as deopt can be clearing code
617 // simultaneously. Use with caution.
618 bool has_compiled_code() const { return code() != NULL; }
620 // sizing
621 static int header_size() { return sizeof(Method)/HeapWordSize; }
622 static int size(bool is_native);
623 int size() const { return method_size(); }
624 #if INCLUDE_SERVICES
625 void collect_statistics(KlassSizeStats *sz) const;
626 #endif
628 // interpreter support
629 static ByteSize const_offset() { return byte_offset_of(Method, _constMethod ); }
630 static ByteSize access_flags_offset() { return byte_offset_of(Method, _access_flags ); }
631 #ifdef CC_INTERP
632 static ByteSize result_index_offset() { return byte_offset_of(Method, _result_index ); }
633 #endif /* CC_INTERP */
634 static ByteSize from_compiled_offset() { return byte_offset_of(Method, _from_compiled_entry); }
635 static ByteSize code_offset() { return byte_offset_of(Method, _code); }
636 static ByteSize method_data_offset() {
637 return byte_offset_of(Method, _method_data);
638 }
639 static ByteSize method_counters_offset() {
640 return byte_offset_of(Method, _method_counters);
641 }
642 #ifndef PRODUCT
643 static ByteSize compiled_invocation_counter_offset() { return byte_offset_of(Method, _compiled_invocation_count); }
644 #endif // not PRODUCT
645 static ByteSize native_function_offset() { return in_ByteSize(sizeof(Method)); }
646 static ByteSize from_interpreted_offset() { return byte_offset_of(Method, _from_interpreted_entry ); }
647 static ByteSize interpreter_entry_offset() { return byte_offset_of(Method, _i2i_entry ); }
648 static ByteSize signature_handler_offset() { return in_ByteSize(sizeof(Method) + wordSize); }
650 // for code generation
651 static int method_data_offset_in_bytes() { return offset_of(Method, _method_data); }
652 static int intrinsic_id_offset_in_bytes() { return offset_of(Method, _intrinsic_id); }
653 static int intrinsic_id_size_in_bytes() { return sizeof(u1); }
655 // Static methods that are used to implement member methods where an exposed this pointer
656 // is needed due to possible GCs
657 static objArrayHandle resolved_checked_exceptions_impl(Method* this_oop, TRAPS);
659 // Returns the byte code index from the byte code pointer
660 int bci_from(address bcp) const;
661 address bcp_from(int bci) const;
662 int validate_bci_from_bcx(intptr_t bcx) const;
664 // Returns the line number for a bci if debugging information for the method is provided;
665 // -1 is returned otherwise.
666 int line_number_from_bci(int bci) const;
668 // Reflection support
669 bool is_overridden_in(Klass* k) const;
671 // Stack walking support
672 bool is_ignored_by_security_stack_walk() const;
674 // JSR 292 support
675 bool is_method_handle_intrinsic() const; // MethodHandles::is_signature_polymorphic_intrinsic(intrinsic_id)
676 bool is_compiled_lambda_form() const; // intrinsic_id() == vmIntrinsics::_compiledLambdaForm
677 bool has_member_arg() const; // intrinsic_id() == vmIntrinsics::_linkToSpecial, etc.
678 static methodHandle make_method_handle_intrinsic(vmIntrinsics::ID iid, // _invokeBasic, _linkToVirtual
679 Symbol* signature, // anything at all
680 TRAPS);
681 static Klass* check_non_bcp_klass(Klass* klass);
683 // How many extra stack entries for invokedynamic when it's enabled
684 static const int extra_stack_entries_for_jsr292 = 1;
686 // this operates only on invoke methods:
687 // presize interpreter frames for extra interpreter stack entries, if needed
688 // Account for the extra appendix argument for invokehandle/invokedynamic
689 static int extra_stack_entries() { return EnableInvokeDynamic ? extra_stack_entries_for_jsr292 : 0; }
690 static int extra_stack_words(); // = extra_stack_entries() * Interpreter::stackElementSize
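// Worked example (assumes EnableInvokeDynamic is on): a method whose class file declares
// max_stack == 4 reports verifier_max_stack() == 4 but max_stack() == 5; the extra slot
// (extra_stack_entries_for_jsr292 == 1) accounts for the appendix argument pushed for
// invokehandle/invokedynamic.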
692 // RedefineClasses() support:
693 bool is_old() const { return access_flags().is_old(); }
694 void set_is_old() { _access_flags.set_is_old(); }
695 bool is_obsolete() const { return access_flags().is_obsolete(); }
696 void set_is_obsolete() { _access_flags.set_is_obsolete(); }
697 bool on_stack() const { return access_flags().on_stack(); }
698 void set_on_stack(const bool value);
700 // see the definition in method.cpp for the gory details
701 bool should_not_be_cached() const;
703 // JVMTI Native method prefixing support:
704 bool is_prefixed_native() const { return access_flags().is_prefixed_native(); }
705 void set_is_prefixed_native() { _access_flags.set_is_prefixed_native(); }
707 // Rewriting support
708 static methodHandle clone_with_new_data(methodHandle m, u_char* new_code, int new_code_length,
709 u_char* new_compressed_linenumber_table, int new_compressed_linenumber_size, TRAPS);
711 // jmethodID handling
712 // Because the useful life-span of a jmethodID cannot be determined,
713 // once created they are never reclaimed. The methods to which they refer,
714 // however, can be GC'ed away if the class is unloaded or if the method is
715 // made obsolete or deleted -- in these cases, the jmethodID
716 // refers to NULL (as is the case for any weak reference).
717 static jmethodID make_jmethod_id(ClassLoaderData* loader_data, Method* mh);
718 static void destroy_jmethod_id(ClassLoaderData* loader_data, jmethodID mid);
720 // Use resolve_jmethod_id() in situations where the caller is expected
721 // to provide a valid jmethodID; the only sanity checks are in asserts;
722 // result guaranteed not to be NULL.
723 inline static Method* resolve_jmethod_id(jmethodID mid) {
724 assert(mid != NULL, "JNI method id should not be null");
725 return *((Method**)mid);
726 }
728 // Use checked_resolve_jmethod_id() in situations where the caller
729 // should provide a valid jmethodID, but might not. NULL is returned
730 // when the jmethodID does not refer to a valid method.
731 static Method* checked_resolve_jmethod_id(jmethodID mid);
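// Hedged usage sketch (the jmethodID source is hypothetical, e.g. JNI GetMethodID):
//
//   Method* m  = Method::resolve_jmethod_id(mid);          // asserts mid != NULL, never returns NULL
//   Method* m2 = Method::checked_resolve_jmethod_id(mid);  // returns NULL for an invalid or stale id
//
// Use the checked variant whenever the id may refer to an unloaded or obsolete method.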
733 static void change_method_associated_with_jmethod_id(jmethodID old_jmid_ptr, Method* new_method);
734 static bool is_method_id(jmethodID mid);
736 // Clear methods
737 static void clear_jmethod_ids(ClassLoaderData* loader_data);
738 static void print_jmethod_ids(ClassLoaderData* loader_data, outputStream* out) PRODUCT_RETURN;
740 // Get this method's jmethodID -- allocate if it doesn't exist
741 jmethodID jmethod_id() { methodHandle this_h(this);
742 return InstanceKlass::get_jmethod_id(method_holder(), this_h); }
744 // Lookup the jmethodID for this method. Return NULL if not found.
745 // NOTE that this function can be called from a signal handler
746 // (see AsyncGetCallTrace support for Forte Analyzer) and this
747 // needs to be async-safe. No allocation should be done and
748 // so handles are not used to avoid deadlock.
749 jmethodID find_jmethod_id_or_null() { return method_holder()->jmethod_id_or_null(this); }
751 // Support for inlining of intrinsic methods
752 vmIntrinsics::ID intrinsic_id() const { return (vmIntrinsics::ID) _intrinsic_id; }
753 void set_intrinsic_id(vmIntrinsics::ID id) { _intrinsic_id = (u1) id; }
755 // Helper routines for intrinsic_id() and vmIntrinsics::method().
756 void init_intrinsic_id(); // updates from _none if a match
757 static vmSymbols::SID klass_id_for_intrinsics(Klass* holder);
759 bool jfr_towrite() { return _jfr_towrite; }
760 void set_jfr_towrite(bool x) { _jfr_towrite = x; }
761 bool caller_sensitive() { return _caller_sensitive; }
762 void set_caller_sensitive(bool x) { _caller_sensitive = x; }
763 bool force_inline() { return _force_inline; }
764 void set_force_inline(bool x) { _force_inline = x; }
765 bool dont_inline() { return _dont_inline; }
766 void set_dont_inline(bool x) { _dont_inline = x; }
767 bool is_hidden() { return _hidden; }
768 void set_hidden(bool x) { _hidden = x; }
769 ConstMethod::MethodType method_type() const {
770 return _constMethod->method_type();
771 }
772 bool is_overpass() const { return method_type() == ConstMethod::OVERPASS; }
774 // On-stack replacement support
775 bool has_osr_nmethod(int level, bool match_level) {
776 return method_holder()->lookup_osr_nmethod(this, InvocationEntryBci, level, match_level) != NULL;
777 }
779 nmethod* lookup_osr_nmethod_for(int bci, int level, bool match_level) {
780 return method_holder()->lookup_osr_nmethod(this, bci, level, match_level);
781 }
783 // Inline cache support
784 void cleanup_inline_caches();
786 // Find if klass for method is loaded
787 bool is_klass_loaded_by_klass_index(int klass_index) const;
788 bool is_klass_loaded(int refinfo_index, bool must_be_resolved = false) const;
790 // Indicates whether compilation failed earlier for this method, or
791 // whether it is not compilable for another reason like having a
792 // breakpoint set in it.
793 bool is_not_compilable(int comp_level = CompLevel_any) const;
794 void set_not_compilable(int comp_level = CompLevel_all, bool report = true, const char* reason = NULL);
795 void set_not_compilable_quietly(int comp_level = CompLevel_all) {
796 set_not_compilable(comp_level, false);
797 }
798 bool is_not_osr_compilable(int comp_level = CompLevel_any) const;
799 void set_not_osr_compilable(int comp_level = CompLevel_all, bool report = true, const char* reason = NULL);
800 void set_not_osr_compilable_quietly(int comp_level = CompLevel_all) {
801 set_not_osr_compilable(comp_level, false);
802 }
803 bool is_always_compilable() const;
805 private:
806 void print_made_not_compilable(int comp_level, bool is_osr, bool report, const char* reason);
808 MethodCounters* get_method_counters(TRAPS) {
809 if (_method_counters == NULL) {
810 build_method_counters(this, CHECK_AND_CLEAR_NULL);
811 }
812 return _method_counters;
813 }
815 public:
816 bool is_not_c1_compilable() const { return access_flags().is_not_c1_compilable(); }
817 void set_not_c1_compilable() { _access_flags.set_not_c1_compilable(); }
818 void clear_not_c1_compilable() { _access_flags.clear_not_c1_compilable(); }
819 bool is_not_c2_compilable() const { return access_flags().is_not_c2_compilable(); }
820 void set_not_c2_compilable() { _access_flags.set_not_c2_compilable(); }
821 void clear_not_c2_compilable() { _access_flags.clear_not_c2_compilable(); }
823 bool is_not_c1_osr_compilable() const { return is_not_c1_compilable(); } // don't waste an accessFlags bit
824 void set_not_c1_osr_compilable() { set_not_c1_compilable(); } // don't waste an accessFlags bit
825 void clear_not_c1_osr_compilable() { clear_not_c1_compilable(); } // don't waste an accessFlags bit
826 bool is_not_c2_osr_compilable() const { return access_flags().is_not_c2_osr_compilable(); }
827 void set_not_c2_osr_compilable() { _access_flags.set_not_c2_osr_compilable(); }
828 void clear_not_c2_osr_compilable() { _access_flags.clear_not_c2_osr_compilable(); }
830 // Background compilation support
831 bool queued_for_compilation() const { return access_flags().queued_for_compilation(); }
832 void set_queued_for_compilation() { _access_flags.set_queued_for_compilation(); }
833 void clear_queued_for_compilation() { _access_flags.clear_queued_for_compilation(); }
835 // Resolve all classes in signature, return 'true' if successful
836 static bool load_signature_classes(methodHandle m, TRAPS);
838 // Returns true if not all classes referenced in the signature, including the return type, have been loaded
839 static bool has_unloaded_classes_in_signature(methodHandle m, TRAPS);
841 // Printing
842 void print_short_name(outputStream* st = tty); // prints as klassname::methodname; Exposed so field engineers can debug VM
843 #if INCLUDE_JVMTI
844 void print_name(outputStream* st = tty); // prints as "virtual void foo(int)"; exposed for TraceRedefineClasses
845 #else
846 void print_name(outputStream* st = tty) PRODUCT_RETURN; // prints as "virtual void foo(int)"
847 #endif
849 // Helper routine used for method sorting
850 static void sort_methods(Array<Method*>* methods, bool idempotent = false, bool set_idnums = true);
852 // Deallocation function for redefine classes or if an error occurs
853 void deallocate_contents(ClassLoaderData* loader_data);
855 // Printing
856 #ifndef PRODUCT
857 void print_on(outputStream* st) const;
858 #endif
859 void print_value_on(outputStream* st) const;
861 const char* internal_name() const { return "{method}"; }
863 // Check for valid method pointer
864 bool is_valid_method() const;
866 // Verify
867 void verify() { verify_on(tty); }
868 void verify_on(outputStream* st);
870 private:
872 // Inlined elements
873 address* native_function_addr() const { assert(is_native(), "must be native"); return (address*) (this+1); }
874 address* signature_handler_addr() const { return native_function_addr() + 1; }
875 };
878 // Utility class for compressing line number tables
880 class CompressedLineNumberWriteStream: public CompressedWriteStream {
881 private:
882 int _bci;
883 int _line;
884 public:
885 // Constructor
886 CompressedLineNumberWriteStream(int initial_size) : CompressedWriteStream(initial_size), _bci(0), _line(0) {}
887 CompressedLineNumberWriteStream(u_char* buffer, int initial_size) : CompressedWriteStream(buffer, initial_size), _bci(0), _line(0) {}
889 // Write (bci, line number) pair to stream
890 void write_pair_regular(int bci_delta, int line_delta);
892 inline void write_pair_inline(int bci, int line) {
893 int bci_delta = bci - _bci;
894 int line_delta = line - _line;
895 _bci = bci;
896 _line = line;
897 // Skip (0,0) deltas - they do not add information and conflict with terminator.
898 if (bci_delta == 0 && line_delta == 0) return;
899 // Check if bci is 5-bit and line number 3-bit unsigned.
900 if (((bci_delta & ~0x1F) == 0) && ((line_delta & ~0x7) == 0)) {
901 // Compress into single byte.
902 jubyte value = ((jubyte) bci_delta << 3) | (jubyte) line_delta;
903 // Check that value doesn't match escape character.
904 if (value != 0xFF) {
905 write_byte(value);
906 return;
907 }
908 }
909 write_pair_regular(bci_delta, line_delta);
910 }
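// Worked example of the single-byte encoding above (illustrative only): for bci_delta == 10
// and line_delta == 2 both deltas fit (bci_delta < 32, line_delta < 8), so the pair is packed
// as (10 << 3) | 2 == 0x52 and written as one byte; a pair with bci_delta == 40 would fall
// back to write_pair_regular().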
912 // Windows AMD64 + Apr 2005 PSDK with /O2 generates bad code for write_pair.
913 // Disabling optimization doesn't work for methods in header files
914 // so we force it to call through the non-optimized version in the .cpp.
915 // It's gross, but it's the only way we can ensure that all callers are
916 // fixed. _MSC_VER is defined by the Windows compiler
917 #if defined(_M_AMD64) && _MSC_VER >= 1400
918 void write_pair(int bci, int line);
919 #else
920 void write_pair(int bci, int line) { write_pair_inline(bci, line); }
921 #endif
923 // Write end-of-stream marker
924 void write_terminator() { write_byte(0); }
925 };
928 // Utility class for decompressing line number tables
930 class CompressedLineNumberReadStream: public CompressedReadStream {
931 private:
932 int _bci;
933 int _line;
934 public:
935 // Constructor
936 CompressedLineNumberReadStream(u_char* buffer);
937 // Read (bci, line number) pair from stream. Returns false at end-of-stream.
938 bool read_pair();
939 // Accessing bci and line number (after calling read_pair)
940 int bci() const { return _bci; }
941 int line() const { return _line; }
942 };
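// A minimal encode/decode round trip using the two stream classes above (a sketch; assumes
// the underlying CompressedStream exposes its buffer via buffer()):
//
//   CompressedLineNumberWriteStream out(64);
//   out.write_pair(0, 10);   // bci 0 -> line 10
//   out.write_pair(8, 11);   // bci 8 -> line 11
//   out.write_terminator();
//
//   CompressedLineNumberReadStream in(out.buffer());
//   while (in.read_pair()) {
//     tty->print_cr("bci=%d line=%d", in.bci(), in.line());
//   }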
945 /// Fast Breakpoints.
947 // If this structure gets more complicated (because bpts get numerous),
948 // move it into its own header.
950 // There is presently no provision for concurrent access
951 // to breakpoint lists, which is only OK for JVMTI because
952 // breakpoints are written only at safepoints, and are read
953 // concurrently only outside of safepoints.
955 class BreakpointInfo : public CHeapObj<mtClass> {
956 friend class VMStructs;
957 private:
958 Bytecodes::Code _orig_bytecode;
959 int _bci;
960 u2 _name_index; // of method
961 u2 _signature_index; // of method
962 BreakpointInfo* _next; // simple storage allocation
964 public:
965 BreakpointInfo(Method* m, int bci);
967 // accessors
968 Bytecodes::Code orig_bytecode() { return _orig_bytecode; }
969 void set_orig_bytecode(Bytecodes::Code code) { _orig_bytecode = code; }
970 int bci() { return _bci; }
972 BreakpointInfo* next() const { return _next; }
973 void set_next(BreakpointInfo* n) { _next = n; }
975 // helpers for searchers
976 bool match(const Method* m, int bci) {
977 return bci == _bci && match(m);
978 }
980 bool match(const Method* m) {
981 return _name_index == m->name_index() &&
982 _signature_index == m->signature_index();
983 }
985 void set(Method* method);
986 void clear(Method* method);
987 };
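// Hedged sketch of how Method::set_breakpoint ties into this class (simplified; the real
// logic lives in method.cpp and the per-class breakpoint list bookkeeping is omitted):
//
//   BreakpointInfo* bp = new BreakpointInfo(method, bci);  // captures the original bytecode
//   bp->set(method);    // installs the breakpoint bytecode at bci
//   ...
//   bp->clear(method);  // restores the original bytecode when the breakpoint is removed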
989 // Utility class for accessing exception handlers
990 class ExceptionTable : public StackObj {
991 private:
992 ExceptionTableElement* _table;
993 u2 _length;
995 public:
996 ExceptionTable(const Method* m) {
997 if (m->has_exception_handler()) {
998 _table = m->exception_table_start();
999 _length = m->exception_table_length();
1000 } else {
1001 _table = NULL;
1002 _length = 0;
1003 }
1004 }
1006 int length() const {
1007 return _length;
1008 }
1010 u2 start_pc(int idx) const {
1011 assert(idx < _length, "out of bounds");
1012 return _table[idx].start_pc;
1013 }
1015 void set_start_pc(int idx, u2 value) {
1016 assert(idx < _length, "out of bounds");
1017 _table[idx].start_pc = value;
1018 }
1020 u2 end_pc(int idx) const {
1021 assert(idx < _length, "out of bounds");
1022 return _table[idx].end_pc;
1023 }
1025 void set_end_pc(int idx, u2 value) {
1026 assert(idx < _length, "out of bounds");
1027 _table[idx].end_pc = value;
1028 }
1030 u2 handler_pc(int idx) const {
1031 assert(idx < _length, "out of bounds");
1032 return _table[idx].handler_pc;
1033 }
1035 void set_handler_pc(int idx, u2 value) {
1036 assert(idx < _length, "out of bounds");
1037 _table[idx].handler_pc = value;
1038 }
1040 u2 catch_type_index(int idx) const {
1041 assert(idx < _length, "out of bounds");
1042 return _table[idx].catch_type_index;
1043 }
1045 void set_catch_type_index(int idx, u2 value) {
1046 assert(idx < _length, "out of bounds");
1047 _table[idx].catch_type_index = value;
1048 }
1049 };
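// A minimal usage sketch for the accessor class above ('method' is a hypothetical Method*):
//
//   ExceptionTable table(method);
//   for (int i = 0; i < table.length(); i++) {
//     tty->print_cr("handler [%d, %d) -> %d (catch_type cp index %d)",
//                   table.start_pc(i), table.end_pc(i),
//                   table.handler_pc(i), table.catch_type_index(i));
//   }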
1051 #endif // SHARE_VM_OOPS_METHODOOP_HPP