--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/oops/method.hpp	Wed Apr 27 01:25:04 2016 +0800
@@ -0,0 +1,1059 @@
+/*
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_OOPS_METHODOOP_HPP
+#define SHARE_VM_OOPS_METHODOOP_HPP
+
+#include "classfile/vmSymbols.hpp"
+#include "code/compressedStream.hpp"
+#include "compiler/oopMap.hpp"
+#include "interpreter/invocationCounter.hpp"
+#include "oops/annotations.hpp"
+#include "oops/constantPool.hpp"
+#include "oops/methodCounters.hpp"
+#include "oops/instanceKlass.hpp"
+#include "oops/oop.hpp"
+#include "oops/typeArrayOop.hpp"
+#include "utilities/accessFlags.hpp"
+#include "utilities/growableArray.hpp"
+
+// A Method* represents a Java method.
+//
+// Memory layout (each line represents a word). Note that most applications load thousands of methods,
+// so keeping the size of this structure small has a big impact on footprint.
+//
+// We put all oops and method_size first for better gc cache locality.
+//
+// The actual bytecodes are inlined after the end of the Method struct.
+//
+// There are bits in the access_flags telling whether inlined tables are present.
+// Note that accessing the line number and local variable tables is not performance critical at all.
+// Accessing the checked exceptions table is used by reflection, so we put that last to make access
+// to it fast.
+//
+// The line number table is compressed and inlined following the byte codes. It is found as the first
+// byte following the byte codes. The checked exceptions table and the local variable table are inlined
+// after the line number table, and indexed from the end of the method. We do not compress the checked
+// exceptions table since the average length is less than 2, and do not bother to compress the local
+// variable table either since it is mostly absent.
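+//
+// For illustration only (the encoding itself is implemented by
+// CompressedLineNumberWriteStream at the end of this file): a (bci, line)
+// pair whose deltas fit in 5 and 3 unsigned bits, i.e. bci_delta <= 31 and
+// line_delta <= 7, packs into a single byte:
+//
+//   jubyte value = (bci_delta << 3) | line_delta;  // 0x00 = terminator, 0xFF = escape
+//
+// Larger deltas fall back to the variable-length encoding of CompressedWriteStream.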
+//
+// Note that native_function and signature_handler have to be at fixed offsets (required by the interpreter)
+//
+// |------------------------------------------------------|
+// | header                                               |
+// | klass                                                |
+// |------------------------------------------------------|
+// | ConstMethod*                   (oop)                 |
+// |------------------------------------------------------|
+// | methodData                     (oop)                 |
+// | methodCounters                                       |
+// |------------------------------------------------------|
+// | access_flags                                         |
+// | vtable_index                                         |
+// |------------------------------------------------------|
+// | result_index (C++ interpreter only)                  |
+// |------------------------------------------------------|
+// | method_size            | intrinsic_id |    flags     |
+// |------------------------------------------------------|
+// | code                           (pointer)             |
+// | i2i                            (pointer)             |
+// | adapter                        (pointer)             |
+// | from_compiled_entry            (pointer)             |
+// | from_interpreted_entry         (pointer)             |
+// |------------------------------------------------------|
+// | native_function       (present only if native)       |
+// | signature_handler     (present only if native)       |
+// |------------------------------------------------------|
+
+
+class CheckedExceptionElement;
+class LocalVariableTableElement;
+class AdapterHandlerEntry;
+class MethodData;
+class MethodCounters;
+class ConstMethod;
+class InlineTableSizes;
+class KlassSizeStats;
+
+class Method : public Metadata {
+ friend class VMStructs;
+ private:
+  ConstMethod*      _constMethod;                // Method read-only data.
+  MethodData*       _method_data;
+  MethodCounters*   _method_counters;
+  AccessFlags       _access_flags;               // Access flags
+  int               _vtable_index;               // vtable index of this method (see VtableIndexFlag)
+                                                 // note: can have vtables with >2**16 elements (because of inheritance)
+#ifdef CC_INTERP
+  int               _result_index;               // C++ interpreter needs for converting results to/from stack
+#endif
+  u2                _method_size;                // size of this object
+  u1                _intrinsic_id;               // vmSymbols::intrinsic_id (0 == _none)
+  u1                _jfr_towrite      : 1,       // Flags
+                    _caller_sensitive : 1,
+                    _force_inline     : 1,
+                    _hidden           : 1,
+                    _dont_inline      : 1,
+                                      : 3;
+
+#ifndef PRODUCT
+  int               _compiled_invocation_count;  // Number of nmethod invocations so far (for perf. debugging)
+#endif
+  // Entry point for calling both from and to the interpreter.
+  address _i2i_entry;           // All-args-on-stack calling convention
+  // Adapter blob (i2c/c2i) for this Method*. Set once when method is linked.
+  AdapterHandlerEntry* _adapter;
+  // Entry point for calling from compiled code, to compiled code if it exists
+  // or else the interpreter.
+  volatile address _from_compiled_entry;        // Cache of: _code ? _code->entry_point() : _adapter->c2i_entry()
+  // The entry point for calling both from and to compiled code is
+  // "_code->entry_point()". Because of tiered compilation and de-opt, this
+  // field can come and go. It can transition from NULL to not-null at any
+  // time (whenever a compile completes). It can transition from not-null to
+  // NULL only at safepoints (because of a de-opt).
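+  //
+  // Summarizing sketch only (this restates the two cache comments above and
+  // below; it is not an extra member or a checked invariant):
+  //
+  //   _from_compiled_entry    == _code ? _code->entry_point()  : _adapter->c2i_entry()
+  //   _from_interpreted_entry == _code ? _adapter->i2c_entry() : _i2i_entry
+  //
+  // so callers can jump through the cached field without testing _code first.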
+  nmethod* volatile _code;                       // Points to the corresponding piece of native code
+  volatile address  _from_interpreted_entry;     // Cache of _code ? _adapter->i2c_entry() : _i2i_entry
+
+  // Constructor
+  Method(ConstMethod* xconst, AccessFlags access_flags, int size);
+ public:
+
+  static Method* allocate(ClassLoaderData* loader_data,
+                          int byte_code_size,
+                          AccessFlags access_flags,
+                          InlineTableSizes* sizes,
+                          ConstMethod::MethodType method_type,
+                          TRAPS);
+
+  // CDS and vtbl checking can create an empty Method to get vtbl pointer.
+  Method() {}
+
+  // The Method vtable is restored by this call when the Method is in the
+  // shared archive.  See patch_klass_vtables() in metaspaceShared.cpp for
+  // all the gory details.  SA, dtrace and pstack helpers distinguish metadata
+  // by their vtable.
+  void restore_vtable() { guarantee(is_method(), "vtable restored by this call"); }
+  bool is_method() const volatile { return true; }
+
+  void restore_unshareable_info(TRAPS);
+
+  // accessors for instance variables
+
+  ConstMethod* constMethod() const               { return _constMethod; }
+  void set_constMethod(ConstMethod* xconst)      { _constMethod = xconst; }
+
+
+  static address make_adapters(methodHandle mh, TRAPS);
+  volatile address from_compiled_entry() const    { return (address)OrderAccess::load_ptr_acquire(&_from_compiled_entry); }
+  volatile address from_interpreted_entry() const { return (address)OrderAccess::load_ptr_acquire(&_from_interpreted_entry); }
+
+  // access flag
+  AccessFlags access_flags() const               { return _access_flags; }
+  void set_access_flags(AccessFlags flags)       { _access_flags = flags; }
+
+  // name
+  Symbol* name() const                           { return constants()->symbol_at(name_index()); }
+  int name_index() const                         { return constMethod()->name_index(); }
+  void set_name_index(int index)                 { constMethod()->set_name_index(index); }
+
+  // signature
+  Symbol* signature() const                      { return constants()->symbol_at(signature_index()); }
+  int signature_index() const                    { return constMethod()->signature_index(); }
+  void set_signature_index(int index)            { constMethod()->set_signature_index(index); }
+
+  // generics support
+  Symbol* generic_signature() const              { int idx = generic_signature_index(); return ((idx != 0) ? constants()->symbol_at(idx) : (Symbol*)NULL); }
+  int generic_signature_index() const            { return constMethod()->generic_signature_index(); }
+  void set_generic_signature_index(int index)    { constMethod()->set_generic_signature_index(index); }
+
+  // annotations support
+  AnnotationArray* annotations() const           {
+    return constMethod()->method_annotations();
+  }
+  AnnotationArray* parameter_annotations() const {
+    return constMethod()->parameter_annotations();
+  }
+  AnnotationArray* annotation_default() const    {
+    return constMethod()->default_annotations();
+  }
+  AnnotationArray* type_annotations() const      {
+    return constMethod()->type_annotations();
+  }
+
+#ifdef CC_INTERP
+  void set_result_index(BasicType type);
+  int  result_index()                            { return _result_index; }
+#endif
+
+  // Helper routine: get klass name + "." + method name + signature as
+  // C string, for the purpose of providing more useful NoSuchMethodErrors
+  // and fatal error handling. The string is allocated in resource
+  // area if a buffer is not provided by the caller.
+  char* name_and_sig_as_C_string() const;
+  char* name_and_sig_as_C_string(char* buf, int size) const;
+
+  // Static routines for situations where we don't have a Method*
+  static char* name_and_sig_as_C_string(Klass* klass, Symbol* method_name, Symbol* signature);
+  static char* name_and_sig_as_C_string(Klass* klass, Symbol* method_name, Symbol* signature, char* buf, int size);
+
+  Bytecodes::Code java_code_at(int bci) const {
+    return Bytecodes::java_code_at(this, bcp_from(bci));
+  }
+  Bytecodes::Code code_at(int bci) const {
+    return Bytecodes::code_at(this, bcp_from(bci));
+  }
+
+  // JVMTI breakpoints
+  Bytecodes::Code orig_bytecode_at(int bci) const;
+  void set_orig_bytecode_at(int bci, Bytecodes::Code code);
+  void set_breakpoint(int bci);
+  void clear_breakpoint(int bci);
+  void clear_all_breakpoints();
+  // Tracking number of breakpoints, for fullspeed debugging.
+  // Only mutated by VM thread.
+  u2 number_of_breakpoints() const {
+    if (method_counters() == NULL) {
+      return 0;
+    } else {
+      return method_counters()->number_of_breakpoints();
+    }
+  }
+  void incr_number_of_breakpoints(TRAPS) {
+    MethodCounters* mcs = get_method_counters(CHECK);
+    if (mcs != NULL) {
+      mcs->incr_number_of_breakpoints();
+    }
+  }
+  void decr_number_of_breakpoints(TRAPS) {
+    MethodCounters* mcs = get_method_counters(CHECK);
+    if (mcs != NULL) {
+      mcs->decr_number_of_breakpoints();
+    }
+  }
+  // Initialization only
+  void clear_number_of_breakpoints() {
+    if (method_counters() != NULL) {
+      method_counters()->clear_number_of_breakpoints();
+    }
+  }
+
+  // index into InstanceKlass methods() array
+  // note: also used by jfr
+  u2 method_idnum() const           { return constMethod()->method_idnum(); }
+  void set_method_idnum(u2 idnum)   { constMethod()->set_method_idnum(idnum); }
+
+  // code size
+  int code_size() const             { return constMethod()->code_size(); }
+
+  // method size
+  int method_size() const           { return _method_size; }
+  void set_method_size(int size) {
+    assert(0 <= size && size < (1 << 16), "invalid method size");
+    _method_size = size;
+  }
+
+  // constant pool for Klass* holding this method
+  ConstantPool* constants() const     { return constMethod()->constants(); }
+  void set_constants(ConstantPool* c) { constMethod()->set_constants(c); }
+
+  // max stack
+  // return original max stack size for method verification
+  int  verifier_max_stack() const     { return constMethod()->max_stack(); }
+  int  max_stack() const              { return constMethod()->max_stack() + extra_stack_entries(); }
+  void set_max_stack(int size)        { constMethod()->set_max_stack(size); }
+
+  // max locals
+  int  max_locals() const             { return constMethod()->max_locals(); }
+  void set_max_locals(int size)       { constMethod()->set_max_locals(size); }
+
+  int highest_comp_level() const;
+  void set_highest_comp_level(int level);
+  int highest_osr_comp_level() const;
+  void set_highest_osr_comp_level(int level);
+
+  // Count of times method was exited via exception while interpreting
+  void interpreter_throwout_increment(TRAPS) {
+    MethodCounters* mcs = get_method_counters(CHECK);
+    if (mcs != NULL) {
+      mcs->interpreter_throwout_increment();
+    }
+  }
+
+  int interpreter_throwout_count() const {
+    if (method_counters() == NULL) {
+      return 0;
+    } else {
+      return method_counters()->interpreter_throwout_count();
+    }
+  }
+
+  // size of parameters
+  int  size_of_parameters() const        { return constMethod()->size_of_parameters(); }
+  void set_size_of_parameters(int size)  { constMethod()->set_size_of_parameters(size); }
+
+  bool has_stackmap_table() const {
+    return constMethod()->has_stackmap_table();
+  }
+
+  Array<u1>* stackmap_data() const {
+    return constMethod()->stackmap_data();
+  }
+
+  void set_stackmap_data(Array<u1>* sd) {
+    constMethod()->set_stackmap_data(sd);
+  }
+
+  // exception handler table
+  bool has_exception_handler() const
+                             { return constMethod()->has_exception_handler(); }
+  int exception_table_length() const
+                             { return constMethod()->exception_table_length(); }
+  ExceptionTableElement* exception_table_start() const
+                             { return constMethod()->exception_table_start(); }
+
+  // Finds the first entry point bci of an exception handler for an
+  // exception of klass ex_klass thrown at throw_bci. A value of NULL
+  // for ex_klass indicates that the exception klass is not known; in
+  // this case it matches any constraint class. Returns -1 if the
+  // exception cannot be handled in this method. The handler
+  // constraint classes are loaded if necessary. Note that this may
+  // throw an exception if loading of the constraint classes causes
+  // an IllegalAccessError (bugid 4307310) or an OutOfMemoryError.
+  // If an exception is thrown, returns the bci of the
+  // exception handler which caused the exception to be thrown, which
+  // is needed for proper retries. See, for example,
+  // InterpreterRuntime::exception_handler_for_exception.
+  static int fast_exception_handler_bci_for(methodHandle mh, KlassHandle ex_klass, int throw_bci, TRAPS);
+
+  // method data access
+  MethodData* method_data() const {
+    return _method_data;
+  }
+
+  void set_method_data(MethodData* data) {
+    // The store into method must be released. On platforms without
+    // total store order (TSO) the reference may become visible before
+    // the initialization of data otherwise.
+    OrderAccess::release_store_ptr((volatile void *)&_method_data, data);
+  }
+
+  MethodCounters* method_counters() const {
+    return _method_counters;
+  }
+
+  void set_method_counters(MethodCounters* counters) {
+    // The store into method must be released. On platforms without
+    // total store order (TSO) the reference may become visible before
+    // the initialization of data otherwise.
+    OrderAccess::release_store_ptr((volatile void *)&_method_counters, counters);
+  }
+
+#ifdef TIERED
+  // We are reusing interpreter_invocation_count as a holder for the previous event count!
+  // We can do that since interpreter_invocation_count is not used in tiered.
+  int prev_event_count() const {
+    if (method_counters() == NULL) {
+      return 0;
+    } else {
+      return method_counters()->interpreter_invocation_count();
+    }
+  }
+  void set_prev_event_count(int count, TRAPS) {
+    MethodCounters* mcs = get_method_counters(CHECK);
+    if (mcs != NULL) {
+      mcs->set_interpreter_invocation_count(count);
+    }
+  }
+  jlong prev_time() const {
+    return method_counters() == NULL ? 0 : method_counters()->prev_time();
+  }
+  void set_prev_time(jlong time, TRAPS) {
+    MethodCounters* mcs = get_method_counters(CHECK);
+    if (mcs != NULL) {
+      mcs->set_prev_time(time);
+    }
+  }
+  float rate() const {
+    return method_counters() == NULL ? 0 : method_counters()->rate();
+  }
+  void set_rate(float rate, TRAPS) {
+    MethodCounters* mcs = get_method_counters(CHECK);
+    if (mcs != NULL) {
+      mcs->set_rate(rate);
+    }
+  }
+#endif
+
+  int invocation_count();
+  int backedge_count();
+
+  bool was_executed_more_than(int n);
+  bool was_never_executed()            { return !was_executed_more_than(0); }
+
+  static void build_interpreter_method_data(methodHandle method, TRAPS);
+
+  static MethodCounters* build_method_counters(Method* m, TRAPS);
+
+  int interpreter_invocation_count() {
+    if (TieredCompilation) return invocation_count();
+    else return (method_counters() == NULL) ? 0 :
+                 method_counters()->interpreter_invocation_count();
+  }
+  int increment_interpreter_invocation_count(TRAPS) {
+    if (TieredCompilation) ShouldNotReachHere();
+    MethodCounters* mcs = get_method_counters(CHECK_0);
+    return (mcs == NULL) ? 0 : mcs->increment_interpreter_invocation_count();
+  }
+
+#ifndef PRODUCT
+  int  compiled_invocation_count() const        { return _compiled_invocation_count; }
+  void set_compiled_invocation_count(int count) { _compiled_invocation_count = count; }
+#endif // not PRODUCT
+
+  // Clear (non-shared space) pointers which could not be relevant
+  // if this (shared) method were mapped into another JVM.
+  void remove_unshareable_info();
+
+  // nmethod/verified compiler entry
+  address verified_code_entry();
+  bool check_code() const;      // Not inline to avoid circular ref
+  nmethod* volatile code() const { assert(check_code(), ""); return (nmethod*)OrderAccess::load_ptr_acquire(&_code); }
+  void clear_code();            // Clear out any compiled code
+  static void set_code(methodHandle mh, nmethod* code);
+  void set_adapter_entry(AdapterHandlerEntry* adapter) { _adapter = adapter; }
+  address get_i2c_entry();
+  address get_c2i_entry();
+  address get_c2i_unverified_entry();
+  AdapterHandlerEntry* adapter() { return _adapter; }
+  // setup entry points
+  void link_method(methodHandle method, TRAPS);
+  // clear entry points. Used by sharing code
+  void unlink_method();
+
+  // vtable index
+  enum VtableIndexFlag {
+    // Valid vtable indexes are non-negative (>= 0).
+    // These few negative values are used as sentinels.
+    itable_index_max        = -10, // first itable index, growing downward
+    pending_itable_index    = -9,  // itable index will be assigned
+    invalid_vtable_index    = -4,  // distinct from any valid vtable index
+    garbage_vtable_index    = -3,  // not yet linked; no vtable layout yet
+    nonvirtual_vtable_index = -2   // there is no need for vtable dispatch
+    // 6330203 Note:  Do not use -1, which was overloaded with many meanings.
+  };
+  DEBUG_ONLY(bool valid_vtable_index() const     { return _vtable_index >= nonvirtual_vtable_index; })
+  bool has_vtable_index() const                  { return _vtable_index >= 0; }
+  int  vtable_index() const                      { return _vtable_index; }
+  void set_vtable_index(int index)               { _vtable_index = index; }
+  DEBUG_ONLY(bool valid_itable_index() const     { return _vtable_index <= pending_itable_index; })
+  bool has_itable_index() const                  { return _vtable_index <= itable_index_max; }
+  int  itable_index() const                      { assert(valid_itable_index(), "");
+                                                   return itable_index_max - _vtable_index; }
+  void set_itable_index(int index)               { _vtable_index = itable_index_max - index; assert(valid_itable_index(), ""); }
+
+  // interpreter entry
+  address interpreter_entry() const              { return _i2i_entry; }
+  // Only used at first initialization, so we can set both _i2i_entry and _from_interpreted_entry
+  void set_interpreter_entry(address entry)      { _i2i_entry = entry;  _from_interpreted_entry = entry; }
+
+  // native function (used for native methods only)
+  enum {
+    native_bind_event_is_interesting = true
+  };
+  address native_function() const                { return *(native_function_addr()); }
+  address critical_native_function();
+
+  // Must specify a real function (not NULL).
+  // Use clear_native_function() to unregister.
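+  // A minimal usage sketch (hypothetical caller; 'entry' stands for whatever
+  // address the JNI binding produced and is not a name defined in this file):
+  //
+  //   m->set_native_function(entry, Method::native_bind_event_is_interesting);
+  //   ...
+  //   m->clear_native_function();  // unregister again
+  //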
+  void set_native_function(address function, bool post_event_flag);
+  bool has_native_function() const;
+  void clear_native_function();
+
+  // signature handler (used for native methods only)
+  address signature_handler() const              { return *(signature_handler_addr()); }
+  void set_signature_handler(address handler);
+
+  // Interpreter oopmap support
+  void mask_for(int bci, InterpreterOopMap* mask);
+
+#ifndef PRODUCT
+  // operations on invocation counter
+  void print_invocation_count();
+#endif
+
+  // byte codes
+  void    set_code(address code)      { return constMethod()->set_code(code); }
+  address code_base() const           { return constMethod()->code_base(); }
+  bool    contains(address bcp) const { return constMethod()->contains(bcp); }
+
+  // prints byte codes
+  void print_codes() const            { print_codes_on(tty); }
+  void print_codes_on(outputStream* st) const                   PRODUCT_RETURN;
+  void print_codes_on(int from, int to, outputStream* st) const PRODUCT_RETURN;
+
+  // method parameters
+  bool has_method_parameters() const
+                         { return constMethod()->has_method_parameters(); }
+  int method_parameters_length() const
+                         { return constMethod()->method_parameters_length(); }
+  MethodParametersElement* method_parameters_start() const
+                         { return constMethod()->method_parameters_start(); }
+
+  // checked exceptions
+  int checked_exceptions_length() const
+                         { return constMethod()->checked_exceptions_length(); }
+  CheckedExceptionElement* checked_exceptions_start() const
+                         { return constMethod()->checked_exceptions_start(); }
+
+  // localvariable table
+  bool has_localvariable_table() const
+                         { return constMethod()->has_localvariable_table(); }
+  int localvariable_table_length() const
+                         { return constMethod()->localvariable_table_length(); }
+  LocalVariableTableElement* localvariable_table_start() const
+                         { return constMethod()->localvariable_table_start(); }
+
+  bool has_linenumber_table() const
+                         { return constMethod()->has_linenumber_table(); }
+  u_char* compressed_linenumber_table() const
+                         { return constMethod()->compressed_linenumber_table(); }
+
+  // method holder (the Klass* holding this method)
+  InstanceKlass* method_holder() const           { return constants()->pool_holder(); }
+
+  void compute_size_of_parameters(Thread *thread); // word size of parameters (receiver if any + arguments)
+  Symbol* klass_name() const;                    // returns the name of the method holder
+  BasicType result_type() const;                 // type of the method result
+  int result_type_index() const;                 // type index of the method result
+  bool is_returning_oop() const                  { BasicType r = result_type(); return (r == T_OBJECT || r == T_ARRAY); }
+  bool is_returning_fp() const                   { BasicType r = result_type(); return (r == T_FLOAT || r == T_DOUBLE); }
+
+  // Checked exceptions thrown by this method (resolved to mirrors)
+  objArrayHandle resolved_checked_exceptions(TRAPS) { return resolved_checked_exceptions_impl(this, THREAD); }
+
+  // Access flags
+  bool is_public() const                         { return access_flags().is_public();      }
+  bool is_private() const                        { return access_flags().is_private();     }
+  bool is_protected() const                      { return access_flags().is_protected();   }
+  bool is_package_private() const                { return !is_public() && !is_private() && !is_protected(); }
+  bool is_static() const                         { return access_flags().is_static();      }
+  bool is_final() const                          { return access_flags().is_final();       }
+  bool is_synchronized() const                   { return access_flags().is_synchronized();}
+  bool is_native() const                         { return access_flags().is_native();      }
+  bool is_abstract() const                       { return access_flags().is_abstract();    }
+  bool is_strict() const                         { return access_flags().is_strict();      }
+  bool is_synthetic() const                      { return access_flags().is_synthetic();   }
+
+  // returns true if contains only return operation
+  bool is_empty_method() const;
+
+  // returns true if this is a vanilla constructor
+  bool is_vanilla_constructor() const;
+
+  // checks method and its method holder
+  bool is_final_method() const;
+  bool is_final_method(AccessFlags class_access_flags) const;
+  bool is_default_method() const;
+
+  // true if method needs no dynamic dispatch (final and/or no vtable entry)
+  bool can_be_statically_bound() const;
+  bool can_be_statically_bound(AccessFlags class_access_flags) const;
+
+  // returns true if the method has any backward branches.
+  bool has_loops() {
+    return access_flags().loops_flag_init() ? access_flags().has_loops() : compute_has_loops_flag();
+  };
+
+  bool compute_has_loops_flag();
+
+  bool has_jsrs() {
+    return access_flags().has_jsrs();
+  };
+  void set_has_jsrs() {
+    _access_flags.set_has_jsrs();
+  }
+
+  // returns true if the method has any monitors.
+  bool has_monitors() const                      { return is_synchronized() || access_flags().has_monitor_bytecodes(); }
+  bool has_monitor_bytecodes() const             { return access_flags().has_monitor_bytecodes(); }
+
+  void set_has_monitor_bytecodes()               { _access_flags.set_has_monitor_bytecodes(); }
+
+  // monitor matching. This returns a conservative estimate of whether the monitorenter/monitorexit
+  // bytecodes properly nest in the method. It might return false even though they actually nest
+  // properly, since the information has not been computed yet.
+  bool guaranteed_monitor_matching() const       { return access_flags().is_monitor_matching(); }
+  void set_guaranteed_monitor_matching()         { _access_flags.set_monitor_matching(); }
+
+  // returns true if the method is an accessor function (setter/getter).
+  bool is_accessor() const;
+
+  // returns true if the method is an initializer (<init> or <clinit>).
+  bool is_initializer() const;
+
+  // returns true if the method is static OR if the classfile version < 51
+  bool has_valid_initializer_flags() const;
+
+  // returns true if the method name is <clinit> and the method has
+  // valid static initializer flags.
+  bool is_static_initializer() const;
+
+  // compiled code support
+  // NOTE: code() is inherently racy as deopt can be clearing code
+  // simultaneously. Use with caution.
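+  // E.g., a caller needing a consistent view should sample the field once
+  // (illustrative pattern only, not a member of this class):
+  //
+  //   nmethod* nm = m->code();  // single acquire-load
+  //   if (nm != NULL) {
+  //     // use nm; do not re-read m->code() and assume the same value
+  //   }
+  //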
+  bool has_compiled_code() const                 { return code() != NULL; }
+
+  // sizing
+  static int header_size()                       { return sizeof(Method)/HeapWordSize; }
+  static int size(bool is_native);
+  int size() const                               { return method_size(); }
+#if INCLUDE_SERVICES
+  void collect_statistics(KlassSizeStats *sz) const;
+#endif
+
+  // interpreter support
+  static ByteSize const_offset()                 { return byte_offset_of(Method, _constMethod ); }
+  static ByteSize access_flags_offset()          { return byte_offset_of(Method, _access_flags ); }
+#ifdef CC_INTERP
+  static ByteSize result_index_offset()          { return byte_offset_of(Method, _result_index ); }
+#endif /* CC_INTERP */
+  static ByteSize from_compiled_offset()         { return byte_offset_of(Method, _from_compiled_entry); }
+  static ByteSize code_offset()                  { return byte_offset_of(Method, _code); }
+  static ByteSize method_data_offset()           {
+    return byte_offset_of(Method, _method_data);
+  }
+  static ByteSize method_counters_offset()       {
+    return byte_offset_of(Method, _method_counters);
+  }
+#ifndef PRODUCT
+  static ByteSize compiled_invocation_counter_offset() { return byte_offset_of(Method, _compiled_invocation_count); }
+#endif // not PRODUCT
+  static ByteSize native_function_offset()       { return in_ByteSize(sizeof(Method)); }
+  static ByteSize from_interpreted_offset()      { return byte_offset_of(Method, _from_interpreted_entry ); }
+  static ByteSize interpreter_entry_offset()     { return byte_offset_of(Method, _i2i_entry ); }
+  static ByteSize signature_handler_offset()     { return in_ByteSize(sizeof(Method) + wordSize); }
+
+  // for code generation
+  static int method_data_offset_in_bytes()       { return offset_of(Method, _method_data); }
+  static int intrinsic_id_offset_in_bytes()      { return offset_of(Method, _intrinsic_id); }
+  static int intrinsic_id_size_in_bytes()        { return sizeof(u1); }
+
+  // Static methods that are used to implement member methods where an exposed this pointer
+  // is needed due to possible GCs
+  static objArrayHandle resolved_checked_exceptions_impl(Method* this_oop, TRAPS);
+
+  // Returns the byte code index from the byte code pointer
+  int     bci_from(address bcp) const;
+  address bcp_from(int bci) const;
+  int validate_bci_from_bcx(intptr_t bcx) const;
+
+  // Returns the line number for a bci if debugging information for the method is provided;
+  // -1 is returned otherwise.
+  int line_number_from_bci(int bci) const;
+
+  // Reflection support
+  bool is_overridden_in(Klass* k) const;
+
+  // Stack walking support
+  bool is_ignored_by_security_stack_walk() const;
+
+  // JSR 292 support
+  bool is_method_handle_intrinsic() const;       // MethodHandles::is_signature_polymorphic_intrinsic(intrinsic_id)
+  bool is_compiled_lambda_form() const;          // intrinsic_id() == vmIntrinsics::_compiledLambdaForm
+  bool has_member_arg() const;                   // intrinsic_id() == vmIntrinsics::_linkToSpecial, etc.
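+  // Hypothetical sketch only (the signature Symbol and the CHECK_ result
+  // value are placeholders, not prescribed by this header):
+  //
+  //   Symbol* sig = ...;  // any signature -- "anything at all", see below
+  //   methodHandle mh = Method::make_method_handle_intrinsic(
+  //       vmIntrinsics::_invokeBasic, sig, CHECK_(methodHandle()));
+  //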
+  static methodHandle make_method_handle_intrinsic(vmIntrinsics::ID iid, // _invokeBasic, _linkToVirtual
+                                                   Symbol* signature,    // anything at all
+                                                   TRAPS);
+  static Klass* check_non_bcp_klass(Klass* klass);
+
+  // How many extra stack entries for invokedynamic when it's enabled
+  static const int extra_stack_entries_for_jsr292 = 1;
+
+  // this operates only on invoke methods:
+  // presize interpreter frames for extra interpreter stack entries, if needed
+  // Account for the extra appendix argument for invokehandle/invokedynamic
+  static int extra_stack_entries() { return EnableInvokeDynamic ? extra_stack_entries_for_jsr292 : 0; }
+  static int extra_stack_words();  // = extra_stack_entries() * Interpreter::stackElementSize
+
+  // RedefineClasses() support:
+  bool is_old() const                            { return access_flags().is_old(); }
+  void set_is_old()                              { _access_flags.set_is_old(); }
+  bool is_obsolete() const                       { return access_flags().is_obsolete(); }
+  void set_is_obsolete()                         { _access_flags.set_is_obsolete(); }
+  bool on_stack() const                          { return access_flags().on_stack(); }
+  void set_on_stack(const bool value);
+
+  // see the definition in Method*.cpp for the gory details
+  bool should_not_be_cached() const;
+
+  // JVMTI Native method prefixing support:
+  bool is_prefixed_native() const                { return access_flags().is_prefixed_native(); }
+  void set_is_prefixed_native()                  { _access_flags.set_is_prefixed_native(); }
+
+  // Rewriting support
+  static methodHandle clone_with_new_data(methodHandle m, u_char* new_code, int new_code_length,
+                                          u_char* new_compressed_linenumber_table, int new_compressed_linenumber_size, TRAPS);
+
+  // jmethodID handling
+  // Because the useful life-span of a jmethodID cannot be determined,
+  // once created they are never reclaimed.  The methods to which they refer,
+  // however, can be GC'ed away if the class is unloaded or if the method is
+  // made obsolete or deleted -- in these cases, the jmethodID
+  // refers to NULL (as is the case for any weak reference).
+  static jmethodID make_jmethod_id(ClassLoaderData* loader_data, Method* mh);
+  static void destroy_jmethod_id(ClassLoaderData* loader_data, jmethodID mid);
+
+  // Use resolve_jmethod_id() in situations where the caller is expected
+  // to provide a valid jmethodID; the only sanity checks are in asserts;
+  // result guaranteed not to be NULL.
+  inline static Method* resolve_jmethod_id(jmethodID mid) {
+    assert(mid != NULL, "JNI method id should not be null");
+    return *((Method**)mid);
+  }
+
+  // Use checked_resolve_jmethod_id() in situations where the caller
+  // should provide a valid jmethodID, but might not. NULL is returned
+  // when the jmethodID does not refer to a valid method.
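+  // The two lookups side by side, as a sketch ('mid' is assumed to be a
+  // jmethodID obtained through JNI; nothing here goes beyond the comments):
+  //
+  //   Method* m1 = Method::resolve_jmethod_id(mid);          // asserts only; never returns NULL
+  //   Method* m2 = Method::checked_resolve_jmethod_id(mid);  // may return NULL
+  //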
+  static Method* checked_resolve_jmethod_id(jmethodID mid);
+
+  static void change_method_associated_with_jmethod_id(jmethodID old_jmid_ptr, Method* new_method);
+  static bool is_method_id(jmethodID mid);
+
+  // Clear methods
+  static void clear_jmethod_ids(ClassLoaderData* loader_data);
+  static void print_jmethod_ids(ClassLoaderData* loader_data, outputStream* out) PRODUCT_RETURN;
+
+  // Get this method's jmethodID -- allocate if it doesn't exist
+  jmethodID jmethod_id()                         { methodHandle this_h(this);
+                                                   return InstanceKlass::get_jmethod_id(method_holder(), this_h); }
+
+  // Lookup the jmethodID for this method.  Return NULL if not found.
+  // NOTE that this function can be called from a signal handler
+  // (see AsyncGetCallTrace support for Forte Analyzer) and this
+  // needs to be async-safe. No allocation should be done and
+  // so handles are not used to avoid deadlock.
+  jmethodID find_jmethod_id_or_null()            { return method_holder()->jmethod_id_or_null(this); }
+
+  // Support for inlining of intrinsic methods
+  vmIntrinsics::ID intrinsic_id() const          { return (vmIntrinsics::ID) _intrinsic_id; }
+  void set_intrinsic_id(vmIntrinsics::ID id)     { _intrinsic_id = (u1) id; }
+
+  // Helper routines for intrinsic_id() and vmIntrinsics::method().
+  void init_intrinsic_id();     // updates from _none if a match
+  static vmSymbols::SID klass_id_for_intrinsics(Klass* holder);
+
+  bool jfr_towrite()                 { return _jfr_towrite;      }
+  void set_jfr_towrite(bool x)       { _jfr_towrite = x;         }
+  bool caller_sensitive()            { return _caller_sensitive; }
+  void set_caller_sensitive(bool x)  { _caller_sensitive = x;    }
+  bool force_inline()                { return _force_inline;     }
+  void set_force_inline(bool x)      { _force_inline = x;        }
+  bool dont_inline()                 { return _dont_inline;      }
+  void set_dont_inline(bool x)       { _dont_inline = x;         }
+  bool is_hidden()                   { return _hidden;           }
+  void set_hidden(bool x)            { _hidden = x;              }
+  ConstMethod::MethodType method_type() const {
+    return _constMethod->method_type();
+  }
+  bool is_overpass() const { return method_type() == ConstMethod::OVERPASS; }
+
+  // On-stack replacement support
+  bool has_osr_nmethod(int level, bool match_level) {
+    return method_holder()->lookup_osr_nmethod(this, InvocationEntryBci, level, match_level) != NULL;
+  }
+
+  nmethod* lookup_osr_nmethod_for(int bci, int level, bool match_level) {
+    return method_holder()->lookup_osr_nmethod(this, bci, level, match_level);
+  }
+
+  // Inline cache support
+  void cleanup_inline_caches();
+
+  // Find if klass for method is loaded
+  bool is_klass_loaded_by_klass_index(int klass_index) const;
+  bool is_klass_loaded(int refinfo_index, bool must_be_resolved = false) const;
+
+  // Indicates whether compilation failed earlier for this method, or
+  // whether it is not compilable for another reason like having a
+  // breakpoint set in it.
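+  // E.g., a compilation policy might guard a submission roughly like this
+  // (sketch only, not code from this class):
+  //
+  //   if (!m->is_not_compilable(CompLevel_full_optimization)) {
+  //     // ... submit m to the compile queue ...
+  //   }
+  //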
+  bool is_not_compilable(int comp_level = CompLevel_any) const;
+  void set_not_compilable(int comp_level = CompLevel_all, bool report = true, const char* reason = NULL);
+  void set_not_compilable_quietly(int comp_level = CompLevel_all) {
+    set_not_compilable(comp_level, false);
+  }
+  bool is_not_osr_compilable(int comp_level = CompLevel_any) const;
+  void set_not_osr_compilable(int comp_level = CompLevel_all, bool report = true, const char* reason = NULL);
+  void set_not_osr_compilable_quietly(int comp_level = CompLevel_all) {
+    set_not_osr_compilable(comp_level, false);
+  }
+  bool is_always_compilable() const;
+
+ private:
+  void print_made_not_compilable(int comp_level, bool is_osr, bool report, const char* reason);
+
+ public:
+  MethodCounters* get_method_counters(TRAPS) {
+    if (_method_counters == NULL) {
+      build_method_counters(this, CHECK_AND_CLEAR_NULL);
+    }
+    return _method_counters;
+  }
+
+  bool   is_not_c1_compilable() const         { return access_flags().is_not_c1_compilable(); }
+  void  set_not_c1_compilable()               { _access_flags.set_not_c1_compilable();        }
+  void clear_not_c1_compilable()              { _access_flags.clear_not_c1_compilable();      }
+  bool   is_not_c2_compilable() const         { return access_flags().is_not_c2_compilable(); }
+  void  set_not_c2_compilable()               { _access_flags.set_not_c2_compilable();        }
+  void clear_not_c2_compilable()              { _access_flags.clear_not_c2_compilable();      }
+
+  bool   is_not_c1_osr_compilable() const     { return is_not_c1_compilable();  }  // don't waste an accessFlags bit
+  void  set_not_c1_osr_compilable()           {        set_not_c1_compilable(); }  // don't waste an accessFlags bit
+  void clear_not_c1_osr_compilable()          {      clear_not_c1_compilable(); }  // don't waste an accessFlags bit
+  bool   is_not_c2_osr_compilable() const     { return access_flags().is_not_c2_osr_compilable(); }
+  void  set_not_c2_osr_compilable()           { _access_flags.set_not_c2_osr_compilable();        }
+  void clear_not_c2_osr_compilable()          { _access_flags.clear_not_c2_osr_compilable();      }
+
+  // Background compilation support
+  bool queued_for_compilation() const         { return access_flags().queued_for_compilation(); }
+  void set_queued_for_compilation()           { _access_flags.set_queued_for_compilation();     }
+  void clear_queued_for_compilation()         { _access_flags.clear_queued_for_compilation();   }
+
+  // Resolve all classes in signature, return 'true' if successful
+  static bool load_signature_classes(methodHandle m, TRAPS);
+
+  // Returns true if not all classes referenced in the signature, including the return type, have been loaded
+  static bool has_unloaded_classes_in_signature(methodHandle m, TRAPS);
+
+  // Printing
+  void print_short_name(outputStream* st = tty); // prints as klassname::methodname; Exposed so field engineers can debug VM
+#if INCLUDE_JVMTI
+  void print_name(outputStream* st = tty); // prints as "virtual void foo(int)"; exposed for TraceRedefineClasses
+#else
+  void print_name(outputStream* st = tty) PRODUCT_RETURN; // prints as "virtual void foo(int)"
+#endif
+
+  // Helper routine used for method sorting
+  static void sort_methods(Array<Method*>* methods, bool idempotent = false, bool set_idnums = true);
+
+  // Deallocation function for redefine classes or if an error occurs
+  void deallocate_contents(ClassLoaderData* loader_data);
+
+  // Printing
+#ifndef PRODUCT
+  void print_on(outputStream* st) const;
+#endif
+  void print_value_on(outputStream* st) const;
+
+  const char* internal_name() const { return "{method}"; }
+
+  // Check for valid method pointer
+  static bool has_method_vptr(const void* ptr);
+  bool is_valid_method() const;
+
+  // Verify
+  void verify() { verify_on(tty); }
+  void verify_on(outputStream* st);
+
+ private:
+
+  // Inlined elements
+  address* native_function_addr() const          { assert(is_native(), "must be native"); return (address*) (this+1); }
+  address* signature_handler_addr() const        { return native_function_addr() + 1; }
+};
+
+
+// Utility class for compressing line number tables
+
+class CompressedLineNumberWriteStream: public CompressedWriteStream {
+ private:
+  int _bci;
+  int _line;
+ public:
+  // Constructor
+  CompressedLineNumberWriteStream(int initial_size) : CompressedWriteStream(initial_size), _bci(0), _line(0) {}
+  CompressedLineNumberWriteStream(u_char* buffer, int initial_size) : CompressedWriteStream(buffer, initial_size), _bci(0), _line(0) {}
+
+  // Write (bci, line number) pair to stream
+  void write_pair_regular(int bci_delta, int line_delta);
+
+  inline void write_pair_inline(int bci, int line) {
+    int bci_delta = bci - _bci;
+    int line_delta = line - _line;
+    _bci = bci;
+    _line = line;
+    // Skip (0,0) deltas - they do not add information and conflict with terminator.
+    if (bci_delta == 0 && line_delta == 0) return;
+    // Check if bci is 5-bit and line number 3-bit unsigned.
+    if (((bci_delta & ~0x1F) == 0) && ((line_delta & ~0x7) == 0)) {
+      // Compress into single byte.
+      jubyte value = ((jubyte) bci_delta << 3) | (jubyte) line_delta;
+      // Check that value doesn't match escape character.
+      if (value != 0xFF) {
+        write_byte(value);
+        return;
+      }
+    }
+    write_pair_regular(bci_delta, line_delta);
+  }
+
+// Windows AMD64 + Apr 2005 PSDK with /O2 generates bad code for write_pair.
+// Disabling optimization doesn't work for methods in header files
+// so we force it to call through the non-optimized version in the .cpp.
+// It's gross, but it's the only way we can ensure that all callers are
+// fixed.  _MSC_VER is defined by the Windows compiler.
+#if defined(_M_AMD64) && _MSC_VER >= 1400
+  void write_pair(int bci, int line);
+#else
+  void write_pair(int bci, int line) { write_pair_inline(bci, line); }
+#endif
+
+  // Write end-of-stream marker
+  void write_terminator() { write_byte(0); }
+};
+
+
+// Utility class for decompressing line number tables
+
+class CompressedLineNumberReadStream: public CompressedReadStream {
+ private:
+  int _bci;
+  int _line;
+ public:
+  // Constructor
+  CompressedLineNumberReadStream(u_char* buffer);
+  // Read (bci, line number) pair from stream. Returns false at end-of-stream.
+  bool read_pair();
+  // Accessing bci and line number (after calling read_pair)
+  int bci() const  { return _bci;  }
+  int line() const { return _line; }
+};
+
+
+/// Fast Breakpoints.
+
+// If this structure gets more complicated (because bpts get numerous),
+// move it into its own header.
+
+// There is presently no provision for concurrent access
+// to breakpoint lists, which is only OK for JVMTI because
+// breakpoints are written only at safepoints, and are read
+// concurrently only outside of safepoints.
+
+class BreakpointInfo : public CHeapObj<mtClass> {
+  friend class VMStructs;
+ private:
+  Bytecodes::Code  _orig_bytecode;
+  int              _bci;
+  u2               _name_index;       // of method
+  u2               _signature_index;  // of method
+  BreakpointInfo*  _next;             // simple storage allocation
+
+ public:
+  BreakpointInfo(Method* m, int bci);
+
+  // accessors
+  Bytecodes::Code orig_bytecode()                     { return _orig_bytecode; }
+  void set_orig_bytecode(Bytecodes::Code code)        { _orig_bytecode = code; }
+  int bci()                                           { return _bci; }
+
+  BreakpointInfo* next() const                        { return _next; }
+  void set_next(BreakpointInfo* n)                    { _next = n; }
+
+  // helpers for searchers
+  bool match(const Method* m, int bci) {
+    return bci == _bci && match(m);
+  }
+
+  bool match(const Method* m) {
+    return _name_index == m->name_index() &&
+           _signature_index == m->signature_index();
+  }
+
+  void set(Method* method);
+  void clear(Method* method);
+};
+
+// Utility class for accessing exception handlers
+class ExceptionTable : public StackObj {
+ private:
+  ExceptionTableElement* _table;
+  u2  _length;
+
+ public:
+  ExceptionTable(const Method* m) {
+    if (m->has_exception_handler()) {
+      _table = m->exception_table_start();
+      _length = m->exception_table_length();
+    } else {
+      _table = NULL;
+      _length = 0;
+    }
+  }
+
+  int length() const {
+    return _length;
+  }
+
+  u2 start_pc(int idx) const {
+    assert(idx < _length, "out of bounds");
+    return _table[idx].start_pc;
+  }
+
+  void set_start_pc(int idx, u2 value) {
+    assert(idx < _length, "out of bounds");
+    _table[idx].start_pc = value;
+  }
+
+  u2 end_pc(int idx) const {
+    assert(idx < _length, "out of bounds");
+    return _table[idx].end_pc;
+  }
+
+  void set_end_pc(int idx, u2 value) {
+    assert(idx < _length, "out of bounds");
+    _table[idx].end_pc = value;
+  }
+
+  u2 handler_pc(int idx) const {
+    assert(idx < _length, "out of bounds");
+    return _table[idx].handler_pc;
+  }
+
+  void set_handler_pc(int idx, u2 value) {
+    assert(idx < _length, "out of bounds");
+    _table[idx].handler_pc = value;
+  }
+
+  u2 catch_type_index(int idx) const {
+    assert(idx < _length, "out of bounds");
+    return _table[idx].catch_type_index;
+  }
+
+  void set_catch_type_index(int idx, u2 value) {
+    assert(idx < _length, "out of bounds");
+    _table[idx].catch_type_index = value;
+  }
+};
+
+#endif // SHARE_VM_OOPS_METHODOOP_HPP