1.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000 1.2 +++ b/src/share/vm/oops/cpCache.hpp Wed Apr 27 01:25:04 2016 +0800 1.3 @@ -0,0 +1,499 @@ 1.4 +/* 1.5 + * Copyright (c) 1998, 2014, Oracle and/or its affiliates. All rights reserved. 1.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 1.7 + * 1.8 + * This code is free software; you can redistribute it and/or modify it 1.9 + * under the terms of the GNU General Public License version 2 only, as 1.10 + * published by the Free Software Foundation. 1.11 + * 1.12 + * This code is distributed in the hope that it will be useful, but WITHOUT 1.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 1.14 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 1.15 + * version 2 for more details (a copy is included in the LICENSE file that 1.16 + * accompanied this code). 1.17 + * 1.18 + * You should have received a copy of the GNU General Public License version 1.19 + * 2 along with this work; if not, write to the Free Software Foundation, 1.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 1.21 + * 1.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 1.23 + * or visit www.oracle.com if you need additional information or have any 1.24 + * questions. 1.25 + * 1.26 + */ 1.27 + 1.28 +#ifndef SHARE_VM_OOPS_CPCACHEOOP_HPP 1.29 +#define SHARE_VM_OOPS_CPCACHEOOP_HPP 1.30 + 1.31 +#include "interpreter/bytecodes.hpp" 1.32 +#include "memory/allocation.hpp" 1.33 +#include "utilities/array.hpp" 1.34 + 1.35 +class PSPromotionManager; 1.36 + 1.37 +// The ConstantPoolCache is not a cache! It is the resolution table that the 1.38 +// interpreter uses to avoid going into the runtime and a way to access resolved 1.39 +// values. 1.40 + 1.41 +// A ConstantPoolCacheEntry describes an individual entry of the constant 1.42 +// pool cache. 
There are two principal kinds of entries: field entries for instance and
// static field access, and method entries for invokes. Some of
// the entry layout is shared and looks as follows:
//
// bit number |31                0|
// bit length |-8--|-8--|---16----|
// --------------------------------
// _indices   [ b2 | b1 |  index  ]  index = constant_pool_index
// _f1        [  entry specific   ]  metadata ptr (method or klass)
// _f2        [  entry specific   ]  vtable or res_ref index, or vfinal method ptr
// _flags     [tos|0|F=1|0|0|0|f|v|0 |0000|field_index] (for field entries)
// bit length [ 4 |1| 1 |1|1|1|1|1|1 |-4--|----16-----]
// _flags     [tos|0|F=0|M|A|I|f|0|vf|0000|00000|psize] (for method entries)
// bit length [ 4 |1| 1 |1|1|1|1|1|1 |-4--|--8--|--8--]
//
// --------------------------------
//
// with:
// index       = original constant pool index
// b1          = bytecode 1
// b2          = bytecode 2
// psize       = parameters size (method entries only)
// field_index = index into field information in holder InstanceKlass
//               The index max is 0xffff (max number of fields in constant pool)
//               and is multiplied by (InstanceKlass::next_offset) when accessing.
// tos  = TosState
// F    = the entry is for a field (or F=0 for a method)
// A    = call site has an appendix argument (loaded from resolved references)
// I    = interface call is forced virtual (must use a vtable index or vfinal)
// f    = field or method is final
// v    = field is volatile
// vf   = virtual but final (method entries only: is_vfinal())
//
// The flags after TosState have the following interpretation:
// bit 26: 1 for fields, 0 for methods (the F bit; see is_field_entry_shift)
// f  flag true if field is marked final
// v  flag true if field is volatile (only for fields)
// vf flag true if _f2 contains a final Method* rather than a vtable index (is_vfinal)
// I  flag true if an invokeinterface call was forced to virtual dispatch
//    (e.g., for a method declared in class Object)
//
// The flags 31, 30, 29, 28 together build a 4 bit number 0 to 8 with the
// following mapping to the TosState states:
//
// btos: 0
// ctos: 1
// stos: 2
// itos: 3
// ltos: 4
// ftos: 5
// dtos: 6
// atos: 7
// vtos: 8
//
// Entry specific: field entries:
// _indices = get (b1 section) and put (b2 section) bytecodes, original constant pool index
// _f1      = field holder (as a java.lang.Class, not a Klass*)
// _f2      = field offset in bytes
// _flags   = field type information, original FieldInfo index in field holder
//            (field_index section)
//
// Entry specific: method entries:
// _indices = invoke code for f1 (b1 section), invoke code for f2 (b2 section),
//            original constant pool index
// _f1      = Method* for non-virtual calls, unused by virtual calls.
//            for interface calls, which are essentially virtual but need a klass,
//            contains Klass* for the corresponding interface.
1.108 +// for invokedynamic, f1 contains a site-specific CallSite object (as an appendix) 1.109 +// for invokehandle, f1 contains a site-specific MethodType object (as an appendix) 1.110 +// (upcoming metadata changes will move the appendix to a separate array) 1.111 +// _f2 = vtable/itable index (or final Method*) for virtual calls only, 1.112 +// unused by non-virtual. The is_vfinal flag indicates this is a 1.113 +// method pointer for a final method, not an index. 1.114 +// _flags = method type info (t section), 1.115 +// virtual final bit (vfinal), 1.116 +// parameter size (psize section) 1.117 +// 1.118 +// Note: invokevirtual & invokespecial bytecodes can share the same constant 1.119 +// pool entry and thus the same constant pool cache entry. All invoke 1.120 +// bytecodes but invokevirtual use only _f1 and the corresponding b1 1.121 +// bytecode, while invokevirtual uses only _f2 and the corresponding 1.122 +// b2 bytecode. The value of _flags is shared for both types of entries. 1.123 +// 1.124 +// The fields are volatile so that they are stored in the order written in the 1.125 +// source code. The _indices field with the bytecode must be written last. 

class CallInfo;

// A ConstantPoolCacheEntry is one slot of the ConstantPoolCache. It records
// the resolution state of a single field-access or invoke bytecode; see the
// layout description in the comment block at the top of this file.
class ConstantPoolCacheEntry VALUE_OBJ_CLASS_SPEC {
  friend class VMStructs;
  friend class constantPoolCacheKlass;
  friend class ConstantPool;
  friend class InterpreterRuntime;

 private:
  // The fields are volatile so that they are stored in the order written in
  // the source code (see the note above); _indices, which carries the
  // rewritten bytecodes that signal "resolved", must be written last.
  volatile intx     _indices;  // constant pool index & rewrite bytecodes
  volatile Metadata* _f1;      // entry specific metadata field
  volatile intx     _f2;      // entry specific int/metadata field
  volatile intx     _flags;   // flags


  // The setters below are monotonic: each field may only move from its
  // initial NULL/0 state to one resolved value (asserted in debug builds).
  void set_bytecode_1(Bytecodes::Code code);
  void set_bytecode_2(Bytecodes::Code code);
  void set_f1(Metadata* f1) {
    Metadata* existing_f1 = (Metadata*)_f1; // read once
    assert(existing_f1 == NULL || existing_f1 == f1, "illegal field change");
    _f1 = f1;
  }
  // Same contract as set_f1; presumably performs a release store so a racing
  // reader pairing with f1_ord() sees a fully initialized value
  // (defined in cpCache.cpp -- NOTE(review): confirm against the .cpp).
  void release_set_f1(Metadata* f1);
  void set_f2(intx f2) {
    intx existing_f2 = _f2; // read once
    assert(existing_f2 == 0 || existing_f2 == f2, "illegal field change");
    _f2 = f2;
  }
  void set_f2_as_vfinal_method(Method* f2) {
    assert(is_vfinal(), "flags must be set");
    set_f2((intx)f2);
  }
  // Packs TosState, option bits, and the field index / parameter size into a
  // single flags word (layout documented at the top of this file).
  int  make_flags(TosState state, int option_bits, int field_index_or_method_params);
  void set_flags(intx flags)                     { _flags = flags; }
  bool init_flags_atomic(intx flags);
  void set_field_flags(TosState field_type, int option_bits, int field_index) {
    assert((field_index & field_index_mask) == field_index, "field_index in range");
    // The F bit marks this entry as a field entry.
    set_flags(make_flags(field_type, option_bits | (1 << is_field_entry_shift), field_index));
  }
  void set_method_flags(TosState return_type, int option_bits, int method_params) {
    assert((method_params & parameter_size_mask) == method_params, "method_params in range");
    set_flags(make_flags(return_type, option_bits, method_params));
  }
  bool init_method_flags_atomic(TosState return_type, int option_bits, int method_params) {
    assert((method_params & parameter_size_mask) == method_params, "method_params in range");
    return init_flags_atomic(make_flags(return_type, option_bits, method_params));
  }

 public:
  // specific bit definitions for the flags field:
  // (Note: the interpreter must use these definitions to access the CP cache.)
  enum {
    // high order bits are the TosState corresponding to field type or method return type
    tos_state_bits             = 4,
    tos_state_mask             = right_n_bits(tos_state_bits),
    tos_state_shift            = BitsPerInt - tos_state_bits,  // see verify_tos_state_shift below
    // misc. option bits; can be any bit position in [16..27]
    is_field_entry_shift       = 26,  // (F) is it a field or a method?
    has_method_type_shift      = 25,  // (M) does the call site have a MethodType?
    has_appendix_shift         = 24,  // (A) does the call site have an appendix argument?
    is_forced_virtual_shift    = 23,  // (I) is the interface reference forced to virtual mode?
    is_final_shift             = 22,  // (f) is the field or method final?
    is_volatile_shift          = 21,  // (v) is the field volatile?
    is_vfinal_shift            = 20,  // (vf) did the call resolve to a final method?
    // low order bits give field index (for FieldInfo) or method parameter size:
    field_index_bits           = 16,
    field_index_mask           = right_n_bits(field_index_bits),
    parameter_size_bits        = 8,  // subset of field_index_mask, range is 0..255
    parameter_size_mask        = right_n_bits(parameter_size_bits),
    option_bits_mask           = ~(((-1) << tos_state_shift) | (field_index_mask | parameter_size_mask))
  };

  // specific bit definitions for the indices field:
  // [ b2 | b1 | original constant pool index ] -- 32 bits in total.
  enum {
    cp_index_bits              = 2*BitsPerByte,
    cp_index_mask              = right_n_bits(cp_index_bits),
    bytecode_1_shift           = cp_index_bits,
    bytecode_1_mask            = right_n_bits(BitsPerByte), // == (u1)0xFF
    bytecode_2_shift           = cp_index_bits + BitsPerByte,
    bytecode_2_mask            = right_n_bits(BitsPerByte)  // == (u1)0xFF
  };


  // Initialization
  void initialize_entry(int original_index);  // initialize primary entry
  void initialize_resolved_reference_index(int ref_index) {
    assert(_f2 == 0, "set once");  // note: ref_index might be zero also
    _f2 = ref_index;
  }

  void set_field(                                // sets entry to resolved field state
    Bytecodes::Code get_code,                    // the bytecode used for reading the field
    Bytecodes::Code put_code,                    // the bytecode used for writing the field
    KlassHandle     field_holder,                // the object/klass holding the field
    int             orig_field_index,            // the original field index in the field holder
    int             field_offset,                // the field offset in words in the field holder
    TosState        field_type,                  // the (machine) field type
    bool            is_final,                    // the field is final
    bool            is_volatile,                 // the field is volatile
    Klass*          root_klass                   // needed by the GC to dirty the klass
  );

 private:
  // Shared implementation for set_direct_call/set_vtable_call below.
  void set_direct_or_vtable_call(
    Bytecodes::Code invoke_code,                 // the bytecode used for invoking the method
    methodHandle    method,                      // the method/prototype if any (NULL, otherwise)
    int             vtable_index                 // the vtable index if any, else negative
  );

 public:
  void set_direct_call(                          // sets entry to exact concrete method entry
    Bytecodes::Code invoke_code,                 // the bytecode used for invoking the method
    methodHandle    method                       // the method to call
  );

  void set_vtable_call(                          // sets entry to vtable index
    Bytecodes::Code invoke_code,                 // the bytecode used for invoking the method
    methodHandle    method,                      // resolved method which declares the vtable index
    int             vtable_index                 // the vtable index
  );

  void set_itable_call(
    Bytecodes::Code invoke_code,                 // the bytecode used; must be invokeinterface
    methodHandle    method,                      // the resolved interface method
    int             itable_index                 // index into itable for the method
  );

  void set_method_handle(
    constantPoolHandle cpool,                    // holding constant pool (required for locking)
    const CallInfo &call_info                    // Call link information
  );

  void set_dynamic_call(
    constantPoolHandle cpool,                    // holding constant pool (required for locking)
    const CallInfo &call_info                    // Call link information
  );

  // Common code for invokedynamic and MH invocations.

  // The "appendix" is an optional call-site-specific parameter which is
  // pushed by the JVM at the end of the argument list. This argument may
  // be a MethodType for the MH.invokes and a CallSite for an invokedynamic
  // instruction. However, its exact type and use depends on the Java upcall,
  // which simply returns a compiled LambdaForm along with any reference
  // that LambdaForm needs to complete the call. If the upcall returns a
  // null appendix, the argument is not passed at all.
  //
  // The appendix is *not* represented in the signature of the symbolic
  // reference for the call site, but (if present) it *is* represented in
  // the Method* bound to the site. This means that static and dynamic
  // resolution logic needs to make slightly different assessments about the
  // number and types of arguments.
  void set_method_handle_common(
    constantPoolHandle cpool,                    // holding constant pool (required for locking)
    Bytecodes::Code invoke_code,                 // _invokehandle or _invokedynamic
    const CallInfo &call_info                    // Call link information
  );

  // invokedynamic and invokehandle call sites have two entries in the
  // resolved references array:
  //   appendix   (at index+0)
  //   MethodType (at index+1)
  enum {
    _indy_resolved_references_appendix_offset    = 0,
    _indy_resolved_references_method_type_offset = 1,
    _indy_resolved_references_entries
  };

  Method*      method_if_resolved(constantPoolHandle cpool);
  oop        appendix_if_resolved(constantPoolHandle cpool);
  oop     method_type_if_resolved(constantPoolHandle cpool);

  void set_parameter_size(int value);

  // Which bytecode number (1 or 2) in the index field is valid for this bytecode?
  // Returns -1 if neither is valid.
  static int bytecode_number(Bytecodes::Code code) {
    switch (code) {
      case Bytecodes::_getstatic       :    // fall through
      case Bytecodes::_getfield        :    // fall through
      case Bytecodes::_invokespecial   :    // fall through
      case Bytecodes::_invokestatic    :    // fall through
      case Bytecodes::_invokehandle    :    // fall through
      case Bytecodes::_invokedynamic   :    // fall through
      case Bytecodes::_invokeinterface : return 1;
      case Bytecodes::_putstatic       :    // fall through
      case Bytecodes::_putfield        :    // fall through
      case Bytecodes::_invokevirtual   : return 2;
      default                          : break;
    }
    return -1;
  }

  // Has this bytecode been resolved? Only valid for invokes and get/put field/static.
  bool is_resolved(Bytecodes::Code code) const {
    switch (bytecode_number(code)) {
      case 1:  return (bytecode_1() == code);
      case 2:  return (bytecode_2() == code);
    }
    return false;      // default: not resolved
  }

  // Accessors
  // Note: the encoded state (cp index + two bytecodes) fits in the low
  // 32 bits of _indices (see the indices enum above), so narrowing the
  // intx to int here loses no information.
  int indices() const                            { return _indices; }
  int indices_ord() const                        { return (intx)OrderAccess::load_ptr_acquire(&_indices); }
  int constant_pool_index() const                { return (indices() & cp_index_mask); }
  // bytecode_1/bytecode_2 use the acquire load so that a resolved bytecode
  // is only observed after the entry data published before it.
  Bytecodes::Code bytecode_1() const             { return Bytecodes::cast((indices_ord() >> bytecode_1_shift) & bytecode_1_mask); }
  Bytecodes::Code bytecode_2() const             { return Bytecodes::cast((indices_ord() >> bytecode_2_shift) & bytecode_2_mask); }
  Metadata* f1_ord() const                       { return (Metadata *)OrderAccess::load_ptr_acquire(&_f1); }
  Method*   f1_as_method() const                 { Metadata* f1 = f1_ord(); assert(f1 == NULL || f1->is_method(), ""); return (Method*)f1; }
  Klass*    f1_as_klass() const                  { Metadata* f1 = f1_ord(); assert(f1 == NULL || f1->is_klass(), ""); return (Klass*)f1; }
  // Use the accessor f1() to acquire _f1's value. This is needed for
  // example in BytecodeInterpreter::run(), where is_f1_null() is
  // called to check if an invokedynamic call is resolved. This load
  // of _f1 must be ordered with the loads performed by
  // cache->main_entry_index().
  bool      is_f1_null() const                   { Metadata* f1 = f1_ord(); return f1 == NULL; }  // classifies a CPC entry as unbound
  int       f2_as_index() const                  { assert(!is_vfinal(), ""); return (int) _f2; }
  Method*   f2_as_vfinal_method() const          { assert(is_vfinal(), ""); return (Method*)_f2; }
  int  field_index() const                       { assert(is_field_entry(),  ""); return (_flags & field_index_mask); }
  int  parameter_size() const                    { assert(is_method_entry(), ""); return (_flags & parameter_size_mask); }
  bool is_volatile() const                       { return (_flags & (1 << is_volatile_shift))       != 0; }
  bool is_final() const                          { return (_flags & (1 << is_final_shift))          != 0; }
  bool is_forced_virtual() const                 { return (_flags & (1 << is_forced_virtual_shift)) != 0; }
  bool is_vfinal() const                         { return (_flags & (1 << is_vfinal_shift))         != 0; }
  // has_appendix/has_method_type are only meaningful once the entry is
  // bound, hence the is_f1_null() guard.
  bool has_appendix() const                      { return (!is_f1_null()) && (_flags & (1 << has_appendix_shift))    != 0; }
  bool has_method_type() const                   { return (!is_f1_null()) && (_flags & (1 << has_method_type_shift)) != 0; }
  bool is_method_entry() const                   { return (_flags & (1 << is_field_entry_shift))    == 0; }
  bool is_field_entry() const                    { return (_flags & (1 << is_field_entry_shift))    != 0; }
  bool is_byte() const                           { return flag_state() == btos; }
  bool is_char() const                           { return flag_state() == ctos; }
  bool is_short() const                          { return flag_state() == stos; }
  bool is_int() const                            { return flag_state() == itos; }
  bool is_long() const                           { return flag_state() == ltos; }
  bool is_float() const                          { return flag_state() == ftos; }
  bool is_double() const                         { return flag_state() == dtos; }
  bool is_object() const                         { return flag_state() == atos; }
  TosState flag_state() const                    { assert((uint)number_of_states <= (uint)tos_state_mask+1, "");
                                                   return (TosState)((_flags >> tos_state_shift) & tos_state_mask); }

  // Code generation support
  static WordSize size()                         { return in_WordSize(sizeof(ConstantPoolCacheEntry) / HeapWordSize); }
  static ByteSize size_in_bytes()                { return in_ByteSize(sizeof(ConstantPoolCacheEntry)); }
  static ByteSize indices_offset()               { return byte_offset_of(ConstantPoolCacheEntry, _indices); }
  static ByteSize f1_offset()                    { return byte_offset_of(ConstantPoolCacheEntry, _f1); }
  static ByteSize f2_offset()                    { return byte_offset_of(ConstantPoolCacheEntry, _f2); }
  static ByteSize flags_offset()                 { return byte_offset_of(ConstantPoolCacheEntry, _flags); }

#if INCLUDE_JVMTI
  // RedefineClasses() API support:
  // If this ConstantPoolCacheEntry refers to old_method then update it
  // to refer to new_method.
  // trace_name_printed is set to true if the current call has
  // printed the klass name so that other routines in the adjust_*
  // group don't print the klass name.
  bool adjust_method_entry(Method* old_method, Method* new_method,
         bool * trace_name_printed);
  bool check_no_old_or_obsolete_entries();
  bool is_interesting_method_entry(Klass* k);
#endif // INCLUDE_JVMTI

  // Debugging & Printing
  void print (outputStream* st, int index) const;
  void verify(outputStream* st) const;

  static void verify_tos_state_shift() {
    // When shifting flags as a 32-bit int, make sure we don't need an extra mask for tos_state:
    assert((((u4)-1 >> tos_state_shift) & ~tos_state_mask) == 0, "no need for tos_state mask");
  }
};


// A constant pool cache is a runtime data structure set aside for a constant pool.
// The cache holds interpreter runtime information for all field access and invoke bytecodes.
// The cache is created and initialized before a class is actively used (i.e., initialized);
// the individual cache entries are filled at resolution (i.e., "link") time
// (see also: rewriter.*).

class ConstantPoolCache: public MetaspaceObj {
  friend class VMStructs;
  friend class MetadataFactory;
 private:
  int             _length;         // number of ConstantPoolCacheEntry slots that follow this header
  ConstantPool*   _constant_pool;  // the corresponding constant pool

  // Sizing
  debug_only(friend class ClassVerifier;)

  // Constructor. Private: instances are created through allocate() below.
  // The entries live directly after this header (see base()); initialize()
  // fills them from the rewriter's index maps, and the debug loop checks
  // that every entry starts out unbound.
  ConstantPoolCache(int length,
                    const intStack& inverse_index_map,
                    const intStack& invokedynamic_inverse_index_map,
                    const intStack& invokedynamic_references_map) :
                          _length(length),
                          _constant_pool(NULL) {
    initialize(inverse_index_map, invokedynamic_inverse_index_map,
               invokedynamic_references_map);
    for (int i = 0; i < length; i++) {
      assert(entry_at(i)->is_f1_null(), "Failed to clear?");
    }
  }

  // Initialization
  void initialize(const intArray& inverse_index_map,
                  const intArray& invokedynamic_inverse_index_map,
                  const intArray& invokedynamic_references_map);
 public:
  // Allocates header plus 'length' entries in the loader's metaspace.
  static ConstantPoolCache* allocate(ClassLoaderData* loader_data,
                                     const intStack& cp_cache_map,
                                     const intStack& invokedynamic_cp_cache_map,
                                     const intStack& invokedynamic_references_map, TRAPS);
  bool is_constantPoolCache() const { return true; }

  int length() const                             { return _length; }
 private:
  void set_length(int length)                    { _length = length; }

  // Sizes are in heap words, matching MetaspaceObj conventions.
  static int header_size()                       { return sizeof(ConstantPoolCache) / HeapWordSize; }
  static int size(int length)                    { return align_object_size(header_size() + length * in_words(ConstantPoolCacheEntry::size())); }
 public:
  int size() const                               { return size(length()); }
 private:

  // Helpers
  ConstantPool**          constant_pool_addr()   { return &_constant_pool; }
  // First entry slot, located immediately after this header object.
  ConstantPoolCacheEntry* base() const           { return (ConstantPoolCacheEntry*)((address)this + in_bytes(base_offset())); }

  friend class constantPoolCacheKlass;
  friend class ConstantPoolCacheEntry;

 public:
  // Accessors
  void set_constant_pool(ConstantPool* pool)     { _constant_pool = pool; }
  ConstantPool* constant_pool() const            { return _constant_pool; }
  // Fetches the entry at the given index.
  // In either case the index must not be encoded or byte-swapped in any way.
  ConstantPoolCacheEntry* entry_at(int i) const {
    assert(0 <= i && i < length(), "index out of bounds");
    return base() + i;
  }

  // Code generation
  static ByteSize base_offset()                  { return in_ByteSize(sizeof(ConstantPoolCache)); }
  static ByteSize entry_offset(int raw_index) {
    int index = raw_index;
    return (base_offset() + ConstantPoolCacheEntry::size_in_bytes() * index);
  }

#if INCLUDE_JVMTI
  // RedefineClasses() API support:
  // If any entry of this ConstantPoolCache points to any of
  // old_methods, replace it with the corresponding new_method.
  // trace_name_printed is set to true if the current call has
  // printed the klass name so that other routines in the adjust_*
  // group don't print the klass name.
  void adjust_method_entries(Method** old_methods, Method** new_methods,
                             int methods_length, bool * trace_name_printed);
  bool check_no_old_or_obsolete_entries();
  void dump_cache();
#endif // INCLUDE_JVMTI

  // Deallocate - no fields to deallocate
  DEBUG_ONLY(bool on_stack() { return false; })
  void deallocate_contents(ClassLoaderData* data) {}
  bool is_klass() const { return false; }

  // Printing
  void print_on(outputStream* st) const;
  void print_value_on(outputStream* st) const;

  const char* internal_name() const { return "{constant pool cache}"; }

  // Verify
  void verify_on(outputStream* st);
};

#endif // SHARE_VM_OOPS_CPCACHEOOP_HPP