1.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000 1.2 +++ b/src/share/vm/oops/cpCache.hpp Sat Sep 01 13:25:18 2012 -0400 1.3 @@ -0,0 +1,439 @@ 1.4 +/* 1.5 + * Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved. 1.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 1.7 + * 1.8 + * This code is free software; you can redistribute it and/or modify it 1.9 + * under the terms of the GNU General Public License version 2 only, as 1.10 + * published by the Free Software Foundation. 1.11 + * 1.12 + * This code is distributed in the hope that it will be useful, but WITHOUT 1.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 1.14 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 1.15 + * version 2 for more details (a copy is included in the LICENSE file that 1.16 + * accompanied this code). 1.17 + * 1.18 + * You should have received a copy of the GNU General Public License version 1.19 + * 2 along with this work; if not, write to the Free Software Foundation, 1.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 1.21 + * 1.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 1.23 + * or visit www.oracle.com if you need additional information or have any 1.24 + * questions. 1.25 + * 1.26 + */ 1.27 + 1.28 +#ifndef SHARE_VM_OOPS_CPCACHEOOP_HPP 1.29 +#define SHARE_VM_OOPS_CPCACHEOOP_HPP 1.30 + 1.31 +#include "interpreter/bytecodes.hpp" 1.32 +#include "memory/allocation.hpp" 1.33 +#include "utilities/array.hpp" 1.34 + 1.35 +class PSPromotionManager; 1.36 + 1.37 +// A ConstantPoolCacheEntry describes an individual entry of the constant 1.38 +// pool cache. There's 2 principal kinds of entries: field entries for in- 1.39 +// stance & static field access, and method entries for invokes. 
Some of 1.40 +// the entry layout is shared and looks as follows: 1.41 +// 1.42 +// bit number |31 0| 1.43 +// bit length |-8--|-8--|---16----| 1.44 +// -------------------------------- 1.45 +// _indices [ b2 | b1 | index ] index = constant_pool_index 1.46 +// _f1 [ entry specific ] metadata ptr (method or klass) 1.47 +// _f2 [ entry specific ] vtable or res_ref index, or vfinal method ptr 1.48 +// _flags [tos|0|F=1|0|0|f|v|0 |00000|field_index] (for field entries) 1.49 +// bit length [ 4 |1| 1 |1|1|1|1|1 |--5--|----16-----] 1.50 +// _flags [tos|0|F=0|A|I|f|0|vf|00000|00000|psize] (for method entries) 1.51 +// bit length [ 4 |1| 1 |1|1|1|1|1 |--5--|--8--|--8--] 1.52 + 1.53 +// -------------------------------- 1.54 +// 1.55 +// with: 1.56 +// index = original constant pool index 1.57 +// b1 = bytecode 1 1.58 +// b2 = bytecode 2 1.59 +// psize = parameters size (method entries only) 1.60 +// field_index = index into field information in holder InstanceKlass 1.61 +// The index max is 0xffff (max number of fields in constant pool) 1.62 +// and is multiplied by (InstanceKlass::next_offset) when accessing. 
1.63 +// tos = TosState 1.64 +// F = the entry is for a field (or F=0 for a method) 1.65 +// A = call site has an appendix argument (loaded from resolved references) 1.66 +// I = interface call is forced virtual (must use a vtable index or vfinal) 1.67 +// f = field or method is final 1.68 +// v = field is volatile 1.69 +// vf = virtual but final (method entries only: is_vfinal()) 1.70 +// 1.71 +// The flags after TosState have the following interpretation: 1.72 +// F flag (bit 26): 1 for fields, 0 for methods (see is_field_entry_shift) 1.73 +// f flag true if field is marked final 1.74 +// v flag true if field is volatile (only for fields) 1.75 +// vf flag true if _f2 contains a final Method* rather than a vtable index 1.76 +// I flag true if an interface call is forced virtual (e.g., invokeinterface used for a method in class Object) 1.77 +// 1.78 +// The flags 31, 30, 29, 28 together build a 4 bit number 0 to 8 with the 1.79 +// following mapping to the TosState states: 1.80 +// 1.81 +// btos: 0 1.82 +// ctos: 1 1.83 +// stos: 2 1.84 +// itos: 3 1.85 +// ltos: 4 1.86 +// ftos: 5 1.87 +// dtos: 6 1.88 +// atos: 7 1.89 +// vtos: 8 1.90 +// 1.91 +// Entry specific: field entries: 1.92 +// _indices = get (b1 section) and put (b2 section) bytecodes, original constant pool index 1.93 +// _f1 = field holder (as a java.lang.Class, not a Klass*) 1.94 +// _f2 = field offset in bytes 1.95 +// _flags = field type information, original FieldInfo index in field holder 1.96 +// (field_index section) 1.97 +// 1.98 +// Entry specific: method entries: 1.99 +// _indices = invoke code for f1 (b1 section), invoke code for f2 (b2 section), 1.100 +// original constant pool index 1.101 +// _f1 = Method* for non-virtual calls, unused by virtual calls. 1.102 +// for interface calls, which are essentially virtual but need a klass, 1.103 +// contains Klass* for the corresponding interface. 
1.104 +// for invokedynamic, f1 contains a site-specific CallSite object (as an appendix) 1.105 +// for invokehandle, f1 contains a site-specific MethodType object (as an appendix) 1.106 +// (upcoming metadata changes will move the appendix to a separate array) 1.107 +// _f2 = vtable/itable index (or final Method*) for virtual calls only, 1.108 +// unused by non-virtual. The is_vfinal flag indicates this is a 1.109 +// method pointer for a final method, not an index. 1.110 +// _flags = method type info (t section), 1.111 +// virtual final bit (vfinal), 1.112 +// parameter size (psize section) 1.113 +// 1.114 +// Note: invokevirtual & invokespecial bytecodes can share the same constant 1.115 +// pool entry and thus the same constant pool cache entry. All invoke 1.116 +// bytecodes but invokevirtual use only _f1 and the corresponding b1 1.117 +// bytecode, while invokevirtual uses only _f2 and the corresponding 1.118 +// b2 bytecode. The value of _flags is shared for both types of entries. 1.119 +// 1.120 +// The fields are volatile so that they are stored in the order written in the 1.121 +// source code. The _indices field with the bytecode must be written last. 
1.122 + 1.123 +class ConstantPoolCacheEntry VALUE_OBJ_CLASS_SPEC { 1.124 + friend class VMStructs; 1.125 + friend class constantPoolCacheKlass; 1.126 + friend class ConstantPool; 1.127 + friend class InterpreterRuntime; 1.128 + 1.129 + private: 1.130 + volatile intx _indices; // constant pool index & rewrite bytecodes 1.131 + volatile Metadata* _f1; // entry specific metadata field 1.132 + volatile intx _f2; // entry specific int/metadata field 1.133 + volatile intx _flags; // flags 1.134 + 1.135 + 1.136 + void set_bytecode_1(Bytecodes::Code code); 1.137 + void set_bytecode_2(Bytecodes::Code code); 1.138 + void set_f1(Metadata* f1) { 1.139 + Metadata* existing_f1 = (Metadata*)_f1; // read once 1.140 + assert(existing_f1 == NULL || existing_f1 == f1, "illegal field change"); 1.141 + _f1 = f1; 1.142 + } 1.143 + void release_set_f1(Metadata* f1); 1.144 + void set_f2(intx f2) { assert(_f2 == 0 || _f2 == f2, "illegal field change"); _f2 = f2; } 1.145 + void set_f2_as_vfinal_method(Method* f2) { assert(_f2 == 0 || _f2 == (intptr_t) f2, "illegal field change"); assert(is_vfinal(), "flags must be set"); _f2 = (intptr_t) f2; } 1.146 + int make_flags(TosState state, int option_bits, int field_index_or_method_params); 1.147 + void set_flags(intx flags) { _flags = flags; } 1.148 + bool init_flags_atomic(intx flags); 1.149 + void set_field_flags(TosState field_type, int option_bits, int field_index) { 1.150 + assert((field_index & field_index_mask) == field_index, "field_index in range"); 1.151 + set_flags(make_flags(field_type, option_bits | (1 << is_field_entry_shift), field_index)); 1.152 + } 1.153 + void set_method_flags(TosState return_type, int option_bits, int method_params) { 1.154 + assert((method_params & parameter_size_mask) == method_params, "method_params in range"); 1.155 + set_flags(make_flags(return_type, option_bits, method_params)); 1.156 + } 1.157 + bool init_method_flags_atomic(TosState return_type, int option_bits, int method_params) { 1.158 + 
assert((method_params & parameter_size_mask) == method_params, "method_params in range"); 1.159 + return init_flags_atomic(make_flags(return_type, option_bits, method_params)); 1.160 + } 1.161 + 1.162 + public: 1.163 + // specific bit definitions for the flags field: 1.164 + // (Note: the interpreter must use these definitions to access the CP cache.) 1.165 + enum { 1.166 + // high order bits are the TosState corresponding to field type or method return type 1.167 + tos_state_bits = 4, 1.168 + tos_state_mask = right_n_bits(tos_state_bits), 1.169 + tos_state_shift = BitsPerInt - tos_state_bits, // see verify_tos_state_shift below 1.170 + // misc. option bits; can be any bit position in [16..27] 1.171 + is_field_entry_shift = 26, // (F) is it a field or a method? 1.172 + has_appendix_shift = 25, // (A) does the call site have an appendix argument? 1.173 + is_forced_virtual_shift = 24, // (I) is the interface reference forced to virtual mode? 1.174 + is_final_shift = 23, // (f) is the field or method final? 1.175 + is_volatile_shift = 22, // (v) is the field volatile? 1.176 + is_vfinal_shift = 21, // (vf) did the call resolve to a final method? 
1.177 + // low order bits give field index (for FieldInfo) or method parameter size: 1.178 + field_index_bits = 16, 1.179 + field_index_mask = right_n_bits(field_index_bits), 1.180 + parameter_size_bits = 8, // subset of field_index_mask, range is 0..255 1.181 + parameter_size_mask = right_n_bits(parameter_size_bits), 1.182 + option_bits_mask = ~(((-1) << tos_state_shift) | (field_index_mask | parameter_size_mask)) 1.183 + }; 1.184 + 1.185 + // specific bit definitions for the indices field: 1.186 + enum { 1.187 + cp_index_bits = 2*BitsPerByte, 1.188 + cp_index_mask = right_n_bits(cp_index_bits), 1.189 + bytecode_1_shift = cp_index_bits, 1.190 + bytecode_1_mask = right_n_bits(BitsPerByte), // == (u1)0xFF 1.191 + bytecode_2_shift = cp_index_bits + BitsPerByte, 1.192 + bytecode_2_mask = right_n_bits(BitsPerByte) // == (u1)0xFF 1.193 + }; 1.194 + 1.195 + 1.196 + // Initialization 1.197 + void initialize_entry(int original_index); // initialize primary entry 1.198 + void initialize_resolved_reference_index(int ref_index) { 1.199 + assert(_f2 == 0, "set once"); // note: ref_index might be zero also 1.200 + _f2 = ref_index; 1.201 + } 1.202 + 1.203 + void set_field( // sets entry to resolved field state 1.204 + Bytecodes::Code get_code, // the bytecode used for reading the field 1.205 + Bytecodes::Code put_code, // the bytecode used for writing the field 1.206 + KlassHandle field_holder, // the object/klass holding the field 1.207 + int orig_field_index, // the original field index in the field holder 1.208 + int field_offset, // the field offset in words in the field holder 1.209 + TosState field_type, // the (machine) field type 1.210 + bool is_final, // the field is final 1.211 + bool is_volatile, // the field is volatile 1.212 + Klass* root_klass // needed by the GC to dirty the klass 1.213 + ); 1.214 + 1.215 + void set_method( // sets entry to resolved method entry 1.216 + Bytecodes::Code invoke_code, // the bytecode used for invoking the method 1.217 + methodHandle 
method, // the method/prototype if any (NULL, otherwise) 1.218 + int vtable_index // the vtable index if any, else negative 1.219 + ); 1.220 + 1.221 + void set_interface_call( 1.222 + methodHandle method, // Resolved method 1.223 + int index // Method index into interface 1.224 + ); 1.225 + 1.226 + void set_method_handle( 1.227 + methodHandle method, // adapter for invokeExact, etc. 1.228 + Handle appendix, // stored in refs[f2]; could be a java.lang.invoke.MethodType 1.229 + objArrayHandle resolved_references 1.230 + ); 1.231 + 1.232 + void set_dynamic_call( 1.233 + methodHandle method, // adapter for this call site 1.234 + Handle appendix, // stored in refs[f2]; could be a java.lang.invoke.CallSite 1.235 + objArrayHandle resolved_references 1.236 + ); 1.237 + 1.238 + // Common code for invokedynamic and MH invocations. 1.239 + 1.240 + // The "appendix" is an optional call-site-specific parameter which is 1.241 + // pushed by the JVM at the end of the argument list. This argument may 1.242 + // be a MethodType for the MH.invokes and a CallSite for an invokedynamic 1.243 + // instruction. However, its exact type and use depends on the Java upcall, 1.244 + // which simply returns a compiled LambdaForm along with any reference 1.245 + // that LambdaForm needs to complete the call. If the upcall returns a 1.246 + // null appendix, the argument is not passed at all. 1.247 + // 1.248 + // The appendix is *not* represented in the signature of the symbolic 1.249 + // reference for the call site, but (if present) it *is* represented in 1.250 + // the Method* bound to the site. This means that static and dynamic 1.251 + // resolution logic needs to make slightly different assessments about the 1.252 + // number and types of arguments. 1.253 + void set_method_handle_common( 1.254 + Bytecodes::Code invoke_code, // _invokehandle or _invokedynamic 1.255 + methodHandle adapter, // invoker method (f1) 1.256 + Handle appendix, // appendix such as CallSite, MethodType, etc. 
(refs[f2]) 1.257 + objArrayHandle resolved_references 1.258 + ); 1.259 + 1.260 + Method* method_if_resolved(constantPoolHandle cpool); 1.261 + oop appendix_if_resolved(constantPoolHandle cpool); 1.262 + 1.263 + void set_parameter_size(int value); 1.264 + 1.265 + // Which bytecode number (1 or 2) in the index field is valid for this bytecode? 1.266 + // Returns -1 if neither is valid. 1.267 + static int bytecode_number(Bytecodes::Code code) { 1.268 + switch (code) { 1.269 + case Bytecodes::_getstatic : // fall through 1.270 + case Bytecodes::_getfield : // fall through 1.271 + case Bytecodes::_invokespecial : // fall through 1.272 + case Bytecodes::_invokestatic : // fall through 1.273 + case Bytecodes::_invokeinterface : return 1; 1.274 + case Bytecodes::_putstatic : // fall through 1.275 + case Bytecodes::_putfield : // fall through 1.276 + case Bytecodes::_invokehandle : // fall through 1.277 + case Bytecodes::_invokedynamic : // fall through 1.278 + case Bytecodes::_invokevirtual : return 2; 1.279 + default : break; 1.280 + } 1.281 + return -1; 1.282 + } 1.283 + 1.284 + // Has this bytecode been resolved? Only valid for invokes and get/put field/static. 
1.285 + bool is_resolved(Bytecodes::Code code) const { 1.286 + switch (bytecode_number(code)) { 1.287 + case 1: return (bytecode_1() == code); 1.288 + case 2: return (bytecode_2() == code); 1.289 + } 1.290 + return false; // default: not resolved 1.291 + } 1.292 + 1.293 + // Accessors 1.294 + int indices() const { return _indices; } 1.295 + int constant_pool_index() const { return (indices() & cp_index_mask); } 1.296 + Bytecodes::Code bytecode_1() const { return Bytecodes::cast((indices() >> bytecode_1_shift) & bytecode_1_mask); } 1.297 + Bytecodes::Code bytecode_2() const { return Bytecodes::cast((indices() >> bytecode_2_shift) & bytecode_2_mask); } 1.298 + Method* f1_as_method() const { Metadata* f1 = (Metadata*)_f1; assert(f1 == NULL || f1->is_method(), ""); return (Method*)f1; } 1.299 + Klass* f1_as_klass() const { Metadata* f1 = (Metadata*)_f1; assert(f1 == NULL || f1->is_klass(), ""); return (Klass*)f1; } 1.300 + bool is_f1_null() const { Metadata* f1 = (Metadata*)_f1; return f1 == NULL; } // classifies a CPC entry as unbound 1.301 + int f2_as_index() const { assert(!is_vfinal(), ""); return (int) _f2; } 1.302 + Method* f2_as_vfinal_method() const { assert(is_vfinal(), ""); return (Method*)_f2; } 1.303 + int field_index() const { assert(is_field_entry(), ""); return (_flags & field_index_mask); } 1.304 + int parameter_size() const { assert(is_method_entry(), ""); return (_flags & parameter_size_mask); } 1.305 + bool is_volatile() const { return (_flags & (1 << is_volatile_shift)) != 0; } 1.306 + bool is_final() const { return (_flags & (1 << is_final_shift)) != 0; } 1.307 + bool is_forced_virtual() const { return (_flags & (1 << is_forced_virtual_shift)) != 0; } 1.308 + bool is_vfinal() const { return (_flags & (1 << is_vfinal_shift)) != 0; } 1.309 + bool has_appendix() const { return (_flags & (1 << has_appendix_shift)) != 0; } 1.310 + bool is_method_entry() const { return (_flags & (1 << is_field_entry_shift)) == 0; } 1.311 + bool is_field_entry() const { 
return (_flags & (1 << is_field_entry_shift)) != 0; } 1.312 + bool is_byte() const { return flag_state() == btos; } 1.313 + bool is_char() const { return flag_state() == ctos; } 1.314 + bool is_short() const { return flag_state() == stos; } 1.315 + bool is_int() const { return flag_state() == itos; } 1.316 + bool is_long() const { return flag_state() == ltos; } 1.317 + bool is_float() const { return flag_state() == ftos; } 1.318 + bool is_double() const { return flag_state() == dtos; } 1.319 + bool is_object() const { return flag_state() == atos; } 1.320 + TosState flag_state() const { assert((uint)number_of_states <= (uint)tos_state_mask+1, ""); 1.321 + return (TosState)((_flags >> tos_state_shift) & tos_state_mask); } 1.322 + 1.323 + // Code generation support 1.324 + static WordSize size() { return in_WordSize(sizeof(ConstantPoolCacheEntry) / HeapWordSize); } 1.325 + static ByteSize size_in_bytes() { return in_ByteSize(sizeof(ConstantPoolCacheEntry)); } 1.326 + static ByteSize indices_offset() { return byte_offset_of(ConstantPoolCacheEntry, _indices); } 1.327 + static ByteSize f1_offset() { return byte_offset_of(ConstantPoolCacheEntry, _f1); } 1.328 + static ByteSize f2_offset() { return byte_offset_of(ConstantPoolCacheEntry, _f2); } 1.329 + static ByteSize flags_offset() { return byte_offset_of(ConstantPoolCacheEntry, _flags); } 1.330 + 1.331 + // RedefineClasses() API support: 1.332 + // If this constantPoolCacheEntry refers to old_method then update it 1.333 + // to refer to new_method. 1.334 + // trace_name_printed is set to true if the current call has 1.335 + // printed the klass name so that other routines in the adjust_* 1.336 + // group don't print the klass name. 
1.337 + bool adjust_method_entry(Method* old_method, Method* new_method, 1.338 + bool * trace_name_printed); 1.339 + NOT_PRODUCT(bool check_no_old_entries();) 1.340 + bool is_interesting_method_entry(Klass* k); 1.341 + 1.342 + // Debugging & Printing 1.343 + void print (outputStream* st, int index) const; 1.344 + void verify(outputStream* st) const; 1.345 + 1.346 + static void verify_tos_state_shift() { 1.347 + // When shifting flags as a 32-bit int, make sure we don't need an extra mask for tos_state: 1.348 + assert((((u4)-1 >> tos_state_shift) & ~tos_state_mask) == 0, "no need for tos_state mask"); 1.349 + } 1.350 +}; 1.351 + 1.352 + 1.353 +// A constant pool cache is a runtime data structure set aside to a constant pool. The cache 1.354 +// holds interpreter runtime information for all field access and invoke bytecodes. The cache 1.355 +// is created and initialized before a class is actively used (i.e., initialized), the indivi- 1.356 +// dual cache entries are filled at resolution (i.e., "link") time (see also: rewriter.*). 
1.357 + 1.358 +class ConstantPoolCache: public MetaspaceObj { 1.359 + friend class VMStructs; 1.360 + friend class MetadataFactory; 1.361 + private: 1.362 + int _length; 1.363 + ConstantPool* _constant_pool; // the corresponding constant pool 1.364 + 1.365 + // Sizing 1.366 + debug_only(friend class ClassVerifier;) 1.367 + 1.368 + // Constructor 1.369 + ConstantPoolCache(int length) : _length(length), _constant_pool(NULL) { 1.370 + for (int i = 0; i < length; i++) { 1.371 + assert(entry_at(i)->is_f1_null(), "Failed to clear?"); 1.372 + } 1.373 + } 1.374 + 1.375 + public: 1.376 + static ConstantPoolCache* allocate(ClassLoaderData* loader_data, int length, TRAPS); 1.377 + bool is_constantPoolCache() const { return true; } 1.378 + 1.379 + int length() const { return _length; } 1.380 + private: 1.381 + void set_length(int length) { _length = length; } 1.382 + 1.383 + static int header_size() { return sizeof(ConstantPoolCache) / HeapWordSize; } 1.384 + static int size(int length) { return align_object_size(header_size() + length * in_words(ConstantPoolCacheEntry::size())); } 1.385 + public: 1.386 + int size() const { return size(length()); } 1.387 + private: 1.388 + 1.389 + // Helpers 1.390 + ConstantPool** constant_pool_addr() { return &_constant_pool; } 1.391 + ConstantPoolCacheEntry* base() const { return (ConstantPoolCacheEntry*)((address)this + in_bytes(base_offset())); } 1.392 + 1.393 + friend class constantPoolCacheKlass; 1.394 + friend class ConstantPoolCacheEntry; 1.395 + 1.396 + public: 1.397 + // Initialization 1.398 + void initialize(intArray& inverse_index_map, intArray& invokedynamic_references_map); 1.399 + 1.400 + // Accessors 1.401 + void set_constant_pool(ConstantPool* pool) { _constant_pool = pool; } 1.402 + ConstantPool* constant_pool() const { return _constant_pool; } 1.403 + // Fetches the entry at the given index. 1.404 + // In either case the index must not be encoded or byte-swapped in any way. 
1.405 + ConstantPoolCacheEntry* entry_at(int i) const { 1.406 + assert(0 <= i && i < length(), "index out of bounds"); 1.407 + return base() + i; 1.408 + } 1.409 + 1.410 + // Code generation 1.411 + static ByteSize base_offset() { return in_ByteSize(sizeof(ConstantPoolCache)); } 1.412 + static ByteSize entry_offset(int raw_index) { 1.413 + int index = raw_index; 1.414 + return (base_offset() + ConstantPoolCacheEntry::size_in_bytes() * index); 1.415 + } 1.416 + 1.417 + // RedefineClasses() API support: 1.418 + // If any entry of this constantPoolCache points to any of 1.419 + // old_methods, replace it with the corresponding new_method. 1.420 + // trace_name_printed is set to true if the current call has 1.421 + // printed the klass name so that other routines in the adjust_* 1.422 + // group don't print the klass name. 1.423 + void adjust_method_entries(Method** old_methods, Method** new_methods, 1.424 + int methods_length, bool * trace_name_printed); 1.425 + NOT_PRODUCT(bool check_no_old_entries();) 1.426 + 1.427 + // Deallocate - no fields to deallocate 1.428 + DEBUG_ONLY(bool on_stack() { return false; }) 1.429 + void deallocate_contents(ClassLoaderData* data) {} 1.430 + bool is_klass() const { return false; } 1.431 + 1.432 + // Printing 1.433 + void print_on(outputStream* st) const; 1.434 + void print_value_on(outputStream* st) const; 1.435 + 1.436 + const char* internal_name() const { return "{constant pool cache}"; } 1.437 + 1.438 + // Verify 1.439 + void verify_on(outputStream* st); 1.440 +}; 1.441 + 1.442 +#endif // SHARE_VM_OOPS_CPCACHEOOP_HPP