1.1 --- a/src/share/vm/interpreter/rewriter.cpp Mon Apr 20 14:48:03 2009 -0700 1.2 +++ b/src/share/vm/interpreter/rewriter.cpp Tue Apr 21 23:21:04 2009 -0700 1.3 @@ -25,39 +25,50 @@ 1.4 # include "incls/_precompiled.incl" 1.5 # include "incls/_rewriter.cpp.incl" 1.6 1.7 - 1.8 -// Computes an index_map (new_index -> original_index) for contant pool entries 1.9 +// Computes a CPC map (new_index -> original_index) for constant pool entries 1.10 // that are referred to by the interpreter at runtime via the constant pool cache. 1.11 -void Rewriter::compute_index_maps(constantPoolHandle pool, intArray*& index_map, intStack*& inverse_index_map) { 1.12 - const int length = pool->length(); 1.13 - index_map = new intArray(length, -1); 1.14 - // Choose an initial value large enough that we don't get frequent 1.15 - // calls to grow(). 1.16 - inverse_index_map = new intStack(length / 2); 1.17 +// Also computes a CP map (original_index -> new_index). 1.18 +// Marks entries in CP which require additional processing. 1.19 +void Rewriter::compute_index_maps() { 1.20 + const int length = _pool->length(); 1.21 + init_cp_map(length); 1.22 for (int i = 0; i < length; i++) { 1.23 - switch (pool->tag_at(i).value()) { 1.24 + int tag = _pool->tag_at(i).value(); 1.25 + switch (tag) { 1.26 + case JVM_CONSTANT_InterfaceMethodref: 1.27 case JVM_CONSTANT_Fieldref : // fall through 1.28 case JVM_CONSTANT_Methodref : // fall through 1.29 - case JVM_CONSTANT_InterfaceMethodref: { 1.30 - index_map->at_put(i, inverse_index_map->length()); 1.31 - inverse_index_map->append(i); 1.32 - } 1.33 + add_cp_cache_entry(i); 1.34 + break; 1.35 } 1.36 } 1.37 + 1.38 + guarantee((int)_cp_cache_map.length()-1 <= (int)((u2)-1), 1.39 + "all cp cache indexes fit in a u2"); 1.40 } 1.41 1.42 1.43 -// Creates a constant pool cache given an inverse_index_map 1.44 +int Rewriter::add_extra_cp_cache_entry(int main_entry) { 1.45 + // Hack: We put it on the map as an encoded value. 
1.46 + // The only place that consumes this is ConstantPoolCacheEntry::set_initial_state 1.47 + int encoded = constantPoolCacheOopDesc::encode_secondary_index(main_entry); 1.48 + int plain_secondary_index = _cp_cache_map.append(encoded); 1.49 + return constantPoolCacheOopDesc::encode_secondary_index(plain_secondary_index); 1.50 +} 1.51 + 1.52 + 1.53 + 1.54 +// Creates a constant pool cache given a CPC map 1.55 // This creates the constant pool cache initially in a state 1.56 // that is unsafe for concurrent GC processing but sets it to 1.57 // a safe mode before the constant pool cache is returned. 1.58 -constantPoolCacheHandle Rewriter::new_constant_pool_cache(intArray& inverse_index_map, TRAPS) { 1.59 - const int length = inverse_index_map.length(); 1.60 - constantPoolCacheOop cache = oopFactory::new_constantPoolCache(length, 1.61 - methodOopDesc::IsUnsafeConc, 1.62 - CHECK_(constantPoolCacheHandle())); 1.63 - cache->initialize(inverse_index_map); 1.64 - return constantPoolCacheHandle(THREAD, cache); 1.65 +void Rewriter::make_constant_pool_cache(TRAPS) { 1.66 + const int length = _cp_cache_map.length(); 1.67 + constantPoolCacheOop cache = 1.68 + oopFactory::new_constantPoolCache(length, methodOopDesc::IsUnsafeConc, CHECK); 1.69 + cache->initialize(_cp_cache_map); 1.70 + _pool->set_cache(cache); 1.71 + cache->set_constant_pool(_pool()); 1.72 } 1.73 1.74 1.75 @@ -101,8 +112,38 @@ 1.76 } 1.77 1.78 1.79 +// Rewrite a classfile-order CP index into a native-order CPC index. 
1.80 +int Rewriter::rewrite_member_reference(address bcp, int offset) { 1.81 + address p = bcp + offset; 1.82 + int cp_index = Bytes::get_Java_u2(p); 1.83 + int cache_index = cp_entry_to_cp_cache(cp_index); 1.84 + Bytes::put_native_u2(p, cache_index); 1.85 + return cp_index; 1.86 +} 1.87 + 1.88 + 1.89 +void Rewriter::rewrite_invokedynamic(address bcp, int offset, int delete_me) { 1.90 + address p = bcp + offset; 1.91 + assert(p[-1] == Bytecodes::_invokedynamic, ""); 1.92 + int cp_index = Bytes::get_Java_u2(p); 1.93 + int cpc = maybe_add_cp_cache_entry(cp_index); // add lazily 1.94 + int cpc2 = add_extra_cp_cache_entry(cpc); 1.95 + 1.96 + // Replace the trailing four bytes with a CPC index for the dynamic 1.97 + // call site. Unlike other CPC entries, there is one per bytecode, 1.98 + // not just one per distinct CP entry. In other words, the 1.99 + // CPC-to-CP relation is many-to-one for invokedynamic entries. 1.100 + // This means we must use a larger index size than u2 to address 1.101 + // all these entries. That is the main reason invokedynamic 1.102 + // must have a five-byte instruction format. (Of course, other JVM 1.103 + // implementations can use the bytes for other purposes.) 1.104 + Bytes::put_native_u4(p, cpc2); 1.105 + // Note: We use native_u4 format exclusively for 4-byte indexes. 1.106 +} 1.107 + 1.108 + 1.109 // Rewrites a method given the index_map information 1.110 -methodHandle Rewriter::rewrite_method(methodHandle method, intArray& index_map, TRAPS) { 1.111 +void Rewriter::scan_method(methodOop method) { 1.112 1.113 int nof_jsrs = 0; 1.114 bool has_monitor_bytecodes = false; 1.115 @@ -121,6 +162,7 @@ 1.116 int bc_length; 1.117 for (int bci = 0; bci < code_length; bci += bc_length) { 1.118 address bcp = code_base + bci; 1.119 + int prefix_length = 0; 1.120 c = (Bytecodes::Code)(*bcp); 1.121 1.122 // Since we have the code, see if we can get the length 1.123 @@ -135,6 +177,7 @@ 1.124 // by 'wide'. 
We don't currently examine any of the bytecodes 1.125 // modified by wide, but in case we do in the future... 1.126 if (c == Bytecodes::_wide) { 1.127 + prefix_length = 1; 1.128 c = (Bytecodes::Code)bcp[1]; 1.129 } 1.130 } 1.131 @@ -159,12 +202,13 @@ 1.132 case Bytecodes::_putfield : // fall through 1.133 case Bytecodes::_invokevirtual : // fall through 1.134 case Bytecodes::_invokespecial : // fall through 1.135 - case Bytecodes::_invokestatic : // fall through 1.136 - case Bytecodes::_invokeinterface: { 1.137 - address p = bcp + 1; 1.138 - Bytes::put_native_u2(p, index_map[Bytes::get_Java_u2(p)]); 1.139 + case Bytecodes::_invokestatic : 1.140 + case Bytecodes::_invokeinterface: 1.141 + rewrite_member_reference(bcp, prefix_length+1); 1.142 break; 1.143 - } 1.144 + case Bytecodes::_invokedynamic: 1.145 + rewrite_invokedynamic(bcp, prefix_length+1, int(sizeof"@@@@DELETE ME")); 1.146 + break; 1.147 case Bytecodes::_jsr : // fall through 1.148 case Bytecodes::_jsr_w : nof_jsrs++; break; 1.149 case Bytecodes::_monitorenter : // fall through 1.150 @@ -182,53 +226,56 @@ 1.151 // have to be rewritten, so we run the oopMapGenerator on the method 1.152 if (nof_jsrs > 0) { 1.153 method->set_has_jsrs(); 1.154 - ResolveOopMapConflicts romc(method); 1.155 - methodHandle original_method = method; 1.156 - method = romc.do_potential_rewrite(CHECK_(methodHandle())); 1.157 - if (method() != original_method()) { 1.158 - // Insert invalid bytecode into original methodOop and set 1.159 - // interpreter entrypoint, so that a executing this method 1.160 - // will manifest itself in an easy recognizable form. 1.161 - address bcp = original_method->bcp_from(0); 1.162 - *bcp = (u1)Bytecodes::_shouldnotreachhere; 1.163 - int kind = Interpreter::method_kind(original_method); 1.164 - original_method->set_interpreter_kind(kind); 1.165 - } 1.166 + // Second pass will revisit this method. 1.167 + assert(method->has_jsrs(), ""); 1.168 + } 1.169 +} 1.170 1.171 - // Update monitor matching info. 
1.172 - if (romc.monitor_safe()) { 1.173 - method->set_guaranteed_monitor_matching(); 1.174 - } 1.175 +// After constant pool is created, revisit methods containing jsrs. 1.176 +methodHandle Rewriter::rewrite_jsrs(methodHandle method, TRAPS) { 1.177 + ResolveOopMapConflicts romc(method); 1.178 + methodHandle original_method = method; 1.179 + method = romc.do_potential_rewrite(CHECK_(methodHandle())); 1.180 + if (method() != original_method()) { 1.181 + // Insert invalid bytecode into original methodOop and set 1.182 + // interpreter entrypoint, so that executing this method 1.183 + // will manifest itself in an easily recognizable form. 1.184 + address bcp = original_method->bcp_from(0); 1.185 + *bcp = (u1)Bytecodes::_shouldnotreachhere; 1.186 + int kind = Interpreter::method_kind(original_method); 1.187 + original_method->set_interpreter_kind(kind); 1.188 } 1.189 1.190 - // Setup method entrypoints for compiler and interpreter 1.191 - method->link_method(method, CHECK_(methodHandle())); 1.192 + // Update monitor matching info. 1.193 + if (romc.monitor_safe()) { 1.194 + method->set_guaranteed_monitor_matching(); 1.195 + } 1.196 1.197 return method; 1.198 } 1.199 1.200 1.201 void Rewriter::rewrite(instanceKlassHandle klass, TRAPS) { 1.202 - // gather starting points 1.203 ResourceMark rm(THREAD); 1.204 - constantPoolHandle pool (THREAD, klass->constants()); 1.205 - objArrayHandle methods (THREAD, klass->methods()); 1.206 - assert(pool->cache() == NULL, "constant pool cache must not be set yet"); 1.207 + Rewriter rw(klass, CHECK); 1.208 + // (That's all, folks.) 
1.209 +} 1.210 + 1.211 +Rewriter::Rewriter(instanceKlassHandle klass, TRAPS) 1.212 + : _klass(klass), 1.213 + // gather starting points 1.214 + _pool( THREAD, klass->constants()), 1.215 + _methods(THREAD, klass->methods()) 1.216 +{ 1.217 + assert(_pool->cache() == NULL, "constant pool cache must not be set yet"); 1.218 1.219 // determine index maps for methodOop rewriting 1.220 - intArray* index_map = NULL; 1.221 - intStack* inverse_index_map = NULL; 1.222 - compute_index_maps(pool, index_map, inverse_index_map); 1.223 + compute_index_maps(); 1.224 1.225 - // allocate constant pool cache 1.226 - constantPoolCacheHandle cache = new_constant_pool_cache(*inverse_index_map, CHECK); 1.227 - pool->set_cache(cache()); 1.228 - cache->set_constant_pool(pool()); 1.229 - 1.230 - if (RegisterFinalizersAtInit && klass->name() == vmSymbols::java_lang_Object()) { 1.231 - int i = methods->length(); 1.232 + if (RegisterFinalizersAtInit && _klass->name() == vmSymbols::java_lang_Object()) { 1.233 + int i = _methods->length(); 1.234 while (i-- > 0) { 1.235 - methodOop method = (methodOop)methods->obj_at(i); 1.236 + methodOop method = (methodOop)_methods->obj_at(i); 1.237 if (method->intrinsic_id() == vmIntrinsics::_Object_init) { 1.238 // rewrite the return bytecodes of Object.<init> to register the 1.239 // object for finalization if needed. 
1.240 @@ -239,13 +286,27 @@ 1.241 } 1.242 } 1.243 1.244 - // rewrite methods 1.245 - { int i = methods->length(); 1.246 - while (i-- > 0) { 1.247 - methodHandle m(THREAD, (methodOop)methods->obj_at(i)); 1.248 - m = rewrite_method(m, *index_map, CHECK); 1.249 + // rewrite methods, in two passes 1.250 + int i, len = _methods->length(); 1.251 + 1.252 + for (i = len; --i >= 0; ) { 1.253 + methodOop method = (methodOop)_methods->obj_at(i); 1.254 + scan_method(method); 1.255 + } 1.256 + 1.257 + // allocate constant pool cache, now that we've seen all the bytecodes 1.258 + make_constant_pool_cache(CHECK); 1.259 + 1.260 + for (i = len; --i >= 0; ) { 1.261 + methodHandle m(THREAD, (methodOop)_methods->obj_at(i)); 1.262 + 1.263 + if (m->has_jsrs()) { 1.264 + m = rewrite_jsrs(m, CHECK); 1.265 // Method might have gotten rewritten. 1.266 - methods->obj_at_put(i, m()); 1.267 + _methods->obj_at_put(i, m()); 1.268 } 1.269 + 1.270 + // Set up method entry points for compiler and interpreter. 1.271 + m->link_method(m, CHECK); 1.272 } 1.273 }