1.1 --- a/src/share/vm/interpreter/rewriter.cpp Wed Nov 13 07:31:26 2013 -0800 1.2 +++ b/src/share/vm/interpreter/rewriter.cpp Wed Nov 13 16:42:24 2013 -0500 1.3 @@ -70,21 +70,21 @@ 1.4 } 1.5 1.6 // Unrewrite the bytecodes if an error occurs. 1.7 -void Rewriter::restore_bytecodes() { 1.8 +void Rewriter::restore_bytecodes(TRAPS) { 1.9 int len = _methods->length(); 1.10 1.11 for (int i = len-1; i >= 0; i--) { 1.12 Method* method = _methods->at(i); 1.13 - scan_method(method, true); 1.14 + scan_method(method, true, CHECK); 1.15 } 1.16 } 1.17 1.18 // Creates a constant pool cache given a CPC map 1.19 void Rewriter::make_constant_pool_cache(TRAPS) { 1.20 - const int length = _cp_cache_map.length(); 1.21 ClassLoaderData* loader_data = _pool->pool_holder()->class_loader_data(); 1.22 ConstantPoolCache* cache = 1.23 - ConstantPoolCache::allocate(loader_data, length, _cp_cache_map, 1.24 + ConstantPoolCache::allocate(loader_data, _cp_cache_map, 1.25 + _invokedynamic_cp_cache_map, 1.26 _invokedynamic_references_map, CHECK); 1.27 1.28 // initialize object cache in constant pool 1.29 @@ -154,6 +154,31 @@ 1.30 } 1.31 } 1.32 1.33 +// If the constant pool entry for invokespecial is InterfaceMethodref, 1.34 +// we need to add a separate cpCache entry for its resolution, because it is 1.35 +// different than the resolution for invokeinterface with InterfaceMethodref. 1.36 +// These cannot share cpCache entries. It's unclear if all invokespecial to 1.37 +// InterfaceMethodrefs would resolve to the same thing so a new cpCache entry 1.38 +// is created for each one. This was added with lambda. 
1.39 +void Rewriter::rewrite_invokespecial(address bcp, int offset, bool reverse, TRAPS) { 1.40 + static int count = 0; 1.41 + address p = bcp + offset; 1.42 + if (!reverse) { 1.43 + int cp_index = Bytes::get_Java_u2(p); 1.44 + int cache_index = add_invokespecial_cp_cache_entry(cp_index); 1.45 + if (cache_index != (int)(jushort) cache_index) { 1.46 + THROW_MSG(vmSymbols::java_lang_InternalError(), 1.47 + "This classfile overflows invokespecial for interfaces " 1.48 + "and cannot be loaded"); 1.49 + } 1.50 + Bytes::put_native_u2(p, cache_index); 1.51 + } else { 1.52 + int cache_index = Bytes::get_native_u2(p); 1.53 + int cp_index = cp_cache_entry_pool_index(cache_index); 1.54 + Bytes::put_Java_u2(p, cp_index); 1.55 + } 1.56 +} 1.57 + 1.58 1.59 // Adjust the invocation bytecode for a signature-polymorphic method (MethodHandle.invoke, etc.) 1.60 void Rewriter::maybe_rewrite_invokehandle(address opc, int cp_index, int cache_index, bool reverse) { 1.61 @@ -203,7 +228,7 @@ 1.62 if (!reverse) { 1.63 int cp_index = Bytes::get_Java_u2(p); 1.64 int cache_index = add_invokedynamic_cp_cache_entry(cp_index); 1.65 - add_invokedynamic_resolved_references_entries(cp_index, cache_index); 1.66 + int resolved_index = add_invokedynamic_resolved_references_entries(cp_index, cache_index); 1.67 // Replace the trailing four bytes with a CPC index for the dynamic 1.68 // call site. Unlike other CPC entries, there is one per bytecode, 1.69 // not just one per distinct CP entry. In other words, the 1.70 @@ -212,13 +237,20 @@ 1.71 // all these entries. That is the main reason invokedynamic 1.72 // must have a five-byte instruction format. (Of course, other JVM 1.73 // implementations can use the bytes for other purposes.) 1.74 + // Note: We use native_u4 format exclusively for 4-byte indexes. 1.75 Bytes::put_native_u4(p, ConstantPool::encode_invokedynamic_index(cache_index)); 1.76 - // Note: We use native_u4 format exclusively for 4-byte indexes. 
1.77 + // add the bcp in case we need to patch this bytecode if we also find a 1.78 + // invokespecial/InterfaceMethodref in the bytecode stream 1.79 + _patch_invokedynamic_bcps->push(p); 1.80 + _patch_invokedynamic_refs->push(resolved_index); 1.81 } else { 1.82 - // callsite index 1.83 int cache_index = ConstantPool::decode_invokedynamic_index( 1.84 Bytes::get_native_u4(p)); 1.85 - int cp_index = cp_cache_entry_pool_index(cache_index); 1.86 + // We will reverse the bytecode rewriting _after_ adjusting them. 1.87 + // Adjust the cache index by offset to the invokedynamic entries in the 1.88 + // cpCache plus the delta if the invokedynamic bytecodes were adjusted. 1.89 + cache_index = cp_cache_delta() + _first_iteration_cp_cache_limit; 1.90 + int cp_index = invokedynamic_cp_cache_entry_pool_index(cache_index); 1.91 assert(_pool->tag_at(cp_index).is_invoke_dynamic(), "wrong index"); 1.92 // zero out 4 bytes 1.93 Bytes::put_Java_u4(p, 0); 1.94 @@ -226,6 +258,34 @@ 1.95 } 1.96 } 1.97 1.98 +void Rewriter::patch_invokedynamic_bytecodes() { 1.99 + // If the end of the cp_cache is the same as after initializing with the 1.100 + // cpool, nothing needs to be done. Invokedynamic bytecodes are at the 1.101 + // correct offsets. ie. no invokespecials added 1.102 + int delta = cp_cache_delta(); 1.103 + if (delta > 0) { 1.104 + int length = _patch_invokedynamic_bcps->length(); 1.105 + assert(length == _patch_invokedynamic_refs->length(), 1.106 + "lengths should match"); 1.107 + for (int i = 0; i < length; i++) { 1.108 + address p = _patch_invokedynamic_bcps->at(i); 1.109 + int cache_index = ConstantPool::decode_invokedynamic_index( 1.110 + Bytes::get_native_u4(p)); 1.111 + Bytes::put_native_u4(p, ConstantPool::encode_invokedynamic_index(cache_index + delta)); 1.112 + 1.113 + // invokedynamic resolved references map also points to cp cache and must 1.114 + // add delta to each. 
1.115 + int resolved_index = _patch_invokedynamic_refs->at(i); 1.116 + for (int entry = 0; entry < ConstantPoolCacheEntry::_indy_resolved_references_entries; entry++) { 1.117 + assert(_invokedynamic_references_map[resolved_index+entry] == cache_index, 1.118 + "should be the same index"); 1.119 + _invokedynamic_references_map.at_put(resolved_index+entry, 1.120 + cache_index + delta); 1.121 + } 1.122 + } 1.123 + } 1.124 +} 1.125 + 1.126 1.127 // Rewrite some ldc bytecodes to _fast_aldc 1.128 void Rewriter::maybe_rewrite_ldc(address bcp, int offset, bool is_wide, 1.129 @@ -269,7 +329,7 @@ 1.130 1.131 1.132 // Rewrites a method given the index_map information 1.133 -void Rewriter::scan_method(Method* method, bool reverse) { 1.134 +void Rewriter::scan_method(Method* method, bool reverse, TRAPS) { 1.135 1.136 int nof_jsrs = 0; 1.137 bool has_monitor_bytecodes = false; 1.138 @@ -329,12 +389,25 @@ 1.139 #endif 1.140 break; 1.141 } 1.142 + 1.143 + case Bytecodes::_invokespecial : { 1.144 + int offset = prefix_length + 1; 1.145 + address p = bcp + offset; 1.146 + int cp_index = Bytes::get_Java_u2(p); 1.147 + // InterfaceMethodref 1.148 + if (_pool->tag_at(cp_index).is_interface_method()) { 1.149 + rewrite_invokespecial(bcp, offset, reverse, CHECK); 1.150 + } else { 1.151 + rewrite_member_reference(bcp, offset, reverse); 1.152 + } 1.153 + break; 1.154 + } 1.155 + 1.156 case Bytecodes::_getstatic : // fall through 1.157 case Bytecodes::_putstatic : // fall through 1.158 case Bytecodes::_getfield : // fall through 1.159 case Bytecodes::_putfield : // fall through 1.160 case Bytecodes::_invokevirtual : // fall through 1.161 - case Bytecodes::_invokespecial : // fall through 1.162 case Bytecodes::_invokestatic : 1.163 case Bytecodes::_invokeinterface: 1.164 case Bytecodes::_invokehandle : // if reverse=true 1.165 @@ -426,16 +499,21 @@ 1.166 1.167 for (int i = len-1; i >= 0; i--) { 1.168 Method* method = _methods->at(i); 1.169 - scan_method(method); 1.170 + scan_method(method, 
false, CHECK); // If you get an error here, 1.171 + // there is no reversing bytecodes 1.172 } 1.173 1.174 + // May have to fix invokedynamic bytecodes if invokespecial/InterfaceMethodref 1.175 + // entries had to be added. 1.176 + patch_invokedynamic_bytecodes(); 1.177 + 1.178 // allocate constant pool cache, now that we've seen all the bytecodes 1.179 make_constant_pool_cache(THREAD); 1.180 1.181 // Restore bytecodes to their unrewritten state if there are exceptions 1.182 // rewriting bytecodes or allocating the cpCache 1.183 if (HAS_PENDING_EXCEPTION) { 1.184 - restore_bytecodes(); 1.185 + restore_bytecodes(CATCH); 1.186 return; 1.187 } 1.188 1.189 @@ -452,7 +530,7 @@ 1.190 // relocating bytecodes. If some are relocated, that is ok because that 1.191 // doesn't affect constant pool to cpCache rewriting. 1.192 if (HAS_PENDING_EXCEPTION) { 1.193 - restore_bytecodes(); 1.194 + restore_bytecodes(CATCH); 1.195 return; 1.196 } 1.197 // Method might have gotten rewritten.