--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/interpreter/rewriter.hpp	Wed Apr 27 01:25:04 2016 +0800
@@ -0,0 +1,214 @@
/*
 * Copyright (c) 1998, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_INTERPRETER_REWRITER_HPP
#define SHARE_VM_INTERPRETER_REWRITER_HPP

#include "memory/allocation.hpp"
#include "runtime/handles.inline.hpp"
#include "utilities/growableArray.hpp"

// The Rewriter adds caches to the constant pool and rewrites bytecode indices
// pointing into the constant pool for better interpreter performance.
// Stack-allocated helper that performs the rewriting for one class.
// All bookkeeping below maps constant pool indices to constant pool cache
// indices and resolved-reference indices; cache indices are defined purely
// by append order, so the maps must be filled in a fixed sequence:
// first a walk over the constant pool (the "first iteration"), then
// record_map_limits(), then any invokespecial/invokedynamic extras.
class Rewriter: public StackObj {
 private:
  instanceKlassHandle _klass;                        // class being rewritten
  constantPoolHandle  _pool;                         // its constant pool
  Array<Method*>*     _methods;                      // methods whose bytecodes are scanned/rewritten
  intArray            _cp_map;                       // cp index -> cp cache index, or -1 if none
  intStack            _cp_cache_map;                 // for Methodref, Fieldref,
                                                     // InterfaceMethodref and InvokeDynamic
  intArray            _reference_map;                // maps from cp index to resolved_refs index (or -1)
  intStack            _resolved_references_map;      // for strings, methodHandle, methodType
  intStack            _invokedynamic_references_map; // for invokedynamic resolved refs
  intArray            _method_handle_invokers;       // NOTE(review): not referenced in this header;
                                                     // presumably filled in rewriter.cpp for
                                                     // maybe_rewrite_invokehandle — confirm there
  int                 _resolved_reference_limit;     // size of _resolved_references_map after the
                                                     // first iteration (-1 until recorded)

  // For mapping invokedynamic bytecodes, which are discovered during method
  // scanning.  The invokedynamic entries are added at the end of the cpCache.
  // If there are any invokespecial/InterfaceMethodref special case bytecodes,
  // these entries are added before invokedynamic entries so that the
  // invokespecial bytecode 16 bit index doesn't overflow.
  intStack _invokedynamic_cp_cache_map;

  // For patching invokedynamic bytecodes once the final cache layout is known.
  GrowableArray<address>* _patch_invokedynamic_bcps;  // bcp of each indy bytecode to patch
  GrowableArray<int>*     _patch_invokedynamic_refs;  // matching resolved-reference indices

  // Size and reset every map for a constant pool of 'length' entries.
  // Must be called before any add_*_entry() call.
  void init_maps(int length) {
    _cp_map.initialize(length, -1);
    // Choose an initial value large enough that we don't get frequent
    // calls to grow().
    _cp_cache_map.initialize(length/2);
    // Also cache resolved objects, in another different cache.
    _reference_map.initialize(length, -1);
    _resolved_references_map.initialize(length/2);
    _invokedynamic_references_map.initialize(length/2);
    // -1 marks "first iteration not finished yet"; several asserts below
    // use these sentinels to enforce the required call ordering.
    _resolved_reference_limit = -1;
    _first_iteration_cp_cache_limit = -1;

    // invokedynamic specific fields
    _invokedynamic_cp_cache_map.initialize(length/4);
    _patch_invokedynamic_bcps = new GrowableArray<address>(length/4);
    _patch_invokedynamic_refs = new GrowableArray<int>(length/4);
  }

  int _first_iteration_cp_cache_limit;  // size of _cp_cache_map after the first
                                        // iteration (-1 until recorded)

  // Remember how big the cp cache and resolved-reference maps grew during
  // the constant pool walk, so entries appended later can be distinguished.
  void record_map_limits() {
    // Record initial size of the two arrays generated for the CP cache
    // relative to walking the constant pool.
    _first_iteration_cp_cache_limit = _cp_cache_map.length();
    _resolved_reference_limit = _resolved_references_map.length();
  }

  // Number of cp cache entries appended after record_map_limits().
  int cp_cache_delta() {
    // How many cp cache entries were added since recording map limits after
    // cp cache initialization?
    assert(_first_iteration_cp_cache_limit != -1, "only valid after first iteration");
    return _cp_cache_map.length() - _first_iteration_cp_cache_limit;
  }

  // Forward mapping cp index -> cache index; caller must check has_cp_cache() first.
  int  cp_entry_to_cp_cache(int i) { assert(has_cp_cache(i), "oob"); return _cp_map[i]; }
  // Unsigned compare doubles as a bounds check for negative i.
  bool has_cp_cache(int i) { return (uint)i < (uint)_cp_map.length() && _cp_map[i] >= 0; }

  // Append cp_index to cp_cache_map and record the forward mapping in cp_map;
  // returns the new cache index.  Each cp_index may be entered at most once.
  int add_map_entry(int cp_index, intArray* cp_map, intStack* cp_cache_map) {
    assert(cp_map->at(cp_index) == -1, "not twice on same cp_index");
    int cache_index = cp_cache_map->append(cp_index);
    cp_map->at_put(cp_index, cache_index);
    return cache_index;
  }

  // Add a regular (non-invokedynamic) cp cache entry; only legal during the
  // first iteration, i.e. before record_map_limits() has run.
  int add_cp_cache_entry(int cp_index) {
    assert(_pool->tag_at(cp_index).value() != JVM_CONSTANT_InvokeDynamic, "use indy version");
    assert(_first_iteration_cp_cache_limit == -1, "do not add cache entries after first iteration");
    int cache_index = add_map_entry(cp_index, &_cp_map, &_cp_cache_map);
    assert(cp_entry_to_cp_cache(cp_index) == cache_index, "");
    assert(cp_cache_entry_pool_index(cache_index) == cp_index, "");
    return cache_index;
  }

  // Add an invokedynamic cp cache entry (one per invokedynamic bytecode, so
  // a single cp_index can map to many cache entries); only legal after the
  // first iteration.
  int add_invokedynamic_cp_cache_entry(int cp_index) {
    assert(_pool->tag_at(cp_index).value() == JVM_CONSTANT_InvokeDynamic, "use non-indy version");
    assert(_first_iteration_cp_cache_limit >= 0, "add indy cache entries after first iteration");
    // add to the invokedynamic index map.
    int cache_index = _invokedynamic_cp_cache_map.append(cp_index);
    // do not update _cp_map, since the mapping is one-to-many
    assert(invokedynamic_cp_cache_entry_pool_index(cache_index) == cp_index, "");
    // The indy entries logically follow the regular cp cache entries, so the
    // index handed back (and written into the bytecode) is biased by the
    // first-iteration cache size.
    return cache_index + _first_iteration_cp_cache_limit;
  }

  // Reverse lookup for the *unbiased* index inside _invokedynamic_cp_cache_map.
  int invokedynamic_cp_cache_entry_pool_index(int cache_index) {
    int cp_index = _invokedynamic_cp_cache_map[cache_index];
    return cp_index;
  }

  // add a new CP cache entry beyond the normal cache for the special case of
  // invokespecial with InterfaceMethodref as cpool operand.
  int add_invokespecial_cp_cache_entry(int cp_index) {
    assert(_first_iteration_cp_cache_limit >= 0, "add these special cache entries after first iteration");
    // Don't add InterfaceMethodref if it already exists at the end.
    for (int i = _first_iteration_cp_cache_limit; i < _cp_cache_map.length(); i++) {
      if (cp_cache_entry_pool_index(i) == cp_index) {
        return i;  // reuse the existing late entry for this cp_index
      }
    }
    int cache_index = _cp_cache_map.append(cp_index);
    assert(cache_index >= _first_iteration_cp_cache_limit, "");
    // do not update _cp_map, since the mapping is one-to-many
    assert(cp_cache_entry_pool_index(cache_index) == cp_index, "");
    return cache_index;
  }

  // Forward mapping cp index -> resolved_references index; caller must check
  // has_entry_in_resolved_references() first.
  int  cp_entry_to_resolved_references(int cp_index) const {
    assert(has_entry_in_resolved_references(cp_index), "oob");
    return _reference_map[cp_index];
  }
  bool has_entry_in_resolved_references(int cp_index) const {
    return (uint)cp_index < (uint)_reference_map.length() && _reference_map[cp_index] >= 0;
  }

  // add a new entry to the resolved_references map
  int add_resolved_references_entry(int cp_index) {
    int ref_index = add_map_entry(cp_index, &_reference_map, &_resolved_references_map);
    assert(cp_entry_to_resolved_references(cp_index) == ref_index, "");
    return ref_index;
  }

  // Add a consecutive group of new entries to the resolved_references map
  // (for invokedynamic and invokehandle only); returns the index of the
  // first entry of the group.
  int add_invokedynamic_resolved_references_entries(int cp_index, int cache_index) {
    assert(_resolved_reference_limit >= 0, "must add indy refs after first iteration");
    int ref_index = -1;
    for (int entry = 0; entry < ConstantPoolCacheEntry::_indy_resolved_references_entries; entry++) {
      const int index = _resolved_references_map.append(cp_index);  // many-to-one
      assert(index >= _resolved_reference_limit, "");
      if (entry == 0) {
        ref_index = index;  // remember the first of the consecutive entries
      }
      assert((index - entry) == ref_index, "entries must be consecutive");
      // Back-map each reference slot to the owning cp cache entry.
      _invokedynamic_references_map.at_put_grow(index, cache_index, -1);
    }
    return ref_index;
  }

  // Reverse mapping: resolved_references index -> cp index.
  int resolved_references_entry_to_pool_index(int ref_index) {
    int cp_index = _resolved_references_map[ref_index];
    return cp_index;
  }

  // Access the contents of _cp_cache_map to determine CP cache layout.
  int cp_cache_entry_pool_index(int cache_index) {
    int cp_index = _cp_cache_map[cache_index];
    return cp_index;
  }

  // All the work goes in here:
  Rewriter(instanceKlassHandle klass, constantPoolHandle cpool, Array<Method*>* methods, TRAPS);

  // Rewriting passes; bodies live in rewriter.cpp.  The 'reverse' flag on the
  // rewrite_* methods selects undo mode (used by restore_bytecodes below).
  void compute_index_maps();
  void make_constant_pool_cache(TRAPS);
  void scan_method(Method* m, bool reverse, bool* invokespecial_error);
  void rewrite_Object_init(methodHandle m, TRAPS);
  void rewrite_member_reference(address bcp, int offset, bool reverse);
  void maybe_rewrite_invokehandle(address opc, int cp_index, int cache_index, bool reverse);
  void rewrite_invokedynamic(address bcp, int offset, bool reverse);
  void maybe_rewrite_ldc(address bcp, int offset, bool is_wide, bool reverse);
  void rewrite_invokespecial(address bcp, int offset, bool reverse, bool* invokespecial_error);

  void patch_invokedynamic_bytecodes();

  // Do all the work.
  void rewrite_bytecodes(TRAPS);

  // Revert bytecodes in case of an exception.
  void restore_bytecodes();

  static methodHandle rewrite_jsrs(methodHandle m, TRAPS);
 public:
  // Driver routine:
  static void rewrite(instanceKlassHandle klass, TRAPS);
};

#endif // SHARE_VM_INTERPRETER_REWRITER_HPP