44 intStack _resolved_references_map; // for strings, methodHandle, methodType |
44 intStack _resolved_references_map; // for strings, methodHandle, methodType |
45 intStack _invokedynamic_references_map; // for invokedynamic resolved refs |
45 intStack _invokedynamic_references_map; // for invokedynamic resolved refs |
46 intArray _method_handle_invokers; |
46 intArray _method_handle_invokers; |
47 int _resolved_reference_limit; |
47 int _resolved_reference_limit; |
48 |
48 |
|
49 // For mapping invokedynamic bytecodes, which are discovered during method |
|
50 // scanning. The invokedynamic entries are added at the end of the cpCache. |
|
51 // If there are any invokespecial/InterfaceMethodref special case bytecodes, |
|
52 // these entries are added before invokedynamic entries so that the |
|
53 // invokespecial bytecode 16 bit index doesn't overflow. |
|
54 intStack _invokedynamic_cp_cache_map; |
|
55 |
|
56 // For patching. |
|
57 GrowableArray<address>* _patch_invokedynamic_bcps; |
|
58 GrowableArray<int>* _patch_invokedynamic_refs; |
|
59 |
49 void init_maps(int length) { |
60 void init_maps(int length) { |
50 _cp_map.initialize(length, -1); |
61 _cp_map.initialize(length, -1); |
51 // Choose an initial value large enough that we don't get frequent |
62 // Choose an initial value large enough that we don't get frequent |
52 // calls to grow(). |
63 // calls to grow(). |
53 _cp_cache_map.initialize(length / 2); |
64 _cp_cache_map.initialize(length/2); |
54 // Also cache resolved objects, in another different cache. |
65 // Also cache resolved objects, in another different cache. |
55 _reference_map.initialize(length, -1); |
66 _reference_map.initialize(length, -1); |
56 _resolved_references_map.initialize(length / 2); |
67 _resolved_references_map.initialize(length/2); |
57 _invokedynamic_references_map.initialize(length / 2); |
68 _invokedynamic_references_map.initialize(length/2); |
58 _resolved_reference_limit = -1; |
69 _resolved_reference_limit = -1; |
59 DEBUG_ONLY(_cp_cache_index_limit = -1); |
70 _first_iteration_cp_cache_limit = -1; |
60 } |
71 |
61 |
72 // invokedynamic specific fields |
62 int _cp_cache_index_limit; |
73 _invokedynamic_cp_cache_map.initialize(length/4); |
|
74 _patch_invokedynamic_bcps = new GrowableArray<address>(length/4); |
|
75 _patch_invokedynamic_refs = new GrowableArray<int>(length/4); |
|
76 } |
|
77 |
|
78 int _first_iteration_cp_cache_limit; |
63 void record_map_limits() { |
79 void record_map_limits() { |
64 #ifdef ASSERT |
80 // Record initial size of the two arrays generated for the CP cache |
65 // Record initial size of the two arrays generated for the CP cache: |
81 // relative to walking the constant pool. |
66 _cp_cache_index_limit = _cp_cache_map.length(); |
82 _first_iteration_cp_cache_limit = _cp_cache_map.length(); |
67 #endif //ASSERT |
|
68 _resolved_reference_limit = _resolved_references_map.length(); |
83 _resolved_reference_limit = _resolved_references_map.length(); |
|
84 } |
|
85 |
|
86 int cp_cache_delta() { |
|
87 // How many cp cache entries were added since recording map limits after |
|
88 // cp cache initialization? |
|
89 assert(_first_iteration_cp_cache_limit != -1, "only valid after first iteration"); |
|
90 return _cp_cache_map.length() - _first_iteration_cp_cache_limit; |
69 } |
91 } |
70 |
92 |
71 int cp_entry_to_cp_cache(int i) { assert(has_cp_cache(i), "oob"); return _cp_map[i]; } |
93 int cp_entry_to_cp_cache(int i) { assert(has_cp_cache(i), "oob"); return _cp_map[i]; } |
72 bool has_cp_cache(int i) { return (uint)i < (uint)_cp_map.length() && _cp_map[i] >= 0; } |
94 bool has_cp_cache(int i) { return (uint)i < (uint)_cp_map.length() && _cp_map[i] >= 0; } |
73 |
95 |
|
96 int add_map_entry(int cp_index, intArray* cp_map, intStack* cp_cache_map) { |
|
97 assert(cp_map->at(cp_index) == -1, "not twice on same cp_index"); |
|
98 int cache_index = cp_cache_map->append(cp_index); |
|
99 cp_map->at_put(cp_index, cache_index); |
|
100 return cache_index; |
|
101 } |
|
102 |
74 int add_cp_cache_entry(int cp_index) { |
103 int add_cp_cache_entry(int cp_index) { |
75 assert(_pool->tag_at(cp_index).value() != JVM_CONSTANT_InvokeDynamic, "use indy version"); |
104 assert(_pool->tag_at(cp_index).value() != JVM_CONSTANT_InvokeDynamic, "use indy version"); |
76 assert(_cp_map[cp_index] == -1, "not twice on same cp_index"); |
105 assert(_first_iteration_cp_cache_limit == -1, "do not add cache entries after first iteration"); |
77 assert(_cp_cache_index_limit == -1, "do not add cache entries after first iteration"); |
106 int cache_index = add_map_entry(cp_index, &_cp_map, &_cp_cache_map); |
78 int cache_index = _cp_cache_map.append(cp_index); |
|
79 _cp_map.at_put(cp_index, cache_index); |
|
80 assert(cp_entry_to_cp_cache(cp_index) == cache_index, ""); |
107 assert(cp_entry_to_cp_cache(cp_index) == cache_index, ""); |
81 assert(cp_cache_entry_pool_index(cache_index) == cp_index, ""); |
108 assert(cp_cache_entry_pool_index(cache_index) == cp_index, ""); |
82 return cache_index; |
109 return cache_index; |
83 } |
110 } |
84 |
111 |
85 // add a new CP cache entry beyond the normal cache (for invokedynamic only) |
|
86 int add_invokedynamic_cp_cache_entry(int cp_index) { |
112 int add_invokedynamic_cp_cache_entry(int cp_index) { |
87 assert(_pool->tag_at(cp_index).value() == JVM_CONSTANT_InvokeDynamic, "use non-indy version"); |
113 assert(_pool->tag_at(cp_index).value() == JVM_CONSTANT_InvokeDynamic, "use non-indy version"); |
88 assert(_cp_map[cp_index] == -1, "do not map from cp_index"); |
114 assert(_first_iteration_cp_cache_limit >= 0, "add indy cache entries after first iteration"); |
89 assert(_cp_cache_index_limit >= 0, "add indy cache entries after first iteration"); |
115 // add to the invokedynamic index map. |
|
116 int cache_index = _invokedynamic_cp_cache_map.append(cp_index); |
|
117 // do not update _cp_map, since the mapping is one-to-many |
|
118 assert(invokedynamic_cp_cache_entry_pool_index(cache_index) == cp_index, ""); |
|
119 // this index starts at one but in the bytecode it's appended to the end. |
|
120 return cache_index + _first_iteration_cp_cache_limit; |
|
121 } |
|
122 |
|
123 int invokedynamic_cp_cache_entry_pool_index(int cache_index) { |
|
124 int cp_index = _invokedynamic_cp_cache_map[cache_index]; |
|
125 return cp_index; |
|
126 } |
|
127 |
|
128 // add a new CP cache entry beyond the normal cache for the special case of |
|
129 // invokespecial with InterfaceMethodref as cpool operand. |
|
130 int add_invokespecial_cp_cache_entry(int cp_index) { |
|
131 assert(_first_iteration_cp_cache_limit >= 0, "add these special cache entries after first iteration"); |
|
132 // Don't add InterfaceMethodref if it already exists at the end. |
|
133 for (int i = _first_iteration_cp_cache_limit; i < _cp_cache_map.length(); i++) { |
|
134 if (cp_cache_entry_pool_index(i) == cp_index) { |
|
135 return i; |
|
136 } |
|
137 } |
90 int cache_index = _cp_cache_map.append(cp_index); |
138 int cache_index = _cp_cache_map.append(cp_index); |
91 assert(cache_index >= _cp_cache_index_limit, ""); |
139 assert(cache_index >= _first_iteration_cp_cache_limit, ""); |
92 // do not update _cp_map, since the mapping is one-to-many |
140 // do not update _cp_map, since the mapping is one-to-many |
93 assert(cp_cache_entry_pool_index(cache_index) == cp_index, ""); |
141 assert(cp_cache_entry_pool_index(cache_index) == cp_index, ""); |
94 return cache_index; |
142 return cache_index; |
95 } |
143 } |
96 |
144 |
97 // fix duplicated code later |
|
98 int cp_entry_to_resolved_references(int cp_index) const { |
145 int cp_entry_to_resolved_references(int cp_index) const { |
99 assert(has_entry_in_resolved_references(cp_index), "oob"); |
146 assert(has_entry_in_resolved_references(cp_index), "oob"); |
100 return _reference_map[cp_index]; |
147 return _reference_map[cp_index]; |
101 } |
148 } |
102 bool has_entry_in_resolved_references(int cp_index) const { |
149 bool has_entry_in_resolved_references(int cp_index) const { |
103 return (uint)cp_index < (uint)_reference_map.length() && _reference_map[cp_index] >= 0; |
150 return (uint)cp_index < (uint)_reference_map.length() && _reference_map[cp_index] >= 0; |
104 } |
151 } |
105 |
152 |
106 // add a new entry to the resolved_references map |
153 // add a new entry to the resolved_references map |
107 int add_resolved_references_entry(int cp_index) { |
154 int add_resolved_references_entry(int cp_index) { |
108 assert(_reference_map[cp_index] == -1, "not twice on same cp_index"); |
155 int ref_index = add_map_entry(cp_index, &_reference_map, &_resolved_references_map); |
109 assert(_resolved_reference_limit == -1, "do not add CP refs after first iteration"); |
|
110 int ref_index = _resolved_references_map.append(cp_index); |
|
111 _reference_map.at_put(cp_index, ref_index); |
|
112 assert(cp_entry_to_resolved_references(cp_index) == ref_index, ""); |
156 assert(cp_entry_to_resolved_references(cp_index) == ref_index, ""); |
113 return ref_index; |
157 return ref_index; |
114 } |
158 } |
115 |
159 |
  // add new entries to the resolved_references map (for invokedynamic and invokehandle only)
135 } |
179 } |
136 |
180 |
137 // Access the contents of _cp_cache_map to determine CP cache layout. |
181 // Access the contents of _cp_cache_map to determine CP cache layout. |
138 int cp_cache_entry_pool_index(int cache_index) { |
182 int cp_cache_entry_pool_index(int cache_index) { |
139 int cp_index = _cp_cache_map[cache_index]; |
183 int cp_index = _cp_cache_map[cache_index]; |
140 return cp_index; |
184 return cp_index; |
141 } |
185 } |
142 |
186 |
143 // All the work goes in here: |
187 // All the work goes in here: |
144 Rewriter(instanceKlassHandle klass, constantPoolHandle cpool, Array<Method*>* methods, TRAPS); |
188 Rewriter(instanceKlassHandle klass, constantPoolHandle cpool, Array<Method*>* methods, TRAPS); |
145 |
189 |
146 void compute_index_maps(); |
190 void compute_index_maps(); |
147 void make_constant_pool_cache(TRAPS); |
191 void make_constant_pool_cache(TRAPS); |
148 void scan_method(Method* m, bool reverse = false); |
192 void scan_method(Method* m, bool reverse, TRAPS); |
149 void rewrite_Object_init(methodHandle m, TRAPS); |
193 void rewrite_Object_init(methodHandle m, TRAPS); |
150 void rewrite_member_reference(address bcp, int offset, bool reverse = false); |
194 void rewrite_member_reference(address bcp, int offset, bool reverse); |
151 void maybe_rewrite_invokehandle(address opc, int cp_index, int cache_index, bool reverse = false); |
195 void maybe_rewrite_invokehandle(address opc, int cp_index, int cache_index, bool reverse); |
152 void rewrite_invokedynamic(address bcp, int offset, bool reverse = false); |
196 void rewrite_invokedynamic(address bcp, int offset, bool reverse); |
153 void maybe_rewrite_ldc(address bcp, int offset, bool is_wide, bool reverse = false); |
197 void maybe_rewrite_ldc(address bcp, int offset, bool is_wide, bool reverse); |
|
198 void rewrite_invokespecial(address bcp, int offset, bool reverse, TRAPS); |
|
199 |
|
200 void patch_invokedynamic_bytecodes(); |
|
201 |
154 // Revert bytecodes in case of an exception. |
202 // Revert bytecodes in case of an exception. |
155 void restore_bytecodes(); |
203 void restore_bytecodes(TRAPS); |
156 |
204 |
157 static methodHandle rewrite_jsrs(methodHandle m, TRAPS); |
205 static methodHandle rewrite_jsrs(methodHandle m, TRAPS); |
158 public: |
206 public: |
159 // Driver routine: |
207 // Driver routine: |
160 static void rewrite(instanceKlassHandle klass, TRAPS); |
208 static void rewrite(instanceKlassHandle klass, TRAPS); |