Tue, 21 Apr 2009 23:21:04 -0700
6655646: dynamic languages need dynamically linked call sites
Summary: invokedynamic instruction (JSR 292 RI)
Reviewed-by: twisti, never
1 /*
2 * Copyright 1998-2008 Sun Microsystems, Inc. All Rights Reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
20 * CA 95054 USA or visit www.sun.com if you need additional information or
21 * have any questions.
22 *
23 */
25 #include "incls/_precompiled.incl"
26 #include "incls/_cpCacheOop.cpp.incl"
// Implementation of ConstantPoolCacheEntry
31 void ConstantPoolCacheEntry::set_initial_state(int index) {
32 if (constantPoolCacheOopDesc::is_secondary_index(index)) {
33 // Hack: The rewriter is trying to say that this entry itself
34 // will be a secondary entry.
35 int main_index = constantPoolCacheOopDesc::decode_secondary_index(index);
36 assert(0 <= main_index && main_index < 0x10000, "sanity check");
37 _indices = (main_index << 16);
38 assert(main_entry_index() == main_index, "");
39 return;
40 }
41 assert(0 < index && index < 0x10000, "sanity check");
42 _indices = index;
43 assert(constant_pool_index() == index, "");
44 }
47 int ConstantPoolCacheEntry::as_flags(TosState state, bool is_final,
48 bool is_vfinal, bool is_volatile,
49 bool is_method_interface, bool is_method) {
50 int f = state;
52 assert( state < number_of_states, "Invalid state in as_flags");
54 f <<= 1;
55 if (is_final) f |= 1;
56 f <<= 1;
57 if (is_vfinal) f |= 1;
58 f <<= 1;
59 if (is_volatile) f |= 1;
60 f <<= 1;
61 if (is_method_interface) f |= 1;
62 f <<= 1;
63 if (is_method) f |= 1;
64 f <<= ConstantPoolCacheEntry::hotSwapBit;
65 // Preserve existing flag bit values
66 #ifdef ASSERT
67 int old_state = ((_flags >> tosBits) & 0x0F);
68 assert(old_state == 0 || old_state == state,
69 "inconsistent cpCache flags state");
70 #endif
71 return (_flags | f) ;
72 }
74 void ConstantPoolCacheEntry::set_bytecode_1(Bytecodes::Code code) {
75 #ifdef ASSERT
76 // Read once.
77 volatile Bytecodes::Code c = bytecode_1();
78 assert(c == 0 || c == code || code == 0, "update must be consistent");
79 #endif
80 // Need to flush pending stores here before bytecode is written.
81 OrderAccess::release_store_ptr(&_indices, _indices | ((u_char)code << 16));
82 }
84 void ConstantPoolCacheEntry::set_bytecode_2(Bytecodes::Code code) {
85 #ifdef ASSERT
86 // Read once.
87 volatile Bytecodes::Code c = bytecode_2();
88 assert(c == 0 || c == code || code == 0, "update must be consistent");
89 #endif
90 // Need to flush pending stores here before bytecode is written.
91 OrderAccess::release_store_ptr(&_indices, _indices | ((u_char)code << 24));
92 }
#ifdef ASSERT
// It is possible to have two different dummy methodOops created
// when the resolve code for invoke interface executes concurrently.
// Hence the identity check is weakened for the invokeinterface case:
// two distinct oops are also accepted when they denote the same method,
// i.e. they agree on BOTH name and signature.
// (The previous code OR-ed the name and signature comparisons, which
// would also have accepted two unrelated methods that merely shared a
// name or merely shared a signature.)
bool ConstantPoolCacheEntry::same_methodOop(oop cur_f1, oop f1) {
  return cur_f1 == f1
      || (((methodOop)cur_f1)->name()      == ((methodOop)f1)->name() &&
          ((methodOop)cur_f1)->signature() == ((methodOop)f1)->signature());
}
#endif
106 // Note that concurrent update of both bytecodes can leave one of them
107 // reset to zero. This is harmless; the interpreter will simply re-resolve
108 // the damaged entry. More seriously, the memory synchronization is needed
109 // to flush other fields (f1, f2) completely to memory before the bytecodes
110 // are updated, lest other processors see a non-zero bytecode but zero f1/f2.
111 void ConstantPoolCacheEntry::set_field(Bytecodes::Code get_code,
112 Bytecodes::Code put_code,
113 KlassHandle field_holder,
114 int orig_field_index,
115 int field_offset,
116 TosState field_type,
117 bool is_final,
118 bool is_volatile) {
119 set_f1(field_holder());
120 set_f2(field_offset);
121 // The field index is used by jvm/ti and is the index into fields() array
122 // in holder instanceKlass. This is scaled by instanceKlass::next_offset.
123 assert((orig_field_index % instanceKlass::next_offset) == 0, "wierd index");
124 const int field_index = orig_field_index / instanceKlass::next_offset;
125 assert(field_index <= field_index_mask,
126 "field index does not fit in low flag bits");
127 set_flags(as_flags(field_type, is_final, false, is_volatile, false, false) |
128 (field_index & field_index_mask));
129 set_bytecode_1(get_code);
130 set_bytecode_2(put_code);
131 NOT_PRODUCT(verify(tty));
132 }
134 int ConstantPoolCacheEntry::field_index() const {
135 return (_flags & field_index_mask) * instanceKlass::next_offset;
136 }
// Resolve this entry as a method invocation site.  Depending on the
// invoke bytecode, either f1 (direct methodOop) or f2 (vtable index, or
// a statically-bound methodOop when vfinal) is filled in, then the
// flags, and the bytecode(s) are published last (see the memory-ordering
// note above set_field).
void ConstantPoolCacheEntry::set_method(Bytecodes::Code invoke_code,
                                        methodHandle method,
                                        int vtable_index) {

  assert(method->interpreter_entry() != NULL, "should have been set at this point");
  assert(!method->is_obsolete(), "attempt to write obsolete method to cpCache");
  bool change_to_virtual = (invoke_code == Bytecodes::_invokeinterface);

  // byte_no records which bytecode slot (1 or 2) will carry the
  // resolved state; -1 means "not yet decided" and is unreachable below.
  int byte_no = -1;
  bool needs_vfinal_flag = false;
  switch (invoke_code) {
    case Bytecodes::_invokedynamic:
    case Bytecodes::_invokevirtual:
    case Bytecodes::_invokeinterface: {
        if (method->can_be_statically_bound()) {
          // Statically bindable: store the methodOop itself in f2 and
          // mark the entry vfinal so f2 is treated as an oop.
          set_f2((intptr_t)method());
          needs_vfinal_flag = true;
        } else {
          assert(vtable_index >= 0, "valid index");
          set_f2(vtable_index);
        }
        byte_no = 2;
        break;
    }
    case Bytecodes::_invokespecial:
      // Preserve the value of the vfinal flag on invokevirtual bytecode
      // which may be shared with this constant pool cache entry.
      needs_vfinal_flag = is_resolved(Bytecodes::_invokevirtual) && is_vfinal();
      // fall through
    case Bytecodes::_invokestatic:
      set_f1(method());
      byte_no = 1;
      break;
    default:
      ShouldNotReachHere();
      break;
  }

  set_flags(as_flags(as_TosState(method->result_type()),
                     method->is_final_method(),
                     needs_vfinal_flag,
                     false,
                     change_to_virtual,
                     true)|
            method()->size_of_parameters());

  // Note: byte_no also appears in TemplateTable::resolve.
  if (byte_no == 1) {
    set_bytecode_1(invoke_code);
  } else if (byte_no == 2) {
    if (change_to_virtual) {
      // NOTE: THIS IS A HACK - BE VERY CAREFUL!!!
      //
      // Workaround for the case where we encounter an invokeinterface, but we
      // should really have an _invokevirtual since the resolved method is a
      // virtual method in java.lang.Object. This is a corner case in the spec
      // but is presumably legal. javac does not generate this code.
      //
      // We set bytecode_1() to _invokeinterface, because that is the
      // bytecode # used by the interpreter to see if it is resolved.
      // We set bytecode_2() to _invokevirtual.
      // See also interpreterRuntime.cpp. (8/25/2000)
      // Only set resolved for the invokeinterface case if method is public.
      // Otherwise, the method needs to be reresolved with caller for each
      // interface call.
      if (method->is_public()) set_bytecode_1(invoke_code);
      set_bytecode_2(Bytecodes::_invokevirtual);
    } else {
      set_bytecode_2(invoke_code);
    }
  } else {
    ShouldNotReachHere();
  }
  NOT_PRODUCT(verify(tty));
}
215 void ConstantPoolCacheEntry::set_interface_call(methodHandle method, int index) {
216 klassOop interf = method->method_holder();
217 assert(instanceKlass::cast(interf)->is_interface(), "must be an interface");
218 set_f1(interf);
219 set_f2(index);
220 set_flags(as_flags(as_TosState(method->result_type()), method->is_final_method(), false, false, false, true) | method()->size_of_parameters());
221 set_bytecode_1(Bytecodes::_invokeinterface);
222 }
// Resolve this (secondary) entry as an invokedynamic call site:
// f1 = the CallSite object, f2 = extra_data.  Racing resolvers may each
// try to install their own CallSite; the cmpxchg decides the winner.
void ConstantPoolCacheEntry::set_dynamic_call(Handle call_site, int extra_data) {
  methodOop method = (methodOop) sun_dyn_CallSiteImpl::vmmethod(call_site());
  assert(method->is_method(), "must be initialized properly");
  int param_size = method->size_of_parameters();
  assert(param_size > 1, "method argument size must include MH.this & initial dynamic receiver");
  param_size -= 1;  // do not count MH.this; it is not stacked for invokedynamic
  if (Atomic::cmpxchg_ptr(call_site(), &_f1, NULL) == NULL) {
    // racing threads might be trying to install their own favorites;
    // the winner re-stores through set_f1 — presumably so the normal
    // oop-store protocol (barriers) runs; confirm against set_f1.
    set_f1(call_site());
  }
  set_f2(extra_data);
  set_flags(as_flags(as_TosState(method->result_type()), method->is_final_method(), false, false, false, true) | param_size);
  // do not do set_bytecode on a secondary CP cache entry
  //set_bytecode_1(Bytecodes::_invokedynamic);
}
242 class LocalOopClosure: public OopClosure {
243 private:
244 void (*_f)(oop*);
246 public:
247 LocalOopClosure(void f(oop*)) { _f = f; }
248 virtual void do_oop(oop* o) { _f(o); }
249 virtual void do_oop(narrowOop *o) { ShouldNotReachHere(); }
250 };
253 void ConstantPoolCacheEntry::oops_do(void f(oop*)) {
254 LocalOopClosure blk(f);
255 oop_iterate(&blk);
256 }
259 void ConstantPoolCacheEntry::oop_iterate(OopClosure* blk) {
260 assert(in_words(size()) == 4, "check code below - may need adjustment");
261 // field[1] is always oop or NULL
262 blk->do_oop((oop*)&_f1);
263 if (is_vfinal()) {
264 blk->do_oop((oop*)&_f2);
265 }
266 }
269 void ConstantPoolCacheEntry::oop_iterate_m(OopClosure* blk, MemRegion mr) {
270 assert(in_words(size()) == 4, "check code below - may need adjustment");
271 // field[1] is always oop or NULL
272 if (mr.contains((oop *)&_f1)) blk->do_oop((oop*)&_f1);
273 if (is_vfinal()) {
274 if (mr.contains((oop *)&_f2)) blk->do_oop((oop*)&_f2);
275 }
276 }
279 void ConstantPoolCacheEntry::follow_contents() {
280 assert(in_words(size()) == 4, "check code below - may need adjustment");
281 // field[1] is always oop or NULL
282 MarkSweep::mark_and_push((oop*)&_f1);
283 if (is_vfinal()) {
284 MarkSweep::mark_and_push((oop*)&_f2);
285 }
286 }
288 #ifndef SERIALGC
289 void ConstantPoolCacheEntry::follow_contents(ParCompactionManager* cm) {
290 assert(in_words(size()) == 4, "check code below - may need adjustment");
291 // field[1] is always oop or NULL
292 PSParallelCompact::mark_and_push(cm, (oop*)&_f1);
293 if (is_vfinal()) {
294 PSParallelCompact::mark_and_push(cm, (oop*)&_f2);
295 }
296 }
297 #endif // SERIALGC
299 void ConstantPoolCacheEntry::adjust_pointers() {
300 assert(in_words(size()) == 4, "check code below - may need adjustment");
301 // field[1] is always oop or NULL
302 MarkSweep::adjust_pointer((oop*)&_f1);
303 if (is_vfinal()) {
304 MarkSweep::adjust_pointer((oop*)&_f2);
305 }
306 }
308 #ifndef SERIALGC
309 void ConstantPoolCacheEntry::update_pointers() {
310 assert(in_words(size()) == 4, "check code below - may need adjustment");
311 // field[1] is always oop or NULL
312 PSParallelCompact::adjust_pointer((oop*)&_f1);
313 if (is_vfinal()) {
314 PSParallelCompact::adjust_pointer((oop*)&_f2);
315 }
316 }
318 void ConstantPoolCacheEntry::update_pointers(HeapWord* beg_addr,
319 HeapWord* end_addr) {
320 assert(in_words(size()) == 4, "check code below - may need adjustment");
321 // field[1] is always oop or NULL
322 PSParallelCompact::adjust_pointer((oop*)&_f1, beg_addr, end_addr);
323 if (is_vfinal()) {
324 PSParallelCompact::adjust_pointer((oop*)&_f2, beg_addr, end_addr);
325 }
326 }
327 #endif // SERIALGC
// RedefineClasses() API support:
// If this constantPoolCacheEntry refers to old_method then update it
// to refer to new_method.  Returns true when this entry referenced
// old_method (and was updated), false otherwise.  trace_name_printed
// is in/out: set once the holder's name has been traced so it is not
// repeated for every entry.
bool ConstantPoolCacheEntry::adjust_method_entry(methodOop old_method,
       methodOop new_method, bool * trace_name_printed) {

  if (is_vfinal()) {
    // virtual and final so f2() contains method ptr instead of vtable index
    if (f2() == (intptr_t)old_method) {
      // match old_method so need an update
      _f2 = (intptr_t)new_method;
      if (RC_TRACE_IN_RANGE(0x00100000, 0x00400000)) {
        if (!(*trace_name_printed)) {
          // RC_TRACE_MESG macro has an embedded ResourceMark
          RC_TRACE_MESG(("adjust: name=%s",
            Klass::cast(old_method->method_holder())->external_name()));
          *trace_name_printed = true;
        }
        // RC_TRACE macro has an embedded ResourceMark
        RC_TRACE(0x00400000, ("cpc vf-entry update: %s(%s)",
          new_method->name()->as_C_string(),
          new_method->signature()->as_C_string()));
      }

      return true;
    }

    // f1() is not used with virtual entries so bail out
    return false;
  }

  if ((oop)_f1 == NULL) {
    // NULL f1() means this is a virtual entry so bail out
    // We are assuming that the vtable index does not need change.
    return false;
  }

  if ((oop)_f1 == old_method) {
    // direct-call entry (f1 holds the methodOop): swap in the new method
    _f1 = new_method;
    if (RC_TRACE_IN_RANGE(0x00100000, 0x00400000)) {
      if (!(*trace_name_printed)) {
        // RC_TRACE_MESG macro has an embedded ResourceMark
        RC_TRACE_MESG(("adjust: name=%s",
          Klass::cast(old_method->method_holder())->external_name()));
        *trace_name_printed = true;
      }
      // RC_TRACE macro has an embedded ResourceMark
      RC_TRACE(0x00400000, ("cpc entry update: %s(%s)",
        new_method->name()->as_C_string(),
        new_method->signature()->as_C_string()));
    }

    return true;
  }

  return false;
}
387 bool ConstantPoolCacheEntry::is_interesting_method_entry(klassOop k) {
388 if (!is_method_entry()) {
389 // not a method entry so not interesting by default
390 return false;
391 }
393 methodOop m = NULL;
394 if (is_vfinal()) {
395 // virtual and final so _f2 contains method ptr instead of vtable index
396 m = (methodOop)_f2;
397 } else if ((oop)_f1 == NULL) {
398 // NULL _f1 means this is a virtual entry so also not interesting
399 return false;
400 } else {
401 if (!((oop)_f1)->is_method()) {
402 // _f1 can also contain a klassOop for an interface
403 return false;
404 }
405 m = (methodOop)_f1;
406 }
408 assert(m != NULL && m->is_method(), "sanity check");
409 if (m == NULL || !m->is_method() || m->method_holder() != k) {
410 // robustness for above sanity checks or method is not in
411 // the interesting class
412 return false;
413 }
415 // the method is in the interesting class so the entry is interesting
416 return true;
417 }
419 void ConstantPoolCacheEntry::print(outputStream* st, int index) const {
420 // print separator
421 if (index == 0) tty->print_cr(" -------------");
422 // print entry
423 tty->print_cr("%3d (%08x) ", index, this);
424 if (is_secondary_entry())
425 tty->print_cr("[%5d|secondary]", main_entry_index());
426 else
427 tty->print_cr("[%02x|%02x|%5d]", bytecode_2(), bytecode_1(), constant_pool_index());
428 tty->print_cr(" [ %08x]", (address)(oop)_f1);
429 tty->print_cr(" [ %08x]", _f2);
430 tty->print_cr(" [ %08x]", _flags);
431 tty->print_cr(" -------------");
432 }
// Verify the integrity of this entry (reached via NOT_PRODUCT(verify(tty))
// calls after resolution).  Intentionally a no-op for now.
void ConstantPoolCacheEntry::verify(outputStream* st) const {
  // not implemented yet
}
438 // Implementation of ConstantPoolCache
440 void constantPoolCacheOopDesc::initialize(intArray& inverse_index_map) {
441 assert(inverse_index_map.length() == length(), "inverse index map must have same length as cache");
442 for (int i = 0; i < length(); i++) entry_at(i)->set_initial_state(inverse_index_map[i]);
443 }
445 // RedefineClasses() API support:
446 // If any entry of this constantPoolCache points to any of
447 // old_methods, replace it with the corresponding new_method.
448 void constantPoolCacheOopDesc::adjust_method_entries(methodOop* old_methods, methodOop* new_methods,
449 int methods_length, bool * trace_name_printed) {
451 if (methods_length == 0) {
452 // nothing to do if there are no methods
453 return;
454 }
456 // get shorthand for the interesting class
457 klassOop old_holder = old_methods[0]->method_holder();
459 for (int i = 0; i < length(); i++) {
460 if (!entry_at(i)->is_interesting_method_entry(old_holder)) {
461 // skip uninteresting methods
462 continue;
463 }
465 // The constantPoolCache contains entries for several different
466 // things, but we only care about methods. In fact, we only care
467 // about methods in the same class as the one that contains the
468 // old_methods. At this point, we have an interesting entry.
470 for (int j = 0; j < methods_length; j++) {
471 methodOop old_method = old_methods[j];
472 methodOop new_method = new_methods[j];
474 if (entry_at(i)->adjust_method_entry(old_method, new_method,
475 trace_name_printed)) {
476 // current old_method matched this entry and we updated it so
477 // break out and get to the next interesting entry if there one
478 break;
479 }
480 }
481 }
482 }