Thu, 12 Mar 2009 18:16:36 -0700
Merge
1 /*
2 * Copyright 1998-2008 Sun Microsystems, Inc. All Rights Reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
20 * CA 95054 USA or visit www.sun.com if you need additional information or
21 * have any questions.
22 *
23 */
25 #include "incls/_precompiled.incl"
26 #include "incls/_cpCacheOop.cpp.incl"
// Implementation of ConstantPoolCacheEntry
31 void ConstantPoolCacheEntry::set_initial_state(int index) {
32 assert(0 <= index && index < 0x10000, "sanity check");
33 _indices = index;
34 }
37 int ConstantPoolCacheEntry::as_flags(TosState state, bool is_final,
38 bool is_vfinal, bool is_volatile,
39 bool is_method_interface, bool is_method) {
40 int f = state;
42 assert( state < number_of_states, "Invalid state in as_flags");
44 f <<= 1;
45 if (is_final) f |= 1;
46 f <<= 1;
47 if (is_vfinal) f |= 1;
48 f <<= 1;
49 if (is_volatile) f |= 1;
50 f <<= 1;
51 if (is_method_interface) f |= 1;
52 f <<= 1;
53 if (is_method) f |= 1;
54 f <<= ConstantPoolCacheEntry::hotSwapBit;
55 // Preserve existing flag bit values
56 #ifdef ASSERT
57 int old_state = ((_flags >> tosBits) & 0x0F);
58 assert(old_state == 0 || old_state == state,
59 "inconsistent cpCache flags state");
60 #endif
61 return (_flags | f) ;
62 }
// Install the first "resolved" bytecode in byte 2 (bits 16..23) of _indices.
// The release store guarantees that all earlier stores to this entry (f1,
// f2, flags) are flushed to memory before the non-zero bytecode becomes
// visible — other threads treat a non-zero bytecode as "resolved" and must
// never observe it paired with stale f1/f2 (see the comment above
// set_field below).
void ConstantPoolCacheEntry::set_bytecode_1(Bytecodes::Code code) {
#ifdef ASSERT
  // Read once.
  volatile Bytecodes::Code c = bytecode_1();
  // Allowed transitions: unresolved -> code, or an idempotent re-write of
  // the same code, or clearing back to 0.
  assert(c == 0 || c == code || code == 0, "update must be consistent");
#endif
  // Need to flush pending stores here before bytecode is written.
  OrderAccess::release_store_ptr(&_indices, _indices | ((u_char)code << 16));
}
// Install the second "resolved" bytecode in byte 3 (bits 24..31) of
// _indices. Same publication protocol as set_bytecode_1: the release store
// flushes f1/f2/flags before the bytecode becomes visible to other threads.
void ConstantPoolCacheEntry::set_bytecode_2(Bytecodes::Code code) {
#ifdef ASSERT
  // Read once.
  volatile Bytecodes::Code c = bytecode_2();
  // Allowed transitions: unresolved -> code, same code again, or clear to 0.
  assert(c == 0 || c == code || code == 0, "update must be consistent");
#endif
  // Need to flush pending stores here before bytecode is written.
  OrderAccess::release_store_ptr(&_indices, _indices | ((u_char)code << 24));
}
84 #ifdef ASSERT
85 // It is possible to have two different dummy methodOops created
86 // when the resolve code for invoke interface executes concurrently
87 // Hence the assertion below is weakened a bit for the invokeinterface
88 // case.
89 bool ConstantPoolCacheEntry::same_methodOop(oop cur_f1, oop f1) {
90 return (cur_f1 == f1 || ((methodOop)cur_f1)->name() ==
91 ((methodOop)f1)->name() || ((methodOop)cur_f1)->signature() ==
92 ((methodOop)f1)->signature());
93 }
94 #endif
96 // Note that concurrent update of both bytecodes can leave one of them
97 // reset to zero. This is harmless; the interpreter will simply re-resolve
98 // the damaged entry. More seriously, the memory synchronization is needed
99 // to flush other fields (f1, f2) completely to memory before the bytecodes
100 // are updated, lest other processors see a non-zero bytecode but zero f1/f2.
101 void ConstantPoolCacheEntry::set_field(Bytecodes::Code get_code,
102 Bytecodes::Code put_code,
103 KlassHandle field_holder,
104 int orig_field_index,
105 int field_offset,
106 TosState field_type,
107 bool is_final,
108 bool is_volatile) {
109 set_f1(field_holder());
110 set_f2(field_offset);
111 // The field index is used by jvm/ti and is the index into fields() array
112 // in holder instanceKlass. This is scaled by instanceKlass::next_offset.
113 assert((orig_field_index % instanceKlass::next_offset) == 0, "wierd index");
114 const int field_index = orig_field_index / instanceKlass::next_offset;
115 assert(field_index <= field_index_mask,
116 "field index does not fit in low flag bits");
117 set_flags(as_flags(field_type, is_final, false, is_volatile, false, false) |
118 (field_index & field_index_mask));
119 set_bytecode_1(get_code);
120 set_bytecode_2(put_code);
121 NOT_PRODUCT(verify(tty));
122 }
124 int ConstantPoolCacheEntry::field_index() const {
125 return (_flags & field_index_mask) * instanceKlass::next_offset;
126 }
// Fill in this entry for a method invocation site. Layout depends on the
// invoke bytecode:
//  - invokevirtual/invokeinterface: f2 holds either the methodOop itself
//    (statically bindable; vfinal flag set) or the vtable index; resolved
//    state is published via bytecode 2.
//  - invokespecial/invokestatic: f1 holds the methodOop; published via
//    bytecode 1.
// The flags word carries the result TosState, final/vfinal bits and the
// parameter size; the bytecode stores at the end publish the entry with
// release semantics (see set_bytecode_1/2).
void ConstantPoolCacheEntry::set_method(Bytecodes::Code invoke_code,
                                        methodHandle method,
                                        int vtable_index) {

  assert(method->interpreter_entry() != NULL, "should have been set at this point");
  assert(!method->is_obsolete(), "attempt to write obsolete method to cpCache");
  // invokeinterface entries resolved to java.lang.Object virtuals get
  // rewritten to invokevirtual dispatch below.
  bool change_to_virtual = (invoke_code == Bytecodes::_invokeinterface);

  int byte_no = -1;
  bool needs_vfinal_flag = false;
  switch (invoke_code) {
    case Bytecodes::_invokevirtual:
    case Bytecodes::_invokeinterface: {
        if (method->can_be_statically_bound()) {
          // No dynamic dispatch needed: store the methodOop directly in f2
          // and mark the entry vfinal.
          set_f2((intptr_t)method());
          needs_vfinal_flag = true;
        } else {
          // Normal virtual dispatch: f2 holds the vtable index.
          assert(vtable_index >= 0, "valid index");
          set_f2(vtable_index);
        }
        byte_no = 2;
        break;
    }
    case Bytecodes::_invokespecial:
      // Preserve the value of the vfinal flag on invokevirtual bytecode
      // which may be shared with this constant pool cache entry.
      needs_vfinal_flag = is_resolved(Bytecodes::_invokevirtual) && is_vfinal();
      // fall through
    case Bytecodes::_invokestatic:
      set_f1(method());
      byte_no = 1;
      break;
    default:
      ShouldNotReachHere();
      break;
  }

  set_flags(as_flags(as_TosState(method->result_type()),
                     method->is_final_method(),
                     needs_vfinal_flag,
                     false,
                     change_to_virtual,
                     true)|
            method()->size_of_parameters());

  // Note: byte_no also appears in TemplateTable::resolve.
  if (byte_no == 1) {
    set_bytecode_1(invoke_code);
  } else if (byte_no == 2) {
    if (change_to_virtual) {
      // NOTE: THIS IS A HACK - BE VERY CAREFUL!!!
      //
      // Workaround for the case where we encounter an invokeinterface, but we
      // should really have an _invokevirtual since the resolved method is a
      // virtual method in java.lang.Object. This is a corner case in the spec
      // but is presumably legal. javac does not generate this code.
      //
      // We set bytecode_1() to _invokeinterface, because that is the
      // bytecode # used by the interpreter to see if it is resolved.
      // We set bytecode_2() to _invokevirtual.
      // See also interpreterRuntime.cpp. (8/25/2000)
      // Only set resolved for the invokeinterface case if method is public.
      // Otherwise, the method needs to be reresolved with caller for each
      // interface call.
      if (method->is_public()) set_bytecode_1(invoke_code);
      set_bytecode_2(Bytecodes::_invokevirtual);
    } else {
      set_bytecode_2(invoke_code);
    }
  } else {
    ShouldNotReachHere();
  }
  NOT_PRODUCT(verify(tty));
}
204 void ConstantPoolCacheEntry::set_interface_call(methodHandle method, int index) {
205 klassOop interf = method->method_holder();
206 assert(instanceKlass::cast(interf)->is_interface(), "must be an interface");
207 set_f1(interf);
208 set_f2(index);
209 set_flags(as_flags(as_TosState(method->result_type()), method->is_final_method(), false, false, false, true) | method()->size_of_parameters());
210 set_bytecode_1(Bytecodes::_invokeinterface);
211 }
214 class LocalOopClosure: public OopClosure {
215 private:
216 void (*_f)(oop*);
218 public:
219 LocalOopClosure(void f(oop*)) { _f = f; }
220 virtual void do_oop(oop* o) { _f(o); }
221 virtual void do_oop(narrowOop *o) { ShouldNotReachHere(); }
222 };
225 void ConstantPoolCacheEntry::oops_do(void f(oop*)) {
226 LocalOopClosure blk(f);
227 oop_iterate(&blk);
228 }
231 void ConstantPoolCacheEntry::oop_iterate(OopClosure* blk) {
232 assert(in_words(size()) == 4, "check code below - may need adjustment");
233 // field[1] is always oop or NULL
234 blk->do_oop((oop*)&_f1);
235 if (is_vfinal()) {
236 blk->do_oop((oop*)&_f2);
237 }
238 }
241 void ConstantPoolCacheEntry::oop_iterate_m(OopClosure* blk, MemRegion mr) {
242 assert(in_words(size()) == 4, "check code below - may need adjustment");
243 // field[1] is always oop or NULL
244 if (mr.contains((oop *)&_f1)) blk->do_oop((oop*)&_f1);
245 if (is_vfinal()) {
246 if (mr.contains((oop *)&_f2)) blk->do_oop((oop*)&_f2);
247 }
248 }
251 void ConstantPoolCacheEntry::follow_contents() {
252 assert(in_words(size()) == 4, "check code below - may need adjustment");
253 // field[1] is always oop or NULL
254 MarkSweep::mark_and_push((oop*)&_f1);
255 if (is_vfinal()) {
256 MarkSweep::mark_and_push((oop*)&_f2);
257 }
258 }
260 #ifndef SERIALGC
261 void ConstantPoolCacheEntry::follow_contents(ParCompactionManager* cm) {
262 assert(in_words(size()) == 4, "check code below - may need adjustment");
263 // field[1] is always oop or NULL
264 PSParallelCompact::mark_and_push(cm, (oop*)&_f1);
265 if (is_vfinal()) {
266 PSParallelCompact::mark_and_push(cm, (oop*)&_f2);
267 }
268 }
269 #endif // SERIALGC
271 void ConstantPoolCacheEntry::adjust_pointers() {
272 assert(in_words(size()) == 4, "check code below - may need adjustment");
273 // field[1] is always oop or NULL
274 MarkSweep::adjust_pointer((oop*)&_f1);
275 if (is_vfinal()) {
276 MarkSweep::adjust_pointer((oop*)&_f2);
277 }
278 }
280 #ifndef SERIALGC
281 void ConstantPoolCacheEntry::update_pointers() {
282 assert(in_words(size()) == 4, "check code below - may need adjustment");
283 // field[1] is always oop or NULL
284 PSParallelCompact::adjust_pointer((oop*)&_f1);
285 if (is_vfinal()) {
286 PSParallelCompact::adjust_pointer((oop*)&_f2);
287 }
288 }
290 void ConstantPoolCacheEntry::update_pointers(HeapWord* beg_addr,
291 HeapWord* end_addr) {
292 assert(in_words(size()) == 4, "check code below - may need adjustment");
293 // field[1] is always oop or NULL
294 PSParallelCompact::adjust_pointer((oop*)&_f1, beg_addr, end_addr);
295 if (is_vfinal()) {
296 PSParallelCompact::adjust_pointer((oop*)&_f2, beg_addr, end_addr);
297 }
298 }
299 #endif // SERIALGC
301 // RedefineClasses() API support:
302 // If this constantPoolCacheEntry refers to old_method then update it
303 // to refer to new_method.
304 bool ConstantPoolCacheEntry::adjust_method_entry(methodOop old_method,
305 methodOop new_method, bool * trace_name_printed) {
307 if (is_vfinal()) {
308 // virtual and final so f2() contains method ptr instead of vtable index
309 if (f2() == (intptr_t)old_method) {
310 // match old_method so need an update
311 _f2 = (intptr_t)new_method;
312 if (RC_TRACE_IN_RANGE(0x00100000, 0x00400000)) {
313 if (!(*trace_name_printed)) {
314 // RC_TRACE_MESG macro has an embedded ResourceMark
315 RC_TRACE_MESG(("adjust: name=%s",
316 Klass::cast(old_method->method_holder())->external_name()));
317 *trace_name_printed = true;
318 }
319 // RC_TRACE macro has an embedded ResourceMark
320 RC_TRACE(0x00400000, ("cpc vf-entry update: %s(%s)",
321 new_method->name()->as_C_string(),
322 new_method->signature()->as_C_string()));
323 }
325 return true;
326 }
328 // f1() is not used with virtual entries so bail out
329 return false;
330 }
332 if ((oop)_f1 == NULL) {
333 // NULL f1() means this is a virtual entry so bail out
334 // We are assuming that the vtable index does not need change.
335 return false;
336 }
338 if ((oop)_f1 == old_method) {
339 _f1 = new_method;
340 if (RC_TRACE_IN_RANGE(0x00100000, 0x00400000)) {
341 if (!(*trace_name_printed)) {
342 // RC_TRACE_MESG macro has an embedded ResourceMark
343 RC_TRACE_MESG(("adjust: name=%s",
344 Klass::cast(old_method->method_holder())->external_name()));
345 *trace_name_printed = true;
346 }
347 // RC_TRACE macro has an embedded ResourceMark
348 RC_TRACE(0x00400000, ("cpc entry update: %s(%s)",
349 new_method->name()->as_C_string(),
350 new_method->signature()->as_C_string()));
351 }
353 return true;
354 }
356 return false;
357 }
359 bool ConstantPoolCacheEntry::is_interesting_method_entry(klassOop k) {
360 if (!is_method_entry()) {
361 // not a method entry so not interesting by default
362 return false;
363 }
365 methodOop m = NULL;
366 if (is_vfinal()) {
367 // virtual and final so _f2 contains method ptr instead of vtable index
368 m = (methodOop)_f2;
369 } else if ((oop)_f1 == NULL) {
370 // NULL _f1 means this is a virtual entry so also not interesting
371 return false;
372 } else {
373 if (!((oop)_f1)->is_method()) {
374 // _f1 can also contain a klassOop for an interface
375 return false;
376 }
377 m = (methodOop)_f1;
378 }
380 assert(m != NULL && m->is_method(), "sanity check");
381 if (m == NULL || !m->is_method() || m->method_holder() != k) {
382 // robustness for above sanity checks or method is not in
383 // the interesting class
384 return false;
385 }
387 // the method is in the interesting class so the entry is interesting
388 return true;
389 }
391 void ConstantPoolCacheEntry::print(outputStream* st, int index) const {
392 // print separator
393 if (index == 0) tty->print_cr(" -------------");
394 // print entry
395 tty->print_cr("%3d (%08x) [%02x|%02x|%5d]", index, this, bytecode_2(), bytecode_1(), constant_pool_index());
396 tty->print_cr(" [ %08x]", (address)(oop)_f1);
397 tty->print_cr(" [ %08x]", _f2);
398 tty->print_cr(" [ %08x]", _flags);
399 tty->print_cr(" -------------");
400 }
// Consistency-check hook invoked via NOT_PRODUCT(verify(tty)) after entry
// updates above; currently a no-op placeholder.
void ConstantPoolCacheEntry::verify(outputStream* st) const {
  // not implemented yet
}
406 // Implementation of ConstantPoolCache
408 void constantPoolCacheOopDesc::initialize(intArray& inverse_index_map) {
409 assert(inverse_index_map.length() == length(), "inverse index map must have same length as cache");
410 for (int i = 0; i < length(); i++) entry_at(i)->set_initial_state(inverse_index_map[i]);
411 }
413 // RedefineClasses() API support:
414 // If any entry of this constantPoolCache points to any of
415 // old_methods, replace it with the corresponding new_method.
416 void constantPoolCacheOopDesc::adjust_method_entries(methodOop* old_methods, methodOop* new_methods,
417 int methods_length, bool * trace_name_printed) {
419 if (methods_length == 0) {
420 // nothing to do if there are no methods
421 return;
422 }
424 // get shorthand for the interesting class
425 klassOop old_holder = old_methods[0]->method_holder();
427 for (int i = 0; i < length(); i++) {
428 if (!entry_at(i)->is_interesting_method_entry(old_holder)) {
429 // skip uninteresting methods
430 continue;
431 }
433 // The constantPoolCache contains entries for several different
434 // things, but we only care about methods. In fact, we only care
435 // about methods in the same class as the one that contains the
436 // old_methods. At this point, we have an interesting entry.
438 for (int j = 0; j < methods_length; j++) {
439 methodOop old_method = old_methods[j];
440 methodOop new_method = new_methods[j];
442 if (entry_at(i)->adjust_method_entry(old_method, new_method,
443 trace_name_printed)) {
444 // current old_method matched this entry and we updated it so
445 // break out and get to the next interesting entry if there one
446 break;
447 }
448 }
449 }
450 }