Thu, 21 Apr 2011 00:25:40 -0700
6993078: JSR 292 too many pushes: Lesp points into register window
Reviewed-by: kvn, never
1 /*
2 * Copyright (c) 1998, 2011, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
25 #include "precompiled.hpp"
26 #include "gc_implementation/shared/markSweep.inline.hpp"
27 #include "interpreter/interpreter.hpp"
28 #include "interpreter/rewriter.hpp"
29 #include "memory/universe.inline.hpp"
30 #include "oops/cpCacheOop.hpp"
31 #include "oops/objArrayOop.hpp"
32 #include "oops/oop.inline.hpp"
33 #include "prims/jvmtiRedefineClassesTrace.hpp"
34 #include "runtime/handles.inline.hpp"
// Implementation of ConstantPoolCacheEntry
// Initializes a primary cache entry: the constant pool index goes into
// the low 16 bits of _indices; the bytecode bytes (upper half) start as
// zero, i.e. unresolved.
void ConstantPoolCacheEntry::initialize_entry(int index) {
  assert(0 < index && index < 0x10000, "sanity check");  // must fit in 16 bits
  _indices = index;
  assert(constant_pool_index() == index, "");
}
// Initializes a secondary cache entry: the index of the associated main
// entry is stored in the upper 16 bits of _indices, leaving the low
// 16 bits (the constant-pool-index slot) zero.
void ConstantPoolCacheEntry::initialize_secondary_entry(int main_index) {
  assert(0 <= main_index && main_index < 0x10000, "sanity check");  // must fit in 16 bits
  _indices = (main_index << 16);
  assert(main_entry_index() == main_index, "");
}
// Packs the given attributes into a flags word. The TosState ends up in
// the highest-packed position (at tosBits, per the assert below), with
// is_final, is_vfinal, is_volatile, is_method_interface and is_method
// following in successively lower bits; the whole pattern is then shifted
// left by hotSwapBit so the low bits stay free for the field index or
// parameter size. Existing _flags bits are OR'd in, not overwritten.
int ConstantPoolCacheEntry::as_flags(TosState state, bool is_final,
                    bool is_vfinal, bool is_volatile,
                    bool is_method_interface, bool is_method) {
  int f = state;

  assert( state < number_of_states, "Invalid state in as_flags");

  f <<= 1;
  if (is_final) f |= 1;
  f <<= 1;
  if (is_vfinal) f |= 1;
  f <<= 1;
  if (is_volatile) f |= 1;
  f <<= 1;
  if (is_method_interface) f |= 1;
  f <<= 1;
  if (is_method) f |= 1;
  f <<= ConstantPoolCacheEntry::hotSwapBit;
  // Preserve existing flag bit values
#ifdef ASSERT
  // A previously-set state must match the new one (0 means unset).
  int old_state = ((_flags >> tosBits) & 0x0F);
  assert(old_state == 0 || old_state == state,
         "inconsistent cpCache flags state");
#endif
  return (_flags | f) ;
}
// Publishes the first resolved bytecode in bits 16..23 of _indices.
// The release_store guarantees all earlier stores to this entry (f1, f2,
// flags) are flushed before the bytecode becomes visible, since other
// processors treat a non-zero bytecode as "resolved" (see the comment
// above set_field).
void ConstantPoolCacheEntry::set_bytecode_1(Bytecodes::Code code) {
#ifdef ASSERT
  // Read once.
  volatile Bytecodes::Code c = bytecode_1();
  assert(c == 0 || c == code || code == 0, "update must be consistent");
#endif
  // Need to flush pending stores here before bytecode is written.
  OrderAccess::release_store_ptr(&_indices, _indices | ((u_char)code << 16));
}
// Publishes the second resolved bytecode in bits 24..31 of _indices,
// with the same release-store publication contract as set_bytecode_1.
void ConstantPoolCacheEntry::set_bytecode_2(Bytecodes::Code code) {
#ifdef ASSERT
  // Read once.
  volatile Bytecodes::Code c = bytecode_2();
  assert(c == 0 || c == code || code == 0, "update must be consistent");
#endif
  // Need to flush pending stores here before bytecode is written.
  OrderAccess::release_store_ptr(&_indices, _indices | ((u_char)code << 24));
}
98 // Atomically sets f1 if it is still NULL, otherwise it keeps the
99 // current value.
// Atomically installs f1 only if the slot is still NULL; a racing winner's
// value is kept. GC pre/post barriers bracket the CAS just as oop_store
// would, but the post barrier fires only when our CAS actually won.
void ConstantPoolCacheEntry::set_f1_if_null_atomic(oop f1) {
  // Use barriers as in oop_store
  oop* f1_addr = (oop*) &_f1;
  update_barrier_set_pre(f1_addr, f1);
  void* result = Atomic::cmpxchg_ptr(f1, f1_addr, NULL);
  bool success = (result == NULL);
  if (success) {
    // Only the thread that installed the value runs the post barrier.
    update_barrier_set((void*) f1_addr, f1);
  }
}
#ifdef ASSERT
// It is possible to have two different dummy methodOops created
// when the resolve code for invoke interface executes concurrently
// Hence the assertion below is weakened a bit for the invokeinterface
// case.
// NOTE(review): the disjunction (||) accepts two methods that agree on
// name OR signature alone — very permissive. Confirm this weakening is
// intended rather than a missing && between the name and signature tests.
bool ConstantPoolCacheEntry::same_methodOop(oop cur_f1, oop f1) {
  return (cur_f1 == f1 || ((methodOop)cur_f1)->name() ==
         ((methodOop)f1)->name() || ((methodOop)cur_f1)->signature() ==
         ((methodOop)f1)->signature());
}
#endif
123 // Note that concurrent update of both bytecodes can leave one of them
124 // reset to zero. This is harmless; the interpreter will simply re-resolve
125 // the damaged entry. More seriously, the memory synchronization is needed
126 // to flush other fields (f1, f2) completely to memory before the bytecodes
127 // are updated, lest other processors see a non-zero bytecode but zero f1/f2.
// Fills in this entry for a resolved field access:
//  - f1: the java mirror of the field holder class
//  - f2: the field's offset
//  - flags: tos state / final / volatile bits plus the scaled field index
// The bytecodes are set last (via release stores) so other processors
// never observe a resolved bytecode with unpublished f1/f2 — see the
// comment preceding this function.
void ConstantPoolCacheEntry::set_field(Bytecodes::Code get_code,
                                       Bytecodes::Code put_code,
                                       KlassHandle field_holder,
                                       int orig_field_index,
                                       int field_offset,
                                       TosState field_type,
                                       bool is_final,
                                       bool is_volatile) {
  set_f1(field_holder()->java_mirror());
  set_f2(field_offset);
  // The field index is used by jvm/ti and is the index into fields() array
  // in holder instanceKlass. This is scaled by instanceKlass::next_offset.
  assert((orig_field_index % instanceKlass::next_offset) == 0, "wierd index");
  const int field_index = orig_field_index / instanceKlass::next_offset;
  assert(field_index <= field_index_mask,
         "field index does not fit in low flag bits");
  set_flags(as_flags(field_type, is_final, false, is_volatile, false, false) |
            (field_index & field_index_mask));
  set_bytecode_1(get_code);
  set_bytecode_2(put_code);
  NOT_PRODUCT(verify(tty));
}
// Recovers the (rescaled) field index packed into the low flag bits by
// set_field; the inverse of the division performed there.
int ConstantPoolCacheEntry::field_index() const {
  return (_flags & field_index_mask) * instanceKlass::next_offset;
}
// Fills in this (main) entry for a resolved method invocation. What goes
// into f1/f2/flags depends on the invoke bytecode:
//  - invokevirtual/invokeinterface: f2 is either the methodOop itself
//    (statically bindable, vfinal set) or the vtable index.
//  - invokedynamic: f1 is set atomically (racy resolution, first wins);
//    f2 already holds the BSM cache index and is NOT an oop.
//  - invokespecial/invokestatic: f1 is the methodOop.
// The resolved bytecode is published last via set_bytecode_1/2.
void ConstantPoolCacheEntry::set_method(Bytecodes::Code invoke_code,
                                        methodHandle method,
                                        int vtable_index) {
  assert(!is_secondary_entry(), "");
  assert(method->interpreter_entry() != NULL, "should have been set at this point");
  assert(!method->is_obsolete(), "attempt to write obsolete method to cpCache");
  bool change_to_virtual = (invoke_code == Bytecodes::_invokeinterface);

  int byte_no = -1;
  bool needs_vfinal_flag = false;
  switch (invoke_code) {
    case Bytecodes::_invokevirtual:
    case Bytecodes::_invokeinterface: {
        if (method->can_be_statically_bound()) {
          // f2 holds the methodOop directly; vfinal marks it as an oop.
          set_f2((intptr_t)method());
          needs_vfinal_flag = true;
        } else {
          assert(vtable_index >= 0, "valid index");
          set_f2(vtable_index);
        }
        byte_no = 2;
        break;
    }

    case Bytecodes::_invokedynamic:  // similar to _invokevirtual
      if (TraceInvokeDynamic) {
        tty->print_cr("InvokeDynamic set_method%s method="PTR_FORMAT" index=%d",
                      (is_secondary_entry() ? " secondary" : ""),
                      (intptr_t)method(), vtable_index);
        method->print();
        this->print(tty, 0);
      }
      assert(method->can_be_statically_bound(), "must be a MH invoker method");
      assert(_f2 >= constantPoolOopDesc::CPCACHE_INDEX_TAG, "BSM index initialized");
      // SystemDictionary::find_method_handle_invoke only caches
      // methods which signature classes are on the boot classpath,
      // otherwise the newly created method is returned. To avoid
      // races in that case we store the first one coming in into the
      // cp-cache atomically if it's still unset.
      set_f1_if_null_atomic(method());
      needs_vfinal_flag = false;  // _f2 is not an oop
      assert(!is_vfinal(), "f2 not an oop");
      byte_no = 1;  // coordinate this with bytecode_number & is_resolved
      break;

    case Bytecodes::_invokespecial:
      // Preserve the value of the vfinal flag on invokevirtual bytecode
      // which may be shared with this constant pool cache entry.
      needs_vfinal_flag = is_resolved(Bytecodes::_invokevirtual) && is_vfinal();
      // fall through
    case Bytecodes::_invokestatic:
      set_f1(method());
      byte_no = 1;
      break;
    default:
      ShouldNotReachHere();
      break;
  }

  // Flags carry the result tos state, the attribute bits, and the
  // parameter size in the low bits.
  set_flags(as_flags(as_TosState(method->result_type()),
                     method->is_final_method(),
                     needs_vfinal_flag,
                     false,
                     change_to_virtual,
                     true)|
            method()->size_of_parameters());

  // Note: byte_no also appears in TemplateTable::resolve.
  if (byte_no == 1) {
    set_bytecode_1(invoke_code);
  } else if (byte_no == 2) {
    if (change_to_virtual) {
      // NOTE: THIS IS A HACK - BE VERY CAREFUL!!!
      //
      // Workaround for the case where we encounter an invokeinterface, but we
      // should really have an _invokevirtual since the resolved method is a
      // virtual method in java.lang.Object. This is a corner case in the spec
      // but is presumably legal. javac does not generate this code.
      //
      // We set bytecode_1() to _invokeinterface, because that is the
      // bytecode # used by the interpreter to see if it is resolved.
      // We set bytecode_2() to _invokevirtual.
      // See also interpreterRuntime.cpp. (8/25/2000)
      // Only set resolved for the invokeinterface case if method is public.
      // Otherwise, the method needs to be reresolved with caller for each
      // interface call.
      if (method->is_public()) set_bytecode_1(invoke_code);
      set_bytecode_2(Bytecodes::_invokevirtual);
    } else {
      set_bytecode_2(invoke_code);
    }
  } else {
    ShouldNotReachHere();
  }
  NOT_PRODUCT(verify(tty));
}
// Fills in this entry for a resolved invokeinterface: f1 gets the
// interface klass, f2 the itable index; flags carry the tos state,
// final bit and parameter size. The bytecode is published last.
void ConstantPoolCacheEntry::set_interface_call(methodHandle method, int index) {
  assert(!is_secondary_entry(), "");
  klassOop interf = method->method_holder();
  assert(instanceKlass::cast(interf)->is_interface(), "must be an interface");
  set_f1(interf);
  set_f2(index);
  set_flags(as_flags(as_TosState(method->result_type()), method->is_final_method(), false, false, false, true) | method()->size_of_parameters());
  set_bytecode_1(Bytecodes::_invokeinterface);
}
// Records (once) the bootstrap-method cache index in f2 of an
// invokedynamic main entry. The CPCACHE_INDEX_TAG bias keeps the stored
// value distinguishable from an uninitialized (zero) f2.
void ConstantPoolCacheEntry::initialize_bootstrap_method_index_in_cache(int bsm_cache_index) {
  assert(!is_secondary_entry(), "only for JVM_CONSTANT_InvokeDynamic main entry");
  assert(_f2 == 0, "initialize once");
  assert(bsm_cache_index == (int)(u2)bsm_cache_index, "oob");  // must fit in u2
  set_f2(bsm_cache_index + constantPoolOopDesc::CPCACHE_INDEX_TAG);
}
// Retrieves the bootstrap-method cache index stored by
// initialize_bootstrap_method_index_in_cache (removes the tag bias).
int ConstantPoolCacheEntry::bootstrap_method_index_in_cache() {
  assert(!is_secondary_entry(), "only for JVM_CONSTANT_InvokeDynamic main entry");
  intptr_t bsm_cache_index = (intptr_t) _f2 - constantPoolOopDesc::CPCACHE_INDEX_TAG;
  assert(bsm_cache_index == (intptr_t)(u2)bsm_cache_index, "oob");  // must fit in u2
  return (int) bsm_cache_index;
}
// Completes a secondary invokedynamic entry: stores the flags first and
// then atomically installs the call site in f1. Statement order is load-
// bearing here — see the NOTE below.
void ConstantPoolCacheEntry::set_dynamic_call(Handle call_site, methodHandle signature_invoker) {
  assert(is_secondary_entry(), "");
  // NOTE: it's important that all other values are set before f1 is
  // set since some users short circuit on f1 being set
  // (i.e. non-null) and that may result in uninitialized values for
  // other racing threads (e.g. flags).
  int param_size = signature_invoker->size_of_parameters();
  assert(param_size >= 1, "method argument size must include MH.this");
  param_size -= 1; // do not count MH.this; it is not stacked for invokedynamic
  bool is_final = true;
  assert(signature_invoker->is_final_method(), "is_final");
  int flags = as_flags(as_TosState(signature_invoker->result_type()), is_final, false, false, false, true) | param_size;
  assert(_flags == 0 || _flags == flags, "flags should be the same");
  set_flags(flags);
  // do not do set_bytecode on a secondary CP cache entry
  //set_bytecode_1(Bytecodes::_invokedynamic);
  set_f1_if_null_atomic(call_site()); // This must be the last one to set (see NOTE above)!
}
298 class LocalOopClosure: public OopClosure {
299 private:
300 void (*_f)(oop*);
302 public:
303 LocalOopClosure(void f(oop*)) { _f = f; }
304 virtual void do_oop(oop* o) { _f(o); }
305 virtual void do_oop(narrowOop *o) { ShouldNotReachHere(); }
306 };
309 void ConstantPoolCacheEntry::oops_do(void f(oop*)) {
310 LocalOopClosure blk(f);
311 oop_iterate(&blk);
312 }
// Visits the oop slots of this entry: _f1 always (oop or NULL), and _f2
// only when vfinal is set — otherwise _f2 is a non-oop value (e.g. a
// vtable index, see set_method) and must not be visited.
void ConstantPoolCacheEntry::oop_iterate(OopClosure* blk) {
  assert(in_words(size()) == 4, "check code below - may need adjustment");
  // field[1] is always oop or NULL
  blk->do_oop((oop*)&_f1);
  if (is_vfinal()) {
    blk->do_oop((oop*)&_f2);
  }
}
// Same as oop_iterate, but only visits slots that fall inside the given
// memory region.
void ConstantPoolCacheEntry::oop_iterate_m(OopClosure* blk, MemRegion mr) {
  assert(in_words(size()) == 4, "check code below - may need adjustment");
  // field[1] is always oop or NULL
  if (mr.contains((oop *)&_f1)) blk->do_oop((oop*)&_f1);
  if (is_vfinal()) {
    // _f2 is an oop only for vfinal entries.
    if (mr.contains((oop *)&_f2)) blk->do_oop((oop*)&_f2);
  }
}
// Serial mark-sweep marking: pushes the oop slots (_f1 always; _f2 only
// when it actually holds an oop, i.e. vfinal).
void ConstantPoolCacheEntry::follow_contents() {
  assert(in_words(size()) == 4, "check code below - may need adjustment");
  // field[1] is always oop or NULL
  MarkSweep::mark_and_push((oop*)&_f1);
  if (is_vfinal()) {
    MarkSweep::mark_and_push((oop*)&_f2);
  }
}
344 #ifndef SERIALGC
// Parallel-compact marking: same slot visitation as the serial variant
// above, routed through the per-thread compaction manager.
void ConstantPoolCacheEntry::follow_contents(ParCompactionManager* cm) {
  assert(in_words(size()) == 4, "check code below - may need adjustment");
  // field[1] is always oop or NULL
  PSParallelCompact::mark_and_push(cm, (oop*)&_f1);
  if (is_vfinal()) {
    PSParallelCompact::mark_and_push(cm, (oop*)&_f2);
  }
}
353 #endif // SERIALGC
// Serial mark-sweep pointer adjustment after object relocation; visits
// the same oop slots as follow_contents.
void ConstantPoolCacheEntry::adjust_pointers() {
  assert(in_words(size()) == 4, "check code below - may need adjustment");
  // field[1] is always oop or NULL
  MarkSweep::adjust_pointer((oop*)&_f1);
  if (is_vfinal()) {
    MarkSweep::adjust_pointer((oop*)&_f2);
  }
}
364 #ifndef SERIALGC
// Parallel-compact pointer adjustment; mirrors adjust_pointers above.
void ConstantPoolCacheEntry::update_pointers() {
  assert(in_words(size()) == 4, "check code below - may need adjustment");
  // field[1] is always oop or NULL
  PSParallelCompact::adjust_pointer((oop*)&_f1);
  if (is_vfinal()) {
    PSParallelCompact::adjust_pointer((oop*)&_f2);
  }
}
373 #endif // SERIALGC
375 // RedefineClasses() API support:
376 // If this constantPoolCacheEntry refers to old_method then update it
377 // to refer to new_method.
// RedefineClasses() support: if this entry refers to old_method, rewrite
// it to refer to new_method. Returns true iff an update was made.
// *trace_name_printed is an in/out flag ensuring the holder-class name is
// logged at most once across a batch of entry updates.
bool ConstantPoolCacheEntry::adjust_method_entry(methodOop old_method,
       methodOop new_method, bool * trace_name_printed) {

  if (is_vfinal()) {
    // virtual and final so f2() contains method ptr instead of vtable index
    if (f2() == (intptr_t)old_method) {
      // match old_method so need an update
      _f2 = (intptr_t)new_method;
      if (RC_TRACE_IN_RANGE(0x00100000, 0x00400000)) {
        if (!(*trace_name_printed)) {
          // RC_TRACE_MESG macro has an embedded ResourceMark
          RC_TRACE_MESG(("adjust: name=%s",
            Klass::cast(old_method->method_holder())->external_name()));
          *trace_name_printed = true;
        }
        // RC_TRACE macro has an embedded ResourceMark
        RC_TRACE(0x00400000, ("cpc vf-entry update: %s(%s)",
          new_method->name()->as_C_string(),
          new_method->signature()->as_C_string()));
      }

      return true;
    }

    // f1() is not used with virtual entries so bail out
    return false;
  }

  if ((oop)_f1 == NULL) {
    // NULL f1() means this is a virtual entry so bail out
    // We are assuming that the vtable index does not need change.
    return false;
  }

  if ((oop)_f1 == old_method) {
    _f1 = new_method;
    if (RC_TRACE_IN_RANGE(0x00100000, 0x00400000)) {
      if (!(*trace_name_printed)) {
        // RC_TRACE_MESG macro has an embedded ResourceMark
        RC_TRACE_MESG(("adjust: name=%s",
          Klass::cast(old_method->method_holder())->external_name()));
        *trace_name_printed = true;
      }
      // RC_TRACE macro has an embedded ResourceMark
      RC_TRACE(0x00400000, ("cpc entry update: %s(%s)",
        new_method->name()->as_C_string(),
        new_method->signature()->as_C_string()));
    }

    return true;
  }

  return false;
}
// Returns true iff this is a method entry whose resolved method belongs
// to class k — used by adjust_method_entries to skip entries that cannot
// possibly reference a redefined method.
bool ConstantPoolCacheEntry::is_interesting_method_entry(klassOop k) {
  if (!is_method_entry()) {
    // not a method entry so not interesting by default
    return false;
  }

  methodOop m = NULL;
  if (is_vfinal()) {
    // virtual and final so _f2 contains method ptr instead of vtable index
    m = (methodOop)_f2;
  } else if ((oop)_f1 == NULL) {
    // NULL _f1 means this is a virtual entry so also not interesting
    return false;
  } else {
    if (!((oop)_f1)->is_method()) {
      // _f1 can also contain a klassOop for an interface
      return false;
    }
    m = (methodOop)_f1;
  }

  // Deliberately redundant with the runtime check below: the assert
  // catches bugs in debug builds, the check keeps product builds robust.
  assert(m != NULL && m->is_method(), "sanity check");
  if (m == NULL || !m->is_method() || m->method_holder() != k) {
    // robustness for above sanity checks or method is not in
    // the interesting class
    return false;
  }

  // the method is in the interesting class so the entry is interesting
  return true;
}
465 void ConstantPoolCacheEntry::print(outputStream* st, int index) const {
466 // print separator
467 if (index == 0) tty->print_cr(" -------------");
468 // print entry
469 tty->print("%3d ("PTR_FORMAT") ", index, (intptr_t)this);
470 if (is_secondary_entry())
471 tty->print_cr("[%5d|secondary]", main_entry_index());
472 else
473 tty->print_cr("[%02x|%02x|%5d]", bytecode_2(), bytecode_1(), constant_pool_index());
474 tty->print_cr(" [ "PTR_FORMAT"]", (intptr_t)(oop)_f1);
475 tty->print_cr(" [ "PTR_FORMAT"]", (intptr_t)_f2);
476 tty->print_cr(" [ "PTR_FORMAT"]", (intptr_t)_flags);
477 tty->print_cr(" -------------");
478 }
// Consistency-check hook invoked via NOT_PRODUCT(verify(tty)) from the
// set_field/set_method paths above; currently a placeholder with no checks.
void ConstantPoolCacheEntry::verify(outputStream* st) const {
  // not implemented yet
}
484 // Implementation of ConstantPoolCache
// Initializes every entry of the cache from the rewriter's inverse index
// map: entries tagged with _secondary_entry_tag become secondary entries
// pointing at their main entry; all others become primary entries for the
// given constant pool index. Main entries must precede their secondaries
// in the map for the validity assert to hold.
void constantPoolCacheOopDesc::initialize(intArray& inverse_index_map) {
  assert(inverse_index_map.length() == length(), "inverse index map must have same length as cache");
  for (int i = 0; i < length(); i++) {
    ConstantPoolCacheEntry* e = entry_at(i);
    int original_index = inverse_index_map[i];
    if ((original_index & Rewriter::_secondary_entry_tag) != 0) {
      // Strip the tag to recover the main entry's cache index.
      int main_index = (original_index - Rewriter::_secondary_entry_tag);
      assert(!entry_at(main_index)->is_secondary_entry(), "valid main index");
      e->initialize_secondary_entry(main_index);
    } else {
      e->initialize_entry(original_index);
    }
    assert(entry_at(i) == e, "sanity");
  }
}
502 // RedefineClasses() API support:
503 // If any entry of this constantPoolCache points to any of
504 // old_methods, replace it with the corresponding new_method.
// RedefineClasses() support: scans every cache entry and, for entries
// that resolve to a method of the redefined class, replaces references to
// any of old_methods with the corresponding new_methods. old_methods and
// new_methods are parallel arrays of length methods_length, all from the
// same holder class; *trace_name_printed throttles tracing output.
void constantPoolCacheOopDesc::adjust_method_entries(methodOop* old_methods, methodOop* new_methods,
                                                     int methods_length, bool * trace_name_printed) {

  if (methods_length == 0) {
    // nothing to do if there are no methods
    return;
  }

  // get shorthand for the interesting class
  klassOop old_holder = old_methods[0]->method_holder();

  for (int i = 0; i < length(); i++) {
    if (!entry_at(i)->is_interesting_method_entry(old_holder)) {
      // skip uninteresting methods
      continue;
    }

    // The constantPoolCache contains entries for several different
    // things, but we only care about methods. In fact, we only care
    // about methods in the same class as the one that contains the
    // old_methods. At this point, we have an interesting entry.

    for (int j = 0; j < methods_length; j++) {
      methodOop old_method = old_methods[j];
      methodOop new_method = new_methods[j];

      if (entry_at(i)->adjust_method_entry(old_method, new_method,
          trace_name_printed)) {
        // current old_method matched this entry and we updated it so
        // break out and get to the next interesting entry if there one
        break;
      }
    }
  }
}