duke@435: /* twisti@2258: * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved. duke@435: * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. duke@435: * duke@435: * This code is free software; you can redistribute it and/or modify it duke@435: * under the terms of the GNU General Public License version 2 only, as duke@435: * published by the Free Software Foundation. duke@435: * duke@435: * This code is distributed in the hope that it will be useful, but WITHOUT duke@435: * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or duke@435: * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License duke@435: * version 2 for more details (a copy is included in the LICENSE file that duke@435: * accompanied this code). duke@435: * duke@435: * You should have received a copy of the GNU General Public License version duke@435: * 2 along with this work; if not, write to the Free Software Foundation, duke@435: * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. duke@435: * trims@1907: * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA trims@1907: * or visit www.oracle.com if you need additional information or have any trims@1907: * questions. 
 *
 */

#include "precompiled.hpp"
#include "gc_implementation/shared/markSweep.inline.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/rewriter.hpp"
#include "memory/universe.inline.hpp"
#include "oops/cpCacheOop.hpp"
#include "oops/objArrayOop.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiRedefineClassesTrace.hpp"
#include "runtime/handles.inline.hpp"


// Implementation of ConstantPoolCacheEntry

// Initialize a main (primary) entry: the low 16 bits of _indices hold the
// original constant pool index this cache entry was rewritten from.
void ConstantPoolCacheEntry::initialize_entry(int index) {
  assert(0 < index && index < 0x10000, "sanity check");
  _indices = index;
  assert(constant_pool_index() == index, "");
}

// Initialize a secondary entry: the high 16 bits of _indices refer back to
// the main entry; the low 16 bits (the constant pool index) stay zero.
void ConstantPoolCacheEntry::initialize_secondary_entry(int main_index) {
  assert(0 <= main_index && main_index < 0x10000, "sanity check");
  _indices = (main_index << 16);
  assert(main_entry_index() == main_index, "");
}

// Pack the tos state and the boolean attributes into the _flags layout:
// the state sits above five single-bit attributes, and the whole packed
// value is shifted up past hotSwapBit.  The result is OR-ed with the
// current _flags so previously set bits are preserved.
int ConstantPoolCacheEntry::as_flags(TosState state, bool is_final,
                    bool is_vfinal, bool is_volatile,
                    bool is_method_interface, bool is_method) {
  int f = state;

  assert( state < number_of_states, "Invalid state in as_flags");

  f <<= 1;
  if (is_final) f |= 1;
  f <<= 1;
  if (is_vfinal) f |= 1;
  f <<= 1;
  if (is_volatile) f |= 1;
  f <<= 1;
  if (is_method_interface) f |= 1;
  f <<= 1;
  if (is_method) f |= 1;
  f <<= ConstantPoolCacheEntry::hotSwapBit;
  // Preserve existing flag bit values
#ifdef ASSERT
  // Any tos state already recorded in _flags must agree with the new one.
  int old_state = ((_flags >> tosBits) & 0x0F);
  assert(old_state == 0 || old_state == state,
         "inconsistent cpCache flags state");
#endif
  return (_flags | f) ;
}

// Install the first resolved-bytecode marker (bits 16..23 of _indices).
// The release store publishes the entry: a reader that observes a
// non-zero bytecode is guaranteed to also see the earlier f1/f2 stores.
void ConstantPoolCacheEntry::set_bytecode_1(Bytecodes::Code code) {
#ifdef ASSERT
  // Read once.
  volatile Bytecodes::Code c = bytecode_1();
  assert(c == 0 || c == code || code == 0, "update must be consistent");
#endif
  // Need to flush pending stores here before bytecode is written.
  OrderAccess::release_store_ptr(&_indices, _indices | ((u_char)code << 16));
}

// Same as set_bytecode_1, but for the second bytecode slot
// (bits 24..31 of _indices).
void ConstantPoolCacheEntry::set_bytecode_2(Bytecodes::Code code) {
#ifdef ASSERT
  // Read once.
  volatile Bytecodes::Code c = bytecode_2();
  assert(c == 0 || c == code || code == 0, "update must be consistent");
#endif
  // Need to flush pending stores here before bytecode is written.
  OrderAccess::release_store_ptr(&_indices, _indices | ((u_char)code << 24));
}

// Atomically sets f1 if it is still NULL, otherwise it keeps the
// current value.
void ConstantPoolCacheEntry::set_f1_if_null_atomic(oop f1) {
  // Use barriers as in oop_store
  HeapWord* f1_addr = (HeapWord*) &_f1;
  update_barrier_set_pre(f1_addr, f1);
  void* result = Atomic::cmpxchg_ptr(f1, f1_addr, NULL);
  bool success = (result == NULL);
  if (success) {
    // Only the winning CAS runs the post-barrier; a losing racer leaves
    // the already-installed value (and its barrier bookkeeping) alone.
    update_barrier_set((void*) f1_addr, f1);
  }
}

#ifdef ASSERT
// It is possible to have two different dummy methodOops created
// when the resolve code for invoke interface executes concurrently
// Hence the assertion below is weakened a bit for the invokeinterface
// case.
// Debug-only helper: two methodOops are "the same" when they are equal,
// or share a name, or share a signature.  NOTE(review): the ORs make
// this check deliberately weak (see the invokeinterface race comment
// above); it exists only to sanity-check racy updates.
bool ConstantPoolCacheEntry::same_methodOop(oop cur_f1, oop f1) {
  return (cur_f1 == f1 || ((methodOop)cur_f1)->name() ==
         ((methodOop)f1)->name() || ((methodOop)cur_f1)->signature() ==
         ((methodOop)f1)->signature());
}
#endif

// Note that concurrent update of both bytecodes can leave one of them
// reset to zero. This is harmless; the interpreter will simply re-resolve
// the damaged entry. More seriously, the memory synchronization is needed
// to flush other fields (f1, f2) completely to memory before the bytecodes
// are updated, lest other processors see a non-zero bytecode but zero f1/f2.

// Resolve this entry for a field access: f1 = holder klass, f2 = field
// offset, flags = tos state + final/volatile bits + packed field index.
// The get/put bytecodes are written last (release stores) to publish.
void ConstantPoolCacheEntry::set_field(Bytecodes::Code get_code,
                                       Bytecodes::Code put_code,
                                       KlassHandle field_holder,
                                       int orig_field_index,
                                       int field_offset,
                                       TosState field_type,
                                       bool is_final,
                                       bool is_volatile) {
  set_f1(field_holder());
  set_f2(field_offset);
  // The field index is used by jvm/ti and is the index into fields() array
  // in holder instanceKlass. This is scaled by instanceKlass::next_offset.
  assert((orig_field_index % instanceKlass::next_offset) == 0, "wierd index");
  const int field_index = orig_field_index / instanceKlass::next_offset;
  assert(field_index <= field_index_mask,
         "field index does not fit in low flag bits");
  set_flags(as_flags(field_type, is_final, false, is_volatile, false, false) |
            (field_index & field_index_mask));
  set_bytecode_1(get_code);
  set_bytecode_2(put_code);
  NOT_PRODUCT(verify(tty));
}

// Recover the jvm/ti field index stored (scaled down) by set_field.
int ConstantPoolCacheEntry::field_index() const {
  return (_flags & field_index_mask) * instanceKlass::next_offset;
}

// Resolve this entry for a method invocation.  Depending on the invoke
// bytecode, f1/f2 hold either the methodOop, a vtable index, or (for
// invokedynamic) the cached MH invoker; the resolved bytecode is written
// last so concurrent readers never see a resolved-but-empty entry.
void ConstantPoolCacheEntry::set_method(Bytecodes::Code invoke_code,
                                        methodHandle method,
                                        int vtable_index) {
  assert(!is_secondary_entry(), "");
  assert(method->interpreter_entry() != NULL, "should have been set at this point");
  assert(!method->is_obsolete(), "attempt to write obsolete method to cpCache");
  bool change_to_virtual = (invoke_code == Bytecodes::_invokeinterface);

  int byte_no = -1;
  bool needs_vfinal_flag = false;
  switch (invoke_code) {
    case Bytecodes::_invokevirtual:
    case Bytecodes::_invokeinterface: {
        if (method->can_be_statically_bound()) {
          // Statically bound: store the methodOop itself in f2 and mark
          // the entry vfinal so f2 is treated as an oop.
          set_f2((intptr_t)method());
          needs_vfinal_flag = true;
        } else {
          assert(vtable_index >= 0, "valid index");
          set_f2(vtable_index);
        }
        byte_no = 2;
        break;
    }

    case Bytecodes::_invokedynamic:  // similar to _invokevirtual
      if (TraceInvokeDynamic) {
        tty->print_cr("InvokeDynamic set_method%s method="PTR_FORMAT" index=%d",
                      (is_secondary_entry() ? " secondary" : ""),
                      (intptr_t)method(), vtable_index);
        method->print();
        this->print(tty, 0);
      }
      assert(method->can_be_statically_bound(), "must be a MH invoker method");
      assert(AllowTransitionalJSR292 || _f2 >= constantPoolOopDesc::CPCACHE_INDEX_TAG, "BSM index initialized");
      // SystemDictionary::find_method_handle_invoke only caches
      // methods which signature classes are on the boot classpath,
      // otherwise the newly created method is returned. To avoid
      // races in that case we store the first one coming in into the
      // cp-cache atomically if it's still unset.
      set_f1_if_null_atomic(method());
      needs_vfinal_flag = false;  // _f2 is not an oop
      assert(!is_vfinal(), "f2 not an oop");
      byte_no = 1;  // coordinate this with bytecode_number & is_resolved
      break;

    case Bytecodes::_invokespecial:
      // Preserve the value of the vfinal flag on invokevirtual bytecode
      // which may be shared with this constant pool cache entry.
      needs_vfinal_flag = is_resolved(Bytecodes::_invokevirtual) && is_vfinal();
      // fall through
    case Bytecodes::_invokestatic:
      set_f1(method());
      byte_no = 1;
      break;
    default:
      ShouldNotReachHere();
      break;
  }

  set_flags(as_flags(as_TosState(method->result_type()),
                     method->is_final_method(),
                     needs_vfinal_flag,
                     false,
                     change_to_virtual,
                     true)|
            method()->size_of_parameters());

  // Note: byte_no also appears in TemplateTable::resolve.
  if (byte_no == 1) {
    set_bytecode_1(invoke_code);
  } else if (byte_no == 2) {
    if (change_to_virtual) {
      // NOTE: THIS IS A HACK - BE VERY CAREFUL!!!
      //
      // Workaround for the case where we encounter an invokeinterface, but we
      // should really have an _invokevirtual since the resolved method is a
      // virtual method in java.lang.Object. This is a corner case in the spec
      // but is presumably legal. javac does not generate this code.
      //
      // We set bytecode_1() to _invokeinterface, because that is the
      // bytecode # used by the interpreter to see if it is resolved.
      // We set bytecode_2() to _invokevirtual.
      // See also interpreterRuntime.cpp. (8/25/2000)
      // Only set resolved for the invokeinterface case if method is public.
      // Otherwise, the method needs to be reresolved with caller for each
      // interface call.
      if (method->is_public()) set_bytecode_1(invoke_code);
      set_bytecode_2(Bytecodes::_invokevirtual);
    } else {
      set_bytecode_2(invoke_code);
    }
  } else {
    ShouldNotReachHere();
  }
  NOT_PRODUCT(verify(tty));
}


// Resolve this entry for invokeinterface proper: f1 = interface klass,
// f2 = itable index within that interface.
void ConstantPoolCacheEntry::set_interface_call(methodHandle method, int index) {
  assert(!is_secondary_entry(), "");
  klassOop interf = method->method_holder();
  assert(instanceKlass::cast(interf)->is_interface(), "must be an interface");
  set_f1(interf);
  set_f2(index);
  set_flags(as_flags(as_TosState(method->result_type()), method->is_final_method(), false, false, false, true) | method()->size_of_parameters());
  set_bytecode_1(Bytecodes::_invokeinterface);
}


// Record (once) the cp-cache index of the bootstrap method for this
// JVM_CONSTANT_InvokeDynamic main entry, tagged so zero stays "unset".
void ConstantPoolCacheEntry::initialize_bootstrap_method_index_in_cache(int bsm_cache_index) {
  assert(!is_secondary_entry(), "only for JVM_CONSTANT_InvokeDynamic main entry");
  assert(_f2 == 0, "initialize once");
  assert(bsm_cache_index == (int)(u2)bsm_cache_index, "oob");
  set_f2(bsm_cache_index + constantPoolOopDesc::CPCACHE_INDEX_TAG);
}

// Inverse of initialize_bootstrap_method_index_in_cache: strip the tag
// and return the raw cache index.
int ConstantPoolCacheEntry::bootstrap_method_index_in_cache() {
  assert(!is_secondary_entry(), "only for JVM_CONSTANT_InvokeDynamic main entry");
  intptr_t bsm_cache_index = (intptr_t) _f2 - constantPoolOopDesc::CPCACHE_INDEX_TAG;
  assert(bsm_cache_index == (intptr_t)(u2)bsm_cache_index, "oob");
  return (int) bsm_cache_index;
}

// Link a resolved invokedynamic call site into this secondary entry:
// f1 = the CallSite (installed at most once, CAS-guarded), flags carry
// the invoker's tos state and argument count (minus the MH receiver).
void ConstantPoolCacheEntry::set_dynamic_call(Handle call_site,
                                              methodHandle signature_invoker) {
  assert(is_secondary_entry(), "");
  int param_size = signature_invoker->size_of_parameters();
  assert(param_size >= 1, "method argument size must include MH.this");
  param_size -= 1;  // do not count MH.this; it is not stacked for invokedynamic
  if (Atomic::cmpxchg_ptr(call_site(), &_f1, NULL) == NULL) {
    // racing threads might be trying to install their own favorites
    set_f1(call_site());
  }
  bool is_final = true;
  assert(signature_invoker->is_final_method(), "is_final");
  set_flags(as_flags(as_TosState(signature_invoker->result_type()), is_final, false, false, false, true) | param_size);
  // do not do set_bytecode on a secondary CP cache entry
  //set_bytecode_1(Bytecodes::_invokedynamic);
}


// Adapter so a plain function pointer can stand in for an OopClosure.
// Compressed oops are not expected here (narrowOop overload aborts).
class LocalOopClosure: public OopClosure {
 private:
  void (*_f)(oop*);

 public:
  LocalOopClosure(void f(oop*))     { _f = f; }
  virtual void do_oop(oop* o)       { _f(o); }
  virtual void do_oop(narrowOop *o) { ShouldNotReachHere(); }
};


// Apply f to each oop slot of this entry (delegates to oop_iterate for
// the decision of which words are oops).
void ConstantPoolCacheEntry::oops_do(void f(oop*)) {
  LocalOopClosure blk(f);
  oop_iterate(&blk);
}


// Visit this entry's oop fields: _f1 is always an oop (or NULL); _f2 is
// an oop (a methodOop) only when the entry is vfinal — otherwise it holds
// a vtable index or field offset and must not be visited.
void ConstantPoolCacheEntry::oop_iterate(OopClosure* blk) {
  assert(in_words(size()) == 4, "check code below - may need adjustment");
  // field[1] is always oop or NULL
  blk->do_oop((oop*)&_f1);
  if (is_vfinal()) {
    blk->do_oop((oop*)&_f2);
  }
}


// Same as oop_iterate, but only visits slots that lie inside mr.
void ConstantPoolCacheEntry::oop_iterate_m(OopClosure* blk, MemRegion mr) {
  assert(in_words(size()) == 4, "check code below - may need adjustment");
  // field[1] is always oop or NULL
  if (mr.contains((oop *)&_f1)) blk->do_oop((oop*)&_f1);
  if (is_vfinal()) {
    if (mr.contains((oop *)&_f2)) blk->do_oop((oop*)&_f2);
  }
}


// Serial mark-sweep: mark the oops reachable from this entry.
void ConstantPoolCacheEntry::follow_contents() {
  assert(in_words(size()) == 4, "check code below - may need adjustment");
  // field[1] is always oop or NULL
  MarkSweep::mark_and_push((oop*)&_f1);
  if (is_vfinal()) {
    MarkSweep::mark_and_push((oop*)&_f2);
  }
}

#ifndef SERIALGC
// Parallel compact: same marking, routed through the compaction manager.
void ConstantPoolCacheEntry::follow_contents(ParCompactionManager* cm) {
  assert(in_words(size()) == 4, "check code below - may need adjustment");
  // field[1] is always oop or NULL
  PSParallelCompact::mark_and_push(cm, (oop*)&_f1);
  if (is_vfinal()) {
    PSParallelCompact::mark_and_push(cm, (oop*)&_f2);
  }
}
#endif // SERIALGC

// Serial mark-sweep compaction: update oop slots to forwarded locations.
void ConstantPoolCacheEntry::adjust_pointers() {
  assert(in_words(size()) == 4, "check code below - may need adjustment");
  // field[1] is always oop or NULL
  MarkSweep::adjust_pointer((oop*)&_f1);
  if (is_vfinal()) {
    MarkSweep::adjust_pointer((oop*)&_f2);
  }
}

#ifndef SERIALGC
// Parallel compact: update oop slots to forwarded locations.
void ConstantPoolCacheEntry::update_pointers() {
  assert(in_words(size()) == 4, "check code below - may need adjustment");
  // field[1] is always oop or NULL
  PSParallelCompact::adjust_pointer((oop*)&_f1);
  if (is_vfinal()) {
    PSParallelCompact::adjust_pointer((oop*)&_f2);
  }
}

// Bounded variant: only adjusts slots within [beg_addr, end_addr).
void ConstantPoolCacheEntry::update_pointers(HeapWord* beg_addr,
                                             HeapWord* end_addr) {
  assert(in_words(size()) == 4, "check code below - may need adjustment");
  // field[1] is always oop or NULL
  PSParallelCompact::adjust_pointer((oop*)&_f1, beg_addr, end_addr);
  if (is_vfinal()) {
    PSParallelCompact::adjust_pointer((oop*)&_f2, beg_addr, end_addr);
  }
}
#endif // SERIALGC

// RedefineClasses() API support:
// If this constantPoolCacheEntry refers to old_method then update it
// to refer to new_method.  Returns true iff this entry was updated;
// trace_name_printed tracks whether the class-name header was already
// logged so it is printed at most once per class.
bool ConstantPoolCacheEntry::adjust_method_entry(methodOop old_method,
       methodOop new_method, bool * trace_name_printed) {

  if (is_vfinal()) {
    // virtual and final so f2() contains method ptr instead of vtable index
    if (f2() == (intptr_t)old_method) {
      // match old_method so need an update
      _f2 = (intptr_t)new_method;
      if (RC_TRACE_IN_RANGE(0x00100000, 0x00400000)) {
        if (!(*trace_name_printed)) {
          // RC_TRACE_MESG macro has an embedded ResourceMark
          RC_TRACE_MESG(("adjust: name=%s",
            Klass::cast(old_method->method_holder())->external_name()));
          *trace_name_printed = true;
        }
        // RC_TRACE macro has an embedded ResourceMark
        RC_TRACE(0x00400000, ("cpc vf-entry update: %s(%s)",
          new_method->name()->as_C_string(),
          new_method->signature()->as_C_string()));
      }

      return true;
    }

    // f1() is not used with virtual entries so bail out
    return false;
  }

  if ((oop)_f1 == NULL) {
    // NULL f1() means this is a virtual entry so bail out
    // We are assuming that the vtable index does not need change.
    return false;
  }

  if ((oop)_f1 == old_method) {
    _f1 = new_method;
    if (RC_TRACE_IN_RANGE(0x00100000, 0x00400000)) {
      if (!(*trace_name_printed)) {
        // RC_TRACE_MESG macro has an embedded ResourceMark
        RC_TRACE_MESG(("adjust: name=%s",
          Klass::cast(old_method->method_holder())->external_name()));
        *trace_name_printed = true;
      }
      // RC_TRACE macro has an embedded ResourceMark
      RC_TRACE(0x00400000, ("cpc entry update: %s(%s)",
        new_method->name()->as_C_string(),
        new_method->signature()->as_C_string()));
    }

    return true;
  }

  return false;
}

// Returns true iff this entry resolves a method whose holder is k; used
// by RedefineClasses to filter entries worth scanning.
bool ConstantPoolCacheEntry::is_interesting_method_entry(klassOop k) {
  if (!is_method_entry()) {
    // not a method entry so not interesting by default
    return false;
  }

  methodOop m = NULL;
  if (is_vfinal()) {
    // virtual and final so _f2 contains method ptr instead of vtable index
    m = (methodOop)_f2;
  } else if ((oop)_f1 == NULL) {
    // NULL _f1 means this is a virtual entry so also not interesting
    return false;
  } else {
    if (!((oop)_f1)->is_method()) {
      // _f1 can also contain a klassOop for an interface
      return false;
    }
    m = (methodOop)_f1;
  }

  assert(m != NULL && m->is_method(), "sanity check");
  if (m == NULL || !m->is_method() || m->method_holder() != k) {
    // robustness for above sanity checks or method is not in
    // the interesting class
    return false;
  }

  // the method is in the interesting class so the entry is interesting
  return true;
}

// Debug dump of one entry.  NOTE(review): output goes to tty, not to the
// st parameter — preexisting behavior; confirm before relying on st.
void ConstantPoolCacheEntry::print(outputStream* st, int index) const {
  // print separator
  if (index == 0) tty->print_cr(" -------------");
  // print entry
  tty->print("%3d ("PTR_FORMAT") ", index, (intptr_t)this);
  if (is_secondary_entry())
    tty->print_cr("[%5d|secondary]", main_entry_index());
  else
    tty->print_cr("[%02x|%02x|%5d]", bytecode_2(), bytecode_1(), constant_pool_index());
  tty->print_cr(" [ "PTR_FORMAT"]", (intptr_t)(oop)_f1);
  tty->print_cr(" [ "PTR_FORMAT"]", (intptr_t)_f2);
  tty->print_cr(" [ "PTR_FORMAT"]", (intptr_t)_flags);
  tty->print_cr(" -------------");
}

void ConstantPoolCacheEntry::verify(outputStream* st) const {
  // not implemented yet
}

// Implementation of ConstantPoolCache

// Number the cache entries from the rewriter's inverse index map: tagged
// map values become secondary entries pointing at their main entry, the
// rest become main entries holding their original constant pool index.
void constantPoolCacheOopDesc::initialize(intArray& inverse_index_map) {
  assert(inverse_index_map.length() == length(), "inverse index map must have same length as cache");
  for (int i = 0; i < length(); i++) {
    ConstantPoolCacheEntry* e = entry_at(i);
    int original_index = inverse_index_map[i];
    if ((original_index & Rewriter::_secondary_entry_tag) != 0) {
      int main_index = (original_index - Rewriter::_secondary_entry_tag);
      assert(!entry_at(main_index)->is_secondary_entry(), "valid main index");
      e->initialize_secondary_entry(main_index);
    } else {
      e->initialize_entry(original_index);
    }
    assert(entry_at(i) == e, "sanity");
  }
}

// RedefineClasses() API support:
// If any entry of this constantPoolCache points to any of duke@435: // old_methods, replace it with the corresponding new_method. duke@435: void constantPoolCacheOopDesc::adjust_method_entries(methodOop* old_methods, methodOop* new_methods, duke@435: int methods_length, bool * trace_name_printed) { duke@435: duke@435: if (methods_length == 0) { duke@435: // nothing to do if there are no methods duke@435: return; duke@435: } duke@435: duke@435: // get shorthand for the interesting class duke@435: klassOop old_holder = old_methods[0]->method_holder(); duke@435: duke@435: for (int i = 0; i < length(); i++) { duke@435: if (!entry_at(i)->is_interesting_method_entry(old_holder)) { duke@435: // skip uninteresting methods duke@435: continue; duke@435: } duke@435: duke@435: // The constantPoolCache contains entries for several different duke@435: // things, but we only care about methods. In fact, we only care duke@435: // about methods in the same class as the one that contains the duke@435: // old_methods. At this point, we have an interesting entry. duke@435: duke@435: for (int j = 0; j < methods_length; j++) { duke@435: methodOop old_method = old_methods[j]; duke@435: methodOop new_method = new_methods[j]; duke@435: duke@435: if (entry_at(i)->adjust_method_entry(old_method, new_method, duke@435: trace_name_printed)) { duke@435: // current old_method matched this entry and we updated it so duke@435: // break out and get to the next interesting entry if there one duke@435: break; duke@435: } duke@435: } duke@435: } duke@435: }