src/share/vm/oops/cpCacheOop.cpp

Thu, 27 May 2010 19:08:38 -0700

author
trims
date
Thu, 27 May 2010 19:08:38 -0700
changeset 1907
c18cbe5936b8
parent 1862
cd5dbf694d45
child 2015
083fde3b838e
permissions
-rw-r--r--

6941466: Oracle rebranding changes for Hotspot repositories
Summary: Change all the Sun copyrights to Oracle copyright
Reviewed-by: ohair

duke@435 1 /*
trims@1907 2 * Copyright (c) 1998, 2009, Oracle and/or its affiliates. All rights reserved.
duke@435 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
duke@435 4 *
duke@435 5 * This code is free software; you can redistribute it and/or modify it
duke@435 6 * under the terms of the GNU General Public License version 2 only, as
duke@435 7 * published by the Free Software Foundation.
duke@435 8 *
duke@435 9 * This code is distributed in the hope that it will be useful, but WITHOUT
duke@435 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
duke@435 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
duke@435 12 * version 2 for more details (a copy is included in the LICENSE file that
duke@435 13 * accompanied this code).
duke@435 14 *
duke@435 15 * You should have received a copy of the GNU General Public License version
duke@435 16 * 2 along with this work; if not, write to the Free Software Foundation,
duke@435 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
duke@435 18 *
trims@1907 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
trims@1907 20 * or visit www.oracle.com if you need additional information or have any
trims@1907 21 * questions.
duke@435 22 *
duke@435 23 */
duke@435 24
duke@435 25 #include "incls/_precompiled.incl"
duke@435 26 #include "incls/_cpCacheOop.cpp.incl"
duke@435 27
duke@435 28
// Implementation of ConstantPoolCacheEntry
duke@435 30
// Initialize a primary cache entry: the low 16 bits of _indices hold the
// original constant pool index; the bytecode bytes (upper halves) start as 0,
// i.e. the entry begins life unresolved.
void ConstantPoolCacheEntry::initialize_entry(int index) {
  assert(0 < index && index < 0x10000, "sanity check");  // must fit in 16 bits, and 0 is reserved
  _indices = index;
  assert(constant_pool_index() == index, "");  // round-trips through the accessor
}
duke@435 36
// Initialize a secondary cache entry (used for invokedynamic): the main
// entry's index is stored in the upper 16 bits of _indices, and the low
// 16 bits are left zero to mark the entry as secondary.
void ConstantPoolCacheEntry::initialize_secondary_entry(int main_index) {
  assert(0 <= main_index && main_index < 0x10000, "sanity check");  // must fit in 16 bits
  _indices = (main_index << 16);
  assert(main_entry_index() == main_index, "");  // round-trips through the accessor
}
duke@435 42
// Pack the TosState and the boolean attributes into the _flags word layout.
// The state is shifted left past five single-bit flags (final, vfinal,
// volatile, method-interface, method) and then past hotSwapBit, so the
// resulting value can be OR'ed with the low field-index/parameter-size bits.
// The existing contents of _flags are preserved (OR'ed in), not replaced.
int ConstantPoolCacheEntry::as_flags(TosState state, bool is_final,
                    bool is_vfinal, bool is_volatile,
                    bool is_method_interface, bool is_method) {
  int f = state;

  assert( state < number_of_states, "Invalid state in as_flags");

  // Append each attribute as one bit, in a fixed order that must match
  // the bit positions the accessors decode (final is the highest of the five).
  f <<= 1;
  if (is_final) f |= 1;
  f <<= 1;
  if (is_vfinal) f |= 1;
  f <<= 1;
  if (is_volatile) f |= 1;
  f <<= 1;
  if (is_method_interface) f |= 1;
  f <<= 1;
  if (is_method) f |= 1;
  f <<= ConstantPoolCacheEntry::hotSwapBit;
  // Preserve existing flag bit values
#ifdef ASSERT
  // A previously-stored tos state may only be 0 (never set) or identical.
  int old_state = ((_flags >> tosBits) & 0x0F);
  assert(old_state == 0 || old_state == state,
         "inconsistent cpCache flags state");
#endif
  return (_flags | f) ;
}
duke@435 69
// Publish the first (byte_no == 1) resolved bytecode into _indices.
// The release store guarantees that f1/f2/flags written earlier are
// visible to other threads before they can observe a non-zero bytecode.
void ConstantPoolCacheEntry::set_bytecode_1(Bytecodes::Code code) {
#ifdef ASSERT
  // Read once.
  volatile Bytecodes::Code c = bytecode_1();
  // Concurrent resolvers must agree; a reset (code == 0) is also allowed.
  assert(c == 0 || c == code || code == 0, "update must be consistent");
#endif
  // Need to flush pending stores here before bytecode is written.
  OrderAccess::release_store_ptr(&_indices, _indices | ((u_char)code << 16));
}
duke@435 79
// Publish the second (byte_no == 2) resolved bytecode into _indices, one
// byte above bytecode_1.  Same release-store publication protocol as
// set_bytecode_1: earlier f1/f2/flags stores are flushed first.
void ConstantPoolCacheEntry::set_bytecode_2(Bytecodes::Code code) {
#ifdef ASSERT
  // Read once.
  volatile Bytecodes::Code c = bytecode_2();
  // Concurrent resolvers must agree; a reset (code == 0) is also allowed.
  assert(c == 0 || c == code || code == 0, "update must be consistent");
#endif
  // Need to flush pending stores here before bytecode is written.
  OrderAccess::release_store_ptr(&_indices, _indices | ((u_char)code << 24));
}
duke@435 89
#ifdef ASSERT
// It is possible to have two different dummy methodOops created
// when the resolve code for invoke interface executes concurrently
// Hence the assertion below is weakened a bit for the invokeinterface
// case.
// NOTE(review): the two comparisons below are joined with '||', so two
// methods with merely the same name OR the same signature compare equal.
// That looks weaker than "same name AND same signature" — presumably
// intentional given the comment above, but worth confirming.
bool ConstantPoolCacheEntry::same_methodOop(oop cur_f1, oop f1) {
  return (cur_f1 == f1 || ((methodOop)cur_f1)->name() ==
         ((methodOop)f1)->name() || ((methodOop)cur_f1)->signature() ==
         ((methodOop)f1)->signature());
}
#endif
duke@435 101
// Note that concurrent update of both bytecodes can leave one of them
// reset to zero. This is harmless; the interpreter will simply re-resolve
// the damaged entry. More seriously, the memory synchronization is needed
// to flush other fields (f1, f2) completely to memory before the bytecodes
// are updated, lest other processors see a non-zero bytecode but zero f1/f2.
//
// Fill in this entry for a resolved field access:
//   f1 = holder klass, f2 = field offset, flags = tos state + final/volatile
//   bits + the (descaled) field index in the low bits.
// The bytecodes are written last (release stores) to publish the entry.
void ConstantPoolCacheEntry::set_field(Bytecodes::Code get_code,
                                       Bytecodes::Code put_code,
                                       KlassHandle field_holder,
                                       int orig_field_index,
                                       int field_offset,
                                       TosState field_type,
                                       bool is_final,
                                       bool is_volatile) {
  set_f1(field_holder());
  set_f2(field_offset);
  // The field index is used by jvm/ti and is the index into fields() array
  // in holder instanceKlass. This is scaled by instanceKlass::next_offset.
  assert((orig_field_index % instanceKlass::next_offset) == 0, "wierd index");
  const int field_index = orig_field_index / instanceKlass::next_offset;
  assert(field_index <= field_index_mask,
         "field index does not fit in low flag bits");
  set_flags(as_flags(field_type, is_final, false, is_volatile, false, false) |
            (field_index & field_index_mask));
  set_bytecode_1(get_code);
  set_bytecode_2(put_code);
  NOT_PRODUCT(verify(tty));
}
duke@435 129
duke@435 130 int ConstantPoolCacheEntry::field_index() const {
duke@435 131 return (_flags & field_index_mask) * instanceKlass::next_offset;
duke@435 132 }
duke@435 133
// Fill in this entry for a resolved method invocation.  Depending on the
// invoke bytecode, either f1 (resolved methodOop) or f2 (vtable index, or
// methodOop when statically bound) is set, and the appropriate bytecode
// byte (1 or 2) is published last to make the entry visible as resolved.
void ConstantPoolCacheEntry::set_method(Bytecodes::Code invoke_code,
                                        methodHandle method,
                                        int vtable_index) {

  assert(method->interpreter_entry() != NULL, "should have been set at this point");
  assert(!method->is_obsolete(), "attempt to write obsolete method to cpCache");
  // invokeinterface resolving to a java.lang.Object virtual is rewritten
  // to invokevirtual below; remember that we came from invokeinterface.
  bool change_to_virtual = (invoke_code == Bytecodes::_invokeinterface);

  int byte_no = -1;
  bool needs_vfinal_flag = false;
  switch (invoke_code) {
    case Bytecodes::_invokedynamic:
    case Bytecodes::_invokevirtual:
    case Bytecodes::_invokeinterface: {
        if (method->can_be_statically_bound()) {
          // vfinal: f2 holds the method itself instead of a vtable index.
          set_f2((intptr_t)method());
          needs_vfinal_flag = true;
        } else {
          assert(vtable_index >= 0, "valid index");
          set_f2(vtable_index);
        }
        byte_no = 2;
        break;
    }
    case Bytecodes::_invokespecial:
      // Preserve the value of the vfinal flag on invokevirtual bytecode
      // which may be shared with this constant pool cache entry.
      needs_vfinal_flag = is_resolved(Bytecodes::_invokevirtual) && is_vfinal();
      // fall through
    case Bytecodes::_invokestatic:
      set_f1(method());
      byte_no = 1;
      break;
    default:
      ShouldNotReachHere();
      break;
  }

  // Pack tos state, final/vfinal/method bits and the parameter size into
  // _flags (parameter size lives in the low bits, like the field index).
  set_flags(as_flags(as_TosState(method->result_type()),
                     method->is_final_method(),
                     needs_vfinal_flag,
                     false,
                     change_to_virtual,
                     true)|
            method()->size_of_parameters());

  // Note: byte_no also appears in TemplateTable::resolve.
  if (byte_no == 1) {
    set_bytecode_1(invoke_code);
  } else if (byte_no == 2)  {
    if (change_to_virtual) {
      // NOTE: THIS IS A HACK - BE VERY CAREFUL!!!
      //
      // Workaround for the case where we encounter an invokeinterface, but we
      // should really have an _invokevirtual since the resolved method is a
      // virtual method in java.lang.Object. This is a corner case in the spec
      // but is presumably legal. javac does not generate this code.
      //
      // We set bytecode_1() to _invokeinterface, because that is the
      // bytecode # used by the interpreter to see if it is resolved.
      // We set bytecode_2() to _invokevirtual.
      // See also interpreterRuntime.cpp. (8/25/2000)
      // Only set resolved for the invokeinterface case if method is public.
      // Otherwise, the method needs to be reresolved with caller for each
      // interface call.
      if (method->is_public()) set_bytecode_1(invoke_code);
      set_bytecode_2(Bytecodes::_invokevirtual);
    } else {
      set_bytecode_2(invoke_code);
    }
  } else {
    ShouldNotReachHere();
  }
  NOT_PRODUCT(verify(tty));
}
duke@435 209
duke@435 210
duke@435 211 void ConstantPoolCacheEntry::set_interface_call(methodHandle method, int index) {
duke@435 212 klassOop interf = method->method_holder();
duke@435 213 assert(instanceKlass::cast(interf)->is_interface(), "must be an interface");
duke@435 214 set_f1(interf);
duke@435 215 set_f2(index);
duke@435 216 set_flags(as_flags(as_TosState(method->result_type()), method->is_final_method(), false, false, false, true) | method()->size_of_parameters());
duke@435 217 set_bytecode_1(Bytecodes::_invokeinterface);
duke@435 218 }
duke@435 219
duke@435 220
// Fill in this (secondary) entry for a resolved invokedynamic call site.
// f1 is installed with a CAS so that concurrent resolvers race safely:
// the first thread to succeed owns the entry; losers keep the winner's
// call site.  No bytecode is published for secondary entries.
void ConstantPoolCacheEntry::set_dynamic_call(Handle call_site,
                                              methodHandle signature_invoker) {
  int param_size = signature_invoker->size_of_parameters();
  assert(param_size >= 1, "method argument size must include MH.this");
  param_size -= 1;  // do not count MH.this; it is not stacked for invokedynamic
  if (Atomic::cmpxchg_ptr(call_site(), &_f1, NULL) == NULL) {
    // racing threads might be trying to install their own favorites
    // (we won the race; re-store through set_f1 for its side effects)
    set_f1(call_site());
  }
  //set_f2(0);
  bool is_final = true;
  assert(signature_invoker->is_final_method(), "is_final");
  set_flags(as_flags(as_TosState(signature_invoker->result_type()), is_final, false, false, false, true) | param_size);
  // do not do set_bytecode on a secondary CP cache entry
  //set_bytecode_1(Bytecodes::_invokedynamic);
}
jrose@1161 237
jrose@1161 238
duke@435 239 class LocalOopClosure: public OopClosure {
duke@435 240 private:
duke@435 241 void (*_f)(oop*);
duke@435 242
duke@435 243 public:
duke@435 244 LocalOopClosure(void f(oop*)) { _f = f; }
duke@435 245 virtual void do_oop(oop* o) { _f(o); }
coleenp@548 246 virtual void do_oop(narrowOop *o) { ShouldNotReachHere(); }
duke@435 247 };
duke@435 248
duke@435 249
duke@435 250 void ConstantPoolCacheEntry::oops_do(void f(oop*)) {
duke@435 251 LocalOopClosure blk(f);
duke@435 252 oop_iterate(&blk);
duke@435 253 }
duke@435 254
duke@435 255
// Visit the oop fields of this entry: f1 is always an oop (or NULL);
// f2 holds an oop (a methodOop) only when the vfinal flag is set,
// otherwise it is a plain integer (vtable index / field offset) and
// must not be visited.
void ConstantPoolCacheEntry::oop_iterate(OopClosure* blk) {
  assert(in_words(size()) == 4, "check code below - may need adjustment");
  // field[1] is always oop or NULL
  blk->do_oop((oop*)&_f1);
  if (is_vfinal()) {
    blk->do_oop((oop*)&_f2);
  }
}
duke@435 264
duke@435 265
// Same as oop_iterate, but only visits fields whose addresses fall
// inside the given memory region (used for bounded card/region scans).
void ConstantPoolCacheEntry::oop_iterate_m(OopClosure* blk, MemRegion mr) {
  assert(in_words(size()) == 4, "check code below - may need adjustment");
  // field[1] is always oop or NULL
  if (mr.contains((oop *)&_f1)) blk->do_oop((oop*)&_f1);
  if (is_vfinal()) {
    // f2 is an oop (methodOop) only in the vfinal case
    if (mr.contains((oop *)&_f2)) blk->do_oop((oop*)&_f2);
  }
}
duke@435 274
duke@435 275
// Serial mark-sweep marking: push the entry's oop fields onto the
// marking stack (f2 only when it actually holds an oop, i.e. vfinal).
void ConstantPoolCacheEntry::follow_contents() {
  assert(in_words(size()) == 4, "check code below - may need adjustment");
  // field[1] is always oop or NULL
  MarkSweep::mark_and_push((oop*)&_f1);
  if (is_vfinal()) {
    MarkSweep::mark_and_push((oop*)&_f2);
  }
}
duke@435 284
#ifndef SERIALGC
// Parallel-compact marking analogue of follow_contents(): mark and push
// the entry's oop fields via the per-thread compaction manager.
void ConstantPoolCacheEntry::follow_contents(ParCompactionManager* cm) {
  assert(in_words(size()) == 4, "check code below - may need adjustment");
  // field[1] is always oop or NULL
  PSParallelCompact::mark_and_push(cm, (oop*)&_f1);
  if (is_vfinal()) {
    // f2 is an oop (methodOop) only in the vfinal case
    PSParallelCompact::mark_and_push(cm, (oop*)&_f2);
  }
}
#endif // SERIALGC
duke@435 295
// Serial mark-sweep pointer adjustment: update the entry's oop fields to
// their new (post-compaction) addresses.
void ConstantPoolCacheEntry::adjust_pointers() {
  assert(in_words(size()) == 4, "check code below - may need adjustment");
  // field[1] is always oop or NULL
  MarkSweep::adjust_pointer((oop*)&_f1);
  if (is_vfinal()) {
    // f2 is an oop (methodOop) only in the vfinal case
    MarkSweep::adjust_pointer((oop*)&_f2);
  }
}
duke@435 304
#ifndef SERIALGC
// Parallel-compact pointer adjustment: update the entry's oop fields to
// their new addresses after compaction.
void ConstantPoolCacheEntry::update_pointers() {
  assert(in_words(size()) == 4, "check code below - may need adjustment");
  // field[1] is always oop or NULL
  PSParallelCompact::adjust_pointer((oop*)&_f1);
  if (is_vfinal()) {
    // f2 is an oop (methodOop) only in the vfinal case
    PSParallelCompact::adjust_pointer((oop*)&_f2);
  }
}

// Bounded variant: only adjusts fields whose referents lie within
// [beg_addr, end_addr) — used when updating one region at a time.
void ConstantPoolCacheEntry::update_pointers(HeapWord* beg_addr,
                                             HeapWord* end_addr) {
  assert(in_words(size()) == 4, "check code below - may need adjustment");
  // field[1] is always oop or NULL
  PSParallelCompact::adjust_pointer((oop*)&_f1, beg_addr, end_addr);
  if (is_vfinal()) {
    PSParallelCompact::adjust_pointer((oop*)&_f2, beg_addr, end_addr);
  }
}
#endif // SERIALGC
duke@435 325
// RedefineClasses() API support:
// If this constantPoolCacheEntry refers to old_method then update it
// to refer to new_method.  Returns true if an update was made.
// trace_name_printed lets the caller print the class name only once
// across a whole batch of entry updates.
bool ConstantPoolCacheEntry::adjust_method_entry(methodOop old_method,
       methodOop new_method, bool * trace_name_printed) {

  if (is_vfinal()) {
    // virtual and final so f2() contains method ptr instead of vtable index
    if (f2() == (intptr_t)old_method) {
      // match old_method so need an update
      _f2 = (intptr_t)new_method;
      if (RC_TRACE_IN_RANGE(0x00100000, 0x00400000)) {
        if (!(*trace_name_printed)) {
          // RC_TRACE_MESG macro has an embedded ResourceMark
          RC_TRACE_MESG(("adjust: name=%s",
            Klass::cast(old_method->method_holder())->external_name()));
          *trace_name_printed = true;
        }
        // RC_TRACE macro has an embedded ResourceMark
        RC_TRACE(0x00400000, ("cpc vf-entry update: %s(%s)",
          new_method->name()->as_C_string(),
          new_method->signature()->as_C_string()));
      }

      return true;
    }

    // f1() is not used with virtual entries so bail out
    return false;
  }

  if ((oop)_f1 == NULL) {
    // NULL f1() means this is a virtual entry so bail out
    // We are assuming that the vtable index does not need change.
    return false;
  }

  if ((oop)_f1 == old_method) {
    // non-virtual entry: f1 holds the resolved methodOop directly
    _f1 = new_method;
    if (RC_TRACE_IN_RANGE(0x00100000, 0x00400000)) {
      if (!(*trace_name_printed)) {
        // RC_TRACE_MESG macro has an embedded ResourceMark
        RC_TRACE_MESG(("adjust: name=%s",
          Klass::cast(old_method->method_holder())->external_name()));
        *trace_name_printed = true;
      }
      // RC_TRACE macro has an embedded ResourceMark
      RC_TRACE(0x00400000, ("cpc entry update: %s(%s)",
        new_method->name()->as_C_string(),
        new_method->signature()->as_C_string()));
    }

    return true;
  }

  return false;
}
duke@435 383
// RedefineClasses() helper: returns true iff this entry is a method entry
// whose resolved method belongs to class k (and therefore may need to be
// adjusted when k's methods are redefined).
bool ConstantPoolCacheEntry::is_interesting_method_entry(klassOop k) {
  if (!is_method_entry()) {
    // not a method entry so not interesting by default
    return false;
  }

  methodOop m = NULL;
  if (is_vfinal()) {
    // virtual and final so _f2 contains method ptr instead of vtable index
    m = (methodOop)_f2;
  } else if ((oop)_f1 == NULL) {
    // NULL _f1 means this is a virtual entry so also not interesting
    return false;
  } else {
    if (!((oop)_f1)->is_method()) {
      // _f1 can also contain a klassOop for an interface
      return false;
    }
    m = (methodOop)_f1;
  }

  // Defensive re-check of the assertion conditions in product builds.
  assert(m != NULL && m->is_method(), "sanity check");
  if (m == NULL || !m->is_method() || m->method_holder() != k) {
    // robustness for above sanity checks or method is not in
    // the interesting class
    return false;
  }

  // the method is in the interesting class so the entry is interesting
  return true;
}
duke@435 415
duke@435 416 void ConstantPoolCacheEntry::print(outputStream* st, int index) const {
duke@435 417 // print separator
duke@435 418 if (index == 0) tty->print_cr(" -------------");
duke@435 419 // print entry
jrose@1161 420 tty->print_cr("%3d (%08x) ", index, this);
jrose@1161 421 if (is_secondary_entry())
jrose@1161 422 tty->print_cr("[%5d|secondary]", main_entry_index());
jrose@1161 423 else
jrose@1161 424 tty->print_cr("[%02x|%02x|%5d]", bytecode_2(), bytecode_1(), constant_pool_index());
duke@435 425 tty->print_cr(" [ %08x]", (address)(oop)_f1);
duke@435 426 tty->print_cr(" [ %08x]", _f2);
duke@435 427 tty->print_cr(" [ %08x]", _flags);
duke@435 428 tty->print_cr(" -------------");
duke@435 429 }
duke@435 430
// Verify the integrity of this entry.  Intentionally a stub for now; it is
// kept so debug-build callers (e.g. NOT_PRODUCT(verify(tty))) compile.
void ConstantPoolCacheEntry::verify(outputStream* st) const {
  // not implemented yet
}
duke@435 434
duke@435 435 // Implementation of ConstantPoolCache
duke@435 436
// Initialize every cache entry from the rewriter's inverse index map:
// entries tagged with Rewriter::_secondary_entry_tag become secondary
// entries pointing at their main entry; all others become primary entries
// carrying their original constant pool index.
void constantPoolCacheOopDesc::initialize(intArray& inverse_index_map) {
  assert(inverse_index_map.length() == length(), "inverse index map must have same length as cache");
  for (int i = 0; i < length(); i++) {
    ConstantPoolCacheEntry* e = entry_at(i);
    int original_index = inverse_index_map[i];
    if ((original_index & Rewriter::_secondary_entry_tag) != 0) {
      // tagged value encodes the main entry's index
      int main_index = (original_index - Rewriter::_secondary_entry_tag);
      assert(!entry_at(main_index)->is_secondary_entry(), "valid main index");
      e->initialize_secondary_entry(main_index);
    } else {
      e->initialize_entry(original_index);
    }
    assert(entry_at(i) == e, "sanity");
  }
}
duke@435 452
duke@435 453 // RedefineClasses() API support:
duke@435 454 // If any entry of this constantPoolCache points to any of
duke@435 455 // old_methods, replace it with the corresponding new_method.
duke@435 456 void constantPoolCacheOopDesc::adjust_method_entries(methodOop* old_methods, methodOop* new_methods,
duke@435 457 int methods_length, bool * trace_name_printed) {
duke@435 458
duke@435 459 if (methods_length == 0) {
duke@435 460 // nothing to do if there are no methods
duke@435 461 return;
duke@435 462 }
duke@435 463
duke@435 464 // get shorthand for the interesting class
duke@435 465 klassOop old_holder = old_methods[0]->method_holder();
duke@435 466
duke@435 467 for (int i = 0; i < length(); i++) {
duke@435 468 if (!entry_at(i)->is_interesting_method_entry(old_holder)) {
duke@435 469 // skip uninteresting methods
duke@435 470 continue;
duke@435 471 }
duke@435 472
duke@435 473 // The constantPoolCache contains entries for several different
duke@435 474 // things, but we only care about methods. In fact, we only care
duke@435 475 // about methods in the same class as the one that contains the
duke@435 476 // old_methods. At this point, we have an interesting entry.
duke@435 477
duke@435 478 for (int j = 0; j < methods_length; j++) {
duke@435 479 methodOop old_method = old_methods[j];
duke@435 480 methodOop new_method = new_methods[j];
duke@435 481
duke@435 482 if (entry_at(i)->adjust_method_entry(old_method, new_method,
duke@435 483 trace_name_printed)) {
duke@435 484 // current old_method matched this entry and we updated it so
duke@435 485 // break out and get to the next interesting entry if there one
duke@435 486 break;
duke@435 487 }
duke@435 488 }
duke@435 489 }
duke@435 490 }

mercurial