src/share/vm/oops/cpCacheOop.cpp

author       twisti
date         Tue, 24 Jul 2012 10:51:00 -0700
changeset    3969:1d7922586cf6
parent       3601:f096e1b74d85
permissions  -rw-r--r--

7023639: JSR 292 method handle invocation needs a fast path for compiled code
6984705: JSR 292 method handle creation should not go through JNI
Summary: remove assembly code for JDK 7 chained method handles
Reviewed-by: jrose, twisti, kvn, mhaupt
Contributed-by: John Rose <john.r.rose@oracle.com>, Christian Thalinger <christian.thalinger@oracle.com>, Michael Haupt <michael.haupt@oracle.com>

     1 /*
     2  * Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved.
     3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     4  *
     5  * This code is free software; you can redistribute it and/or modify it
     6  * under the terms of the GNU General Public License version 2 only, as
     7  * published by the Free Software Foundation.
     8  *
     9  * This code is distributed in the hope that it will be useful, but WITHOUT
    10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    12  * version 2 for more details (a copy is included in the LICENSE file that
    13  * accompanied this code).
    14  *
    15  * You should have received a copy of the GNU General Public License version
    16  * 2 along with this work; if not, write to the Free Software Foundation,
    17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    18  *
    19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
    20  * or visit www.oracle.com if you need additional information or have any
    21  * questions.
    22  *
    23  */
    25 #include "precompiled.hpp"
    26 #include "gc_implementation/shared/markSweep.inline.hpp"
    27 #include "interpreter/interpreter.hpp"
    28 #include "interpreter/rewriter.hpp"
    29 #include "memory/universe.inline.hpp"
    30 #include "oops/cpCacheOop.hpp"
    31 #include "oops/objArrayOop.hpp"
    32 #include "oops/oop.inline.hpp"
    33 #include "prims/jvmtiRedefineClassesTrace.hpp"
    34 #include "prims/methodHandles.hpp"
    35 #include "runtime/handles.inline.hpp"
     38 // Implementation of ConstantPoolCacheEntry
    40 void ConstantPoolCacheEntry::initialize_entry(int index) {
    41   assert(0 < index && index < 0x10000, "sanity check");
    42   _indices = index;
    43   assert(constant_pool_index() == index, "");
    44 }
    46 void ConstantPoolCacheEntry::initialize_secondary_entry(int main_index) {
    47   assert(0 <= main_index && main_index < 0x10000, "sanity check");
    48   _indices = (main_index << main_cp_index_bits);
    49   assert(main_entry_index() == main_index, "");
    50 }
    52 int ConstantPoolCacheEntry::make_flags(TosState state,
    53                                        int option_bits,
    54                                        int field_index_or_method_params) {
    55   assert(state < number_of_states, "Invalid state in make_flags");
    56   int f = ((int)state << tos_state_shift) | option_bits | field_index_or_method_params;
    57   // Preserve existing flag bit values
    58   // The low bits are a field offset, or else the method parameter size.
    59 #ifdef ASSERT
    60   TosState old_state = flag_state();
    61   assert(old_state == (TosState)0 || old_state == state,
    62          "inconsistent cpCache flags state");
    63 #endif
    64   return (_flags | f) ;
    65 }
    67 void ConstantPoolCacheEntry::set_bytecode_1(Bytecodes::Code code) {
    68   assert(!is_secondary_entry(), "must not overwrite main_entry_index");
    69 #ifdef ASSERT
    70   // Read once.
    71   volatile Bytecodes::Code c = bytecode_1();
    72   assert(c == 0 || c == code || code == 0, "update must be consistent");
    73 #endif
    74   // Need to flush pending stores here before bytecode is written.
    75   OrderAccess::release_store_ptr(&_indices, _indices | ((u_char)code << bytecode_1_shift));
    76 }
    78 void ConstantPoolCacheEntry::set_bytecode_2(Bytecodes::Code code) {
    79   assert(!is_secondary_entry(), "must not overwrite main_entry_index");
    80 #ifdef ASSERT
    81   // Read once.
    82   volatile Bytecodes::Code c = bytecode_2();
    83   assert(c == 0 || c == code || code == 0, "update must be consistent");
    84 #endif
    85   // Need to flush pending stores here before bytecode is written.
    86   OrderAccess::release_store_ptr(&_indices, _indices | ((u_char)code << bytecode_2_shift));
    87 }
    89 // Sets f1, ordering with previous writes.
    90 void ConstantPoolCacheEntry::release_set_f1(oop f1) {
    91   // Use barriers as in oop_store
    92   assert(f1 != NULL, "");
    93   oop* f1_addr = (oop*) &_f1;
    94   update_barrier_set_pre(f1_addr, f1);
    95   OrderAccess::release_store_ptr((intptr_t*)f1_addr, f1);
    96   update_barrier_set((void*) f1_addr, f1);
    97 }
    99 // Sets flags, but only if the value was previously zero.
   100 bool ConstantPoolCacheEntry::init_flags_atomic(intptr_t flags) {
   101   intptr_t result = Atomic::cmpxchg_ptr(flags, &_flags, 0);
   102   return (result == 0);
   103 }
   105 #ifdef ASSERT
   106 // It is possible to have two different dummy methodOops created
   107 // when the resolve code for invoke interface executes concurrently
   108 // Hence the assertion below is weakened a bit for the invokeinterface
   109 // case.
   110 bool ConstantPoolCacheEntry::same_methodOop(oop cur_f1, oop f1) {
   111   return (cur_f1 == f1 || ((methodOop)cur_f1)->name() ==
   112          ((methodOop)f1)->name() || ((methodOop)cur_f1)->signature() ==
   113          ((methodOop)f1)->signature());
   114 }
   115 #endif
   117 // Note that concurrent update of both bytecodes can leave one of them
   118 // reset to zero.  This is harmless; the interpreter will simply re-resolve
   119 // the damaged entry.  More seriously, the memory synchronization is needed
   120 // to flush other fields (f1, f2) completely to memory before the bytecodes
   121 // are updated, lest other processors see a non-zero bytecode but zero f1/f2.
   122 void ConstantPoolCacheEntry::set_field(Bytecodes::Code get_code,
   123                                        Bytecodes::Code put_code,
   124                                        KlassHandle field_holder,
   125                                        int field_index,
   126                                        int field_offset,
   127                                        TosState field_type,
   128                                        bool is_final,
   129                                        bool is_volatile) {
   130   set_f1(field_holder()->java_mirror());
   131   set_f2(field_offset);
   132   assert((field_index & field_index_mask) == field_index,
   133          "field index does not fit in low flag bits");
   134   set_field_flags(field_type,
   135                   ((is_volatile ? 1 : 0) << is_volatile_shift) |
   136                   ((is_final    ? 1 : 0) << is_final_shift),
   137                   field_index);
   138   set_bytecode_1(get_code);
   139   set_bytecode_2(put_code);
   140   NOT_PRODUCT(verify(tty));
   141 }
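
The ordering constraint described in the comment above set_field (flush f1/f2 completely to memory before a bytecode becomes visible, so no reader sees a non-zero bytecode with stale f1/f2) is the standard release/acquire publication pattern. Below is a minimal, stand-alone sketch of that pattern using std::atomic rather than HotSpot's OrderAccess primitives; the struct and member names are hypothetical and not part of this file.

// Illustrative sketch only: a std::atomic analogy of the ordering used by
// set_field/set_bytecode_1, not HotSpot code.  Payload words are published
// before the "resolved" marker, and readers test the marker first.
#include <atomic>
#include <cstdint>

struct EntrySketch {
  intptr_t f1;                    // payload word 1 (plain stores suffice)
  intptr_t f2;                    // payload word 2
  std::atomic<int> bytecode{0};   // 0 means "not yet resolved"

  void publish(intptr_t v1, intptr_t v2, int code) {
    f1 = v1;
    f2 = v2;
    // Release store: everything written above becomes visible before 'code',
    // mirroring OrderAccess::release_store_ptr in set_bytecode_1/2.
    bytecode.store(code, std::memory_order_release);
  }

  bool try_read(intptr_t& v1, intptr_t& v2) {
    // Acquire load: a non-zero bytecode guarantees f1/f2 are fully written.
    if (bytecode.load(std::memory_order_acquire) == 0) return false;
    v1 = f1;
    v2 = f2;
    return true;
  }
};
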
   143 void ConstantPoolCacheEntry::set_parameter_size(int value) {
   144   // This routine is called only in corner cases where the CPCE is not yet initialized.
   145   // See AbstractInterpreter::deopt_continue_after_entry.
   146   assert(_flags == 0 || parameter_size() == 0 || parameter_size() == value,
   147          err_msg("size must not change: parameter_size=%d, value=%d", parameter_size(), value));
   148   // Setting the parameter size by itself is only safe if the
   149   // current value of _flags is 0, otherwise another thread may have
   150   // updated it and we don't want to overwrite that value.  Don't
   151   // bother trying to update it once it's nonzero but always make
   152   // sure that the final parameter size agrees with what was passed.
   153   if (_flags == 0) {
   154     Atomic::cmpxchg_ptr((value & parameter_size_mask), &_flags, 0);
   155   }
   156   guarantee(parameter_size() == value,
   157             err_msg("size must not change: parameter_size=%d, value=%d", parameter_size(), value));
   158 }
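
set_parameter_size relies on an initialize-once idiom: store the value only while _flags is still zero, then verify that whatever won the race agrees with the requested value. A minimal sketch of that idiom, with std::atomic standing in for Atomic::cmpxchg_ptr; the function name is hypothetical.

// Illustrative sketch only: the init_flags_atomic/set_parameter_size idiom
// expressed with std::atomic.  Write the word only if it is still zero, then
// insist that the value that ended up there is the one we wanted.
#include <atomic>
#include <cassert>
#include <cstdint>

void set_once_and_verify(std::atomic<intptr_t>& flags, intptr_t value) {
  intptr_t expected = 0;
  // Only the first caller succeeds; a racing thread's earlier value is kept,
  // as with Atomic::cmpxchg_ptr(value, &_flags, 0).
  flags.compare_exchange_strong(expected, value);
  // Either we won the race, or another thread must have stored an equal value.
  assert(flags.load() == value && "size must not change");
}
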
   160 void ConstantPoolCacheEntry::set_method(Bytecodes::Code invoke_code,
   161                                         methodHandle method,
   162                                         int vtable_index) {
   163   assert(!is_secondary_entry(), "");
   164   assert(method->interpreter_entry() != NULL, "should have been set at this point");
   165   assert(!method->is_obsolete(),  "attempt to write obsolete method to cpCache");
   167   int byte_no = -1;
   168   bool change_to_virtual = false;
   170   switch (invoke_code) {
   171     case Bytecodes::_invokeinterface:
   172       // We get here from InterpreterRuntime::resolve_invoke when an invokeinterface
   173       // instruction somehow links to a non-interface method (in Object).
   174       // In that case, the method has no itable index and must be invoked as a virtual.
   175       // Set a flag to keep track of this corner case.
   176       change_to_virtual = true;
   178       // ...and fall through as if we were handling invokevirtual:
   179     case Bytecodes::_invokevirtual:
   180       {
   181         if (method->can_be_statically_bound()) {
   182           // set_f2_as_vfinal_method checks if is_vfinal flag is true.
   183           set_method_flags(as_TosState(method->result_type()),
   184                            (                             1      << is_vfinal_shift) |
   185                            ((method->is_final_method() ? 1 : 0) << is_final_shift)  |
   186                            ((change_to_virtual         ? 1 : 0) << is_forced_virtual_shift),
   187                            method()->size_of_parameters());
   188           set_f2_as_vfinal_method(method());
   189         } else {
   190           assert(vtable_index >= 0, "valid index");
   191           assert(!method->is_final_method(), "sanity");
   192           set_method_flags(as_TosState(method->result_type()),
   193                            ((change_to_virtual ? 1 : 0) << is_forced_virtual_shift),
   194                            method()->size_of_parameters());
   195           set_f2(vtable_index);
   196         }
   197         byte_no = 2;
   198         break;
   199       }
   201     case Bytecodes::_invokespecial:
   202     case Bytecodes::_invokestatic:
   203       // Note:  Read and preserve the value of the is_vfinal flag on any
   204       // invokevirtual bytecode shared with this constant pool cache entry.
   205       // It is cheap and safe to consult is_vfinal() at all times.
   206       // Once is_vfinal is set, it must stay that way, lest we get a dangling oop.
   207       set_method_flags(as_TosState(method->result_type()),
   208                        ((is_vfinal()               ? 1 : 0) << is_vfinal_shift) |
   209                        ((method->is_final_method() ? 1 : 0) << is_final_shift),
   210                        method()->size_of_parameters());
   211       set_f1(method());
   212       byte_no = 1;
   213       break;
   214     default:
   215       ShouldNotReachHere();
   216       break;
   217   }
   219   // Note:  byte_no also appears in TemplateTable::resolve.
   220   if (byte_no == 1) {
   221     assert(invoke_code != Bytecodes::_invokevirtual &&
   222            invoke_code != Bytecodes::_invokeinterface, "");
   223     set_bytecode_1(invoke_code);
   224   } else if (byte_no == 2)  {
   225     if (change_to_virtual) {
   226       assert(invoke_code == Bytecodes::_invokeinterface, "");
   227       // NOTE: THIS IS A HACK - BE VERY CAREFUL!!!
   228       //
   229       // Workaround for the case where we encounter an invokeinterface, but we
   230       // should really have an _invokevirtual since the resolved method is a
   231       // virtual method in java.lang.Object. This is a corner case in the spec
   232       // but is presumably legal. javac does not generate this code.
   233       //
   234       // We set bytecode_1() to _invokeinterface, because that is the
   235       // bytecode # used by the interpreter to see if it is resolved.
   236       // We set bytecode_2() to _invokevirtual.
   237       // See also interpreterRuntime.cpp. (8/25/2000)
   238       // Only set resolved for the invokeinterface case if method is public.
   239       // Otherwise, the method needs to be reresolved with caller for each
   240       // interface call.
   241       if (method->is_public()) set_bytecode_1(invoke_code);
   242     } else {
   243       assert(invoke_code == Bytecodes::_invokevirtual, "");
   244     }
   245     // set up for invokevirtual, even if linking for invokeinterface also:
   246     set_bytecode_2(Bytecodes::_invokevirtual);
   247   } else {
   248     ShouldNotReachHere();
   249   }
   250   NOT_PRODUCT(verify(tty));
   251 }
   254 void ConstantPoolCacheEntry::set_interface_call(methodHandle method, int index) {
   255   assert(!is_secondary_entry(), "");
   256   klassOop interf = method->method_holder();
   257   assert(instanceKlass::cast(interf)->is_interface(), "must be an interface");
   258   assert(!method->is_final_method(), "interfaces do not have final methods; cannot link to one here");
   259   set_f1(interf);
   260   set_f2(index);
   261   set_method_flags(as_TosState(method->result_type()),
   262                    0,  // no option bits
   263                    method()->size_of_parameters());
   264   set_bytecode_1(Bytecodes::_invokeinterface);
   265 }
   268 void ConstantPoolCacheEntry::set_method_handle(methodHandle adapter, Handle appendix) {
   269   assert(!is_secondary_entry(), "");
   270   set_method_handle_common(Bytecodes::_invokehandle, adapter, appendix);
   271 }
   273 void ConstantPoolCacheEntry::set_dynamic_call(methodHandle adapter, Handle appendix) {
   274   assert(is_secondary_entry(), "");
   275   set_method_handle_common(Bytecodes::_invokedynamic, adapter, appendix);
   276 }
   278 void ConstantPoolCacheEntry::set_method_handle_common(Bytecodes::Code invoke_code, methodHandle adapter, Handle appendix) {
   279   // NOTE: This CPCE can be the subject of data races.
   280   // There are three words to update: flags, f2, f1 (in that order).
   281   // Writers must store all other values before f1.
   282   // Readers must test f1 first for non-null before reading other fields.
   283   // Competing writers must acquire exclusive access on the first
   284   // write, to flags, using a compare/exchange.
   285   // A losing writer must spin until the winner writes f1,
   286   // so that when he returns, he can use the linked cache entry.
   288   bool has_appendix = appendix.not_null();
   289   if (!has_appendix) {
   290     // The extra argument is not used, but we need a non-null value to signify linkage state.
   291     // Set it to something benign that will never leak memory.
   292     appendix = Universe::void_mirror();
   293   }
   295   bool owner =
   296     init_method_flags_atomic(as_TosState(adapter->result_type()),
   297                    ((has_appendix ?  1 : 0) << has_appendix_shift) |
   298                    (                 1      << is_vfinal_shift)    |
   299                    (                 1      << is_final_shift),
   300                    adapter->size_of_parameters());
   301   if (!owner) {
   302     while (is_f1_null()) {
   303       // Pause momentarily on a low-level lock, to allow racing thread to win.
   304       MutexLockerEx mu(Patching_lock, Mutex::_no_safepoint_check_flag);
   305       os::yield();
   306     }
   307     return;
   308   }
   310   if (TraceInvokeDynamic) {
   311     tty->print_cr("set_method_handle bc=%d appendix="PTR_FORMAT"%s method="PTR_FORMAT" ",
   312                   invoke_code,
   313                   (intptr_t)appendix(), (has_appendix ? "" : " (unused)"),
   314                   (intptr_t)adapter());
   315     adapter->print();
   316     if (has_appendix)  appendix()->print();
   317   }
   319   // Method handle invokes and invokedynamic sites use both cp cache words.
   320   // f1, if not null, contains a value passed as a trailing argument to the adapter.
   321   // In the general case, this could be the call site's MethodType,
    322   // for use with java.lang.invoke.Invokers.checkExactType, or else a CallSite object.
   323   // f2 contains the adapter method which manages the actual call.
   324   // In the general case, this is a compiled LambdaForm.
   325   // (The Java code is free to optimize these calls by binding other
   326   // sorts of methods and appendices to call sites.)
   327   // JVM-level linking is via f2, as if for invokevfinal, and signatures are erased.
   328   // The appendix argument (if any) is added to the signature, and is counted in the parameter_size bits.
   329   // In principle this means that the method (with appendix) could take up to 256 parameter slots.
   330   //
   331   // This means that given a call site like (List)mh.invoke("foo"),
   332   // the f2 method has signature '(Ljl/Object;Ljl/invoke/MethodType;)Ljl/Object;',
   333   // not '(Ljava/lang/String;)Ljava/util/List;'.
   334   // The fact that String and List are involved is encoded in the MethodType in f1.
   335   // This allows us to create fewer method oops, while keeping type safety.
   336   //
   337   set_f2_as_vfinal_method(adapter());
   338   assert(appendix.not_null(), "needed for linkage state");
   339   release_set_f1(appendix());  // This must be the last one to set (see NOTE above)!
   340   if (!is_secondary_entry()) {
   341     // The interpreter assembly code does not check byte_2,
   342     // but it is used by is_resolved, method_if_resolved, etc.
   343     set_bytecode_2(invoke_code);
   344   }
   345   NOT_PRODUCT(verify(tty));
   346   if (TraceInvokeDynamic) {
   347     this->print(tty, 0);
   348   }
   349 }
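
The NOTE at the top of set_method_handle_common describes a two-phase linking protocol: competing writers claim the entry with a compare-and-exchange on the flags word, the winner fills in f2 and publishes f1 last, and losers wait until f1 is non-null before returning. A condensed stand-alone sketch of that protocol, using std::atomic and std::this_thread::yield in place of Patching_lock and os::yield; all names are hypothetical.

// Illustrative sketch only: the claim-then-publish protocol of
// set_method_handle_common, expressed with std::atomic.  One writer wins the
// CAS on flags and fills in the entry, publishing f1 last; losers spin until
// f1 is non-null and can then use the linked entry.
#include <atomic>
#include <cstdint>
#include <thread>

struct MHEntrySketch {
  std::atomic<intptr_t> flags{0};
  std::atomic<void*>    f2{nullptr};
  std::atomic<void*>    f1{nullptr};   // non-null f1 means "fully linked"

  void link(intptr_t new_flags, void* adapter, void* appendix) {
    intptr_t expected = 0;
    if (!flags.compare_exchange_strong(expected, new_flags)) {
      // A racing thread owns the entry; wait until it publishes f1.
      while (f1.load(std::memory_order_acquire) == nullptr) {
        std::this_thread::yield();
      }
      return;
    }
    f2.store(adapter, std::memory_order_relaxed);
    // f1 is written last with release semantics, so a reader that sees a
    // non-null f1 also sees flags and f2 (mirrors release_set_f1).
    f1.store(appendix, std::memory_order_release);
  }
};
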
   351 methodOop ConstantPoolCacheEntry::method_if_resolved(constantPoolHandle cpool) {
   352   if (is_secondary_entry()) {
   353     if (!is_f1_null())
   354       return f2_as_vfinal_method();
   355     return NULL;
   356   }
   357   // Decode the action of set_method and set_interface_call
   358   Bytecodes::Code invoke_code = bytecode_1();
   359   if (invoke_code != (Bytecodes::Code)0) {
   360     oop f1 = _f1;
   361     if (f1 != NULL) {
   362       switch (invoke_code) {
   363       case Bytecodes::_invokeinterface:
   364         assert(f1->is_klass(), "");
   365         return klassItable::method_for_itable_index(klassOop(f1), f2_as_index());
   366       case Bytecodes::_invokestatic:
   367       case Bytecodes::_invokespecial:
   368         assert(!has_appendix(), "");
   369         assert(f1->is_method(), "");
   370         return methodOop(f1);
   371       }
   372     }
   373   }
   374   invoke_code = bytecode_2();
   375   if (invoke_code != (Bytecodes::Code)0) {
   376     switch (invoke_code) {
   377     case Bytecodes::_invokevirtual:
   378       if (is_vfinal()) {
   379         // invokevirtual
   380         methodOop m = f2_as_vfinal_method();
   381         assert(m->is_method(), "");
   382         return m;
   383       } else {
   384         int holder_index = cpool->uncached_klass_ref_index_at(constant_pool_index());
   385         if (cpool->tag_at(holder_index).is_klass()) {
   386           klassOop klass = cpool->resolved_klass_at(holder_index);
   387           if (!Klass::cast(klass)->oop_is_instance())
   388             klass = SystemDictionary::Object_klass();
   389           return instanceKlass::cast(klass)->method_at_vtable(f2_as_index());
   390         }
   391       }
   392       break;
   393     case Bytecodes::_invokehandle:
   394     case Bytecodes::_invokedynamic:
   395       return f2_as_vfinal_method();
   396     }
   397   }
   398   return NULL;
   399 }
   402 class LocalOopClosure: public OopClosure {
   403  private:
   404   void (*_f)(oop*);
   406  public:
   407   LocalOopClosure(void f(oop*))        { _f = f; }
   408   virtual void do_oop(oop* o)          { _f(o); }
   409   virtual void do_oop(narrowOop *o)    { ShouldNotReachHere(); }
   410 };
   413 void ConstantPoolCacheEntry::oops_do(void f(oop*)) {
   414   LocalOopClosure blk(f);
   415   oop_iterate(&blk);
   416 }
   419 void ConstantPoolCacheEntry::oop_iterate(OopClosure* blk) {
   420   assert(in_words(size()) == 4, "check code below - may need adjustment");
   421   // field[1] is always oop or NULL
   422   blk->do_oop((oop*)&_f1);
   423   if (is_vfinal()) {
   424     blk->do_oop((oop*)&_f2);
   425   }
   426 }
   429 void ConstantPoolCacheEntry::oop_iterate_m(OopClosure* blk, MemRegion mr) {
   430   assert(in_words(size()) == 4, "check code below - may need adjustment");
   431   // field[1] is always oop or NULL
   432   if (mr.contains((oop *)&_f1)) blk->do_oop((oop*)&_f1);
   433   if (is_vfinal()) {
   434     if (mr.contains((oop *)&_f2)) blk->do_oop((oop*)&_f2);
   435   }
   436 }
   439 void ConstantPoolCacheEntry::follow_contents() {
   440   assert(in_words(size()) == 4, "check code below - may need adjustment");
   441   // field[1] is always oop or NULL
   442   MarkSweep::mark_and_push((oop*)&_f1);
   443   if (is_vfinal()) {
   444     MarkSweep::mark_and_push((oop*)&_f2);
   445   }
   446 }
   448 #ifndef SERIALGC
   449 void ConstantPoolCacheEntry::follow_contents(ParCompactionManager* cm) {
   450   assert(in_words(size()) == 4, "check code below - may need adjustment");
   451   // field[1] is always oop or NULL
   452   PSParallelCompact::mark_and_push(cm, (oop*)&_f1);
   453   if (is_vfinal()) {
   454     PSParallelCompact::mark_and_push(cm, (oop*)&_f2);
   455   }
   456 }
   457 #endif // SERIALGC
   459 void ConstantPoolCacheEntry::adjust_pointers() {
   460   assert(in_words(size()) == 4, "check code below - may need adjustment");
   461   // field[1] is always oop or NULL
   462   MarkSweep::adjust_pointer((oop*)&_f1);
   463   if (is_vfinal()) {
   464     MarkSweep::adjust_pointer((oop*)&_f2);
   465   }
   466 }
   468 #ifndef SERIALGC
   469 void ConstantPoolCacheEntry::update_pointers() {
   470   assert(in_words(size()) == 4, "check code below - may need adjustment");
   471   // field[1] is always oop or NULL
   472   PSParallelCompact::adjust_pointer((oop*)&_f1);
   473   if (is_vfinal()) {
   474     PSParallelCompact::adjust_pointer((oop*)&_f2);
   475   }
   476 }
   477 #endif // SERIALGC
   479 // RedefineClasses() API support:
   480 // If this constantPoolCacheEntry refers to old_method then update it
   481 // to refer to new_method.
   482 bool ConstantPoolCacheEntry::adjust_method_entry(methodOop old_method,
   483        methodOop new_method, bool * trace_name_printed) {
   485   if (is_vfinal()) {
   486     // virtual and final so _f2 contains method ptr instead of vtable index
   487     if (f2_as_vfinal_method() == old_method) {
   488       // match old_method so need an update
   489       // NOTE: can't use set_f2_as_vfinal_method as it asserts on different values
   490       _f2 = (intptr_t)new_method;
   491       if (RC_TRACE_IN_RANGE(0x00100000, 0x00400000)) {
   492         if (!(*trace_name_printed)) {
   493           // RC_TRACE_MESG macro has an embedded ResourceMark
   494           RC_TRACE_MESG(("adjust: name=%s",
   495             Klass::cast(old_method->method_holder())->external_name()));
   496           *trace_name_printed = true;
   497         }
   498         // RC_TRACE macro has an embedded ResourceMark
   499         RC_TRACE(0x00400000, ("cpc vf-entry update: %s(%s)",
   500           new_method->name()->as_C_string(),
   501           new_method->signature()->as_C_string()));
   502       }
   504       return true;
   505     }
   507     // f1() is not used with virtual entries so bail out
   508     return false;
   509   }
   511   if ((oop)_f1 == NULL) {
   512     // NULL f1() means this is a virtual entry so bail out
   513     // We are assuming that the vtable index does not need change.
   514     return false;
   515   }
   517   if ((oop)_f1 == old_method) {
   518     _f1 = new_method;
   519     if (RC_TRACE_IN_RANGE(0x00100000, 0x00400000)) {
   520       if (!(*trace_name_printed)) {
   521         // RC_TRACE_MESG macro has an embedded ResourceMark
   522         RC_TRACE_MESG(("adjust: name=%s",
   523           Klass::cast(old_method->method_holder())->external_name()));
   524         *trace_name_printed = true;
   525       }
   526       // RC_TRACE macro has an embedded ResourceMark
   527       RC_TRACE(0x00400000, ("cpc entry update: %s(%s)",
   528         new_method->name()->as_C_string(),
   529         new_method->signature()->as_C_string()));
   530     }
   532     return true;
   533   }
   535   return false;
   536 }
   538 bool ConstantPoolCacheEntry::is_interesting_method_entry(klassOop k) {
   539   if (!is_method_entry()) {
   540     // not a method entry so not interesting by default
   541     return false;
   542   }
   544   methodOop m = NULL;
   545   if (is_vfinal()) {
   546     // virtual and final so _f2 contains method ptr instead of vtable index
   547     m = f2_as_vfinal_method();
   548   } else if (is_f1_null()) {
   549     // NULL _f1 means this is a virtual entry so also not interesting
   550     return false;
   551   } else {
   552     oop f1 = _f1;  // _f1 is volatile
   553     if (!f1->is_method()) {
   554       // _f1 can also contain a klassOop for an interface
   555       return false;
   556     }
   557     m = f1_as_method();
   558   }
   560   assert(m != NULL && m->is_method(), "sanity check");
   561   if (m == NULL || !m->is_method() || m->method_holder() != k) {
   562     // robustness for above sanity checks or method is not in
   563     // the interesting class
   564     return false;
   565   }
   567   // the method is in the interesting class so the entry is interesting
   568   return true;
   569 }
   571 void ConstantPoolCacheEntry::print(outputStream* st, int index) const {
   572   // print separator
   573   if (index == 0) st->print_cr("                 -------------");
   574   // print entry
   575   st->print("%3d  ("PTR_FORMAT")  ", index, (intptr_t)this);
   576   if (is_secondary_entry())
   577     st->print_cr("[%5d|secondary]", main_entry_index());
   578   else
   579     st->print_cr("[%02x|%02x|%5d]", bytecode_2(), bytecode_1(), constant_pool_index());
   580   st->print_cr("                 [   "PTR_FORMAT"]", (intptr_t)(oop)_f1);
   581   st->print_cr("                 [   "PTR_FORMAT"]", (intptr_t)_f2);
   582   st->print_cr("                 [   "PTR_FORMAT"]", (intptr_t)_flags);
   583   st->print_cr("                 -------------");
   584 }
   586 void ConstantPoolCacheEntry::verify(outputStream* st) const {
   587   // not implemented yet
   588 }
   590 // Implementation of ConstantPoolCache
   592 void constantPoolCacheOopDesc::initialize(intArray& inverse_index_map) {
   593   assert(inverse_index_map.length() == length(), "inverse index map must have same length as cache");
   594   for (int i = 0; i < length(); i++) {
   595     ConstantPoolCacheEntry* e = entry_at(i);
   596     int original_index = inverse_index_map[i];
   597     if ((original_index & Rewriter::_secondary_entry_tag) != 0) {
   598       int main_index = (original_index - Rewriter::_secondary_entry_tag);
   599       assert(!entry_at(main_index)->is_secondary_entry(), "valid main index");
   600       e->initialize_secondary_entry(main_index);
   601     } else {
   602       e->initialize_entry(original_index);
   603     }
   604     assert(entry_at(i) == e, "sanity");
   605   }
   606 }
   608 // RedefineClasses() API support:
   609 // If any entry of this constantPoolCache points to any of
   610 // old_methods, replace it with the corresponding new_method.
   611 void constantPoolCacheOopDesc::adjust_method_entries(methodOop* old_methods, methodOop* new_methods,
   612                                                      int methods_length, bool * trace_name_printed) {
   614   if (methods_length == 0) {
   615     // nothing to do if there are no methods
   616     return;
   617   }
   619   // get shorthand for the interesting class
   620   klassOop old_holder = old_methods[0]->method_holder();
   622   for (int i = 0; i < length(); i++) {
   623     if (!entry_at(i)->is_interesting_method_entry(old_holder)) {
   624       // skip uninteresting methods
   625       continue;
   626     }
   628     // The constantPoolCache contains entries for several different
   629     // things, but we only care about methods. In fact, we only care
   630     // about methods in the same class as the one that contains the
   631     // old_methods. At this point, we have an interesting entry.
   633     for (int j = 0; j < methods_length; j++) {
   634       methodOop old_method = old_methods[j];
   635       methodOop new_method = new_methods[j];
   637       if (entry_at(i)->adjust_method_entry(old_method, new_method,
   638           trace_name_printed)) {
   639         // current old_method matched this entry and we updated it so
    640         // break out and get to the next interesting entry if there is one
   641         break;
   642       }
   643     }
   644   }
   645 }
