src/share/vm/oops/cpCacheOop.cpp

author       jrose
date         Sat, 30 Oct 2010 11:45:35 -0700
changeset    2265:d1896d1dda3e
parent       2258:87d6a4d1ecbc
child        2314:f95d63e2154a
permissions  -rw-r--r--

6981788: GC map generator sometimes picks up the wrong kind of instruction operand
Summary: Distinguish pool indexes from cache indexes in recently changed code.
Reviewed-by: never

/*
 * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#include "incls/_precompiled.incl"
#include "incls/_cpCacheOop.cpp.incl"


// Implementation of ConstantPoolCacheEntry

void ConstantPoolCacheEntry::initialize_entry(int index) {
  assert(0 < index && index < 0x10000, "sanity check");
  _indices = index;
  assert(constant_pool_index() == index, "");
}

void ConstantPoolCacheEntry::initialize_secondary_entry(int main_index) {
  assert(0 <= main_index && main_index < 0x10000, "sanity check");
  _indices = (main_index << 16);
  assert(main_entry_index() == main_index, "");
}
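
// For illustration (the accessors and bit layout are declared in cpCacheOop.hpp):
// a main entry keeps its constant pool index in the low 16 bits of _indices,
// while a secondary entry stores the index of its main entry in the upper half
// instead:
//
//   main entry:       _indices = constant_pool_index       (bits 0..15)
//   secondary entry:  _indices = main_entry_index << 16    (bits 16..31)
//
// constant_pool_index() and main_entry_index() read these values back out.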
int ConstantPoolCacheEntry::as_flags(TosState state, bool is_final,
                    bool is_vfinal, bool is_volatile,
                    bool is_method_interface, bool is_method) {
  int f = state;

  assert( state < number_of_states, "Invalid state in as_flags");

  f <<= 1;
  if (is_final) f |= 1;
  f <<= 1;
  if (is_vfinal) f |= 1;
  f <<= 1;
  if (is_volatile) f |= 1;
  f <<= 1;
  if (is_method_interface) f |= 1;
  f <<= 1;
  if (is_method) f |= 1;
  f <<= ConstantPoolCacheEntry::hotSwapBit;
  // Preserve existing flag bit values
#ifdef ASSERT
  int old_state = ((_flags >> tosBits) & 0x0F);
  assert(old_state == 0 || old_state == state,
         "inconsistent cpCache flags state");
#endif
  return (_flags | f) ;
}
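
// For illustration, as_flags() packs its arguments from high bits to low and
// then shifts the whole group up by hotSwapBit, leaving the low bits of the
// returned value free for a field index (set_field) or a parameter size
// (set_method):
//
//   flags ~ [ state | is_final | is_vfinal | is_volatile |
//             is_method_interface | is_method ] << hotSwapBit
//
// The exact bit positions (tosBits, hotSwapBit, field_index_mask) are defined
// in cpCacheOop.hpp.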
void ConstantPoolCacheEntry::set_bytecode_1(Bytecodes::Code code) {
#ifdef ASSERT
  // Read once.
  volatile Bytecodes::Code c = bytecode_1();
  assert(c == 0 || c == code || code == 0, "update must be consistent");
#endif
  // Need to flush pending stores here before bytecode is written.
  OrderAccess::release_store_ptr(&_indices, _indices | ((u_char)code << 16));
}

void ConstantPoolCacheEntry::set_bytecode_2(Bytecodes::Code code) {
#ifdef ASSERT
  // Read once.
  volatile Bytecodes::Code c = bytecode_2();
  assert(c == 0 || c == code || code == 0, "update must be consistent");
#endif
  // Need to flush pending stores here before bytecode is written.
  OrderAccess::release_store_ptr(&_indices, _indices | ((u_char)code << 24));
}
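
// For illustration: the resolved bytecodes occupy the upper bytes of _indices
// (bits 16..23 for bytecode_1, bits 24..31 for bytecode_2), above the 16-bit
// constant pool index.  The release_store_ptr calls above are the writer half
// of the protocol described before set_field below: f1/f2 must reach memory
// before a non-zero bytecode does, so a reader who observes the bytecode also
// observes valid f1/f2 values.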
// Atomically sets f1 if it is still NULL, otherwise it keeps the
// current value.
void ConstantPoolCacheEntry::set_f1_if_null_atomic(oop f1) {
  // Use barriers as in oop_store
  HeapWord* f1_addr = (HeapWord*) &_f1;
  update_barrier_set_pre(f1_addr, f1);
  void* result = Atomic::cmpxchg_ptr(f1, f1_addr, NULL);
  bool success = (result == NULL);
  if (success) {
    update_barrier_set((void*) f1_addr, f1);
  }
}
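
// The sequence above mirrors oop_store for a CAS-installed oop field: the
// pre-barrier runs before the compare-and-swap, and the post-barrier (card
// mark) runs only if this thread actually installed f1.  A thread that loses
// the race simply leaves the winner's value in place.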
#ifdef ASSERT
// It is possible to have two different dummy methodOops created
// when the resolve code for invokeinterface executes concurrently.
// Hence the assertion below is weakened a bit for the invokeinterface
// case.
bool ConstantPoolCacheEntry::same_methodOop(oop cur_f1, oop f1) {
  return (cur_f1 == f1 || ((methodOop)cur_f1)->name() ==
         ((methodOop)f1)->name() || ((methodOop)cur_f1)->signature() ==
         ((methodOop)f1)->signature());
}
#endif

// Note that concurrent update of both bytecodes can leave one of them
// reset to zero.  This is harmless; the interpreter will simply re-resolve
// the damaged entry.  More seriously, the memory synchronization is needed
// to flush other fields (f1, f2) completely to memory before the bytecodes
// are updated, lest other processors see a non-zero bytecode but zero f1/f2.
void ConstantPoolCacheEntry::set_field(Bytecodes::Code get_code,
                                       Bytecodes::Code put_code,
                                       KlassHandle field_holder,
                                       int orig_field_index,
                                       int field_offset,
                                       TosState field_type,
                                       bool is_final,
                                       bool is_volatile) {
  set_f1(field_holder());
  set_f2(field_offset);
  // The field index is used by jvm/ti and is the index into fields() array
  // in holder instanceKlass.  This is scaled by instanceKlass::next_offset.
  assert((orig_field_index % instanceKlass::next_offset) == 0, "weird index");
  const int field_index = orig_field_index / instanceKlass::next_offset;
  assert(field_index <= field_index_mask,
         "field index does not fit in low flag bits");
  set_flags(as_flags(field_type, is_final, false, is_volatile, false, false) |
            (field_index & field_index_mask));
  set_bytecode_1(get_code);
  set_bytecode_2(put_code);
  NOT_PRODUCT(verify(tty));
}

int  ConstantPoolCacheEntry::field_index() const {
  return (_flags & field_index_mask) * instanceKlass::next_offset;
}
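
// For illustration, a resolved getfield/putfield entry therefore looks
// roughly like:
//
//   _f1    = holder klassOop
//   _f2    = field offset
//   _flags = as_flags(field_type, is_final, ...) | field_index
//
// with the descaled field index in the low flag bits, recovered for JVM/TI by
// field_index() above.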
void ConstantPoolCacheEntry::set_method(Bytecodes::Code invoke_code,
                                        methodHandle method,
                                        int vtable_index) {
  assert(!is_secondary_entry(), "");
  assert(method->interpreter_entry() != NULL, "should have been set at this point");
  assert(!method->is_obsolete(),  "attempt to write obsolete method to cpCache");
  bool change_to_virtual = (invoke_code == Bytecodes::_invokeinterface);

  int byte_no = -1;
  bool needs_vfinal_flag = false;
  switch (invoke_code) {
    case Bytecodes::_invokevirtual:
    case Bytecodes::_invokeinterface: {
        if (method->can_be_statically_bound()) {
          set_f2((intptr_t)method());
          needs_vfinal_flag = true;
        } else {
          assert(vtable_index >= 0, "valid index");
          set_f2(vtable_index);
        }
        byte_no = 2;
        break;
    }

    case Bytecodes::_invokedynamic:  // similar to _invokevirtual
      if (TraceInvokeDynamic) {
        tty->print_cr("InvokeDynamic set_method%s method="PTR_FORMAT" index=%d",
                      (is_secondary_entry() ? " secondary" : ""),
                      (intptr_t)method(), vtable_index);
        method->print();
        this->print(tty, 0);
      }
      assert(method->can_be_statically_bound(), "must be a MH invoker method");
      assert(AllowTransitionalJSR292 || _f2 >= constantPoolOopDesc::CPCACHE_INDEX_TAG, "BSM index initialized");
      // SystemDictionary::find_method_handle_invoke only caches
      // methods whose signature classes are on the boot classpath,
      // otherwise the newly created method is returned.  To avoid
      // races in that case we atomically store the first one to arrive
      // in the cp-cache if it's still unset.
      set_f1_if_null_atomic(method());
      needs_vfinal_flag = false;  // _f2 is not an oop
      assert(!is_vfinal(), "f2 not an oop");
      byte_no = 1;  // coordinate this with bytecode_number & is_resolved
      break;

    case Bytecodes::_invokespecial:
      // Preserve the value of the vfinal flag on invokevirtual bytecode
      // which may be shared with this constant pool cache entry.
      needs_vfinal_flag = is_resolved(Bytecodes::_invokevirtual) && is_vfinal();
      // fall through
    case Bytecodes::_invokestatic:
      set_f1(method());
      byte_no = 1;
      break;
    default:
      ShouldNotReachHere();
      break;
  }

  set_flags(as_flags(as_TosState(method->result_type()),
                     method->is_final_method(),
                     needs_vfinal_flag,
                     false,
                     change_to_virtual,
                     true)|
            method()->size_of_parameters());

  // Note:  byte_no also appears in TemplateTable::resolve.
  if (byte_no == 1) {
    set_bytecode_1(invoke_code);
  } else if (byte_no == 2)  {
    if (change_to_virtual) {
      // NOTE: THIS IS A HACK - BE VERY CAREFUL!!!
      //
      // Workaround for the case where we encounter an invokeinterface, but we
      // should really have an _invokevirtual since the resolved method is a
      // virtual method in java.lang.Object. This is a corner case in the spec
      // but is presumably legal. javac does not generate this code.
      //
      // We set bytecode_1() to _invokeinterface, because that is the
      // bytecode # used by the interpreter to see if it is resolved.
      // We set bytecode_2() to _invokevirtual.
      // See also interpreterRuntime.cpp. (8/25/2000)
      // Only set resolved for the invokeinterface case if method is public.
      // Otherwise, the method needs to be re-resolved with caller for each
      // interface call.
      if (method->is_public()) set_bytecode_1(invoke_code);
      set_bytecode_2(Bytecodes::_invokevirtual);
    } else {
      set_bytecode_2(invoke_code);
    }
  } else {
    ShouldNotReachHere();
  }
  NOT_PRODUCT(verify(tty));
}
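
// To summarize the cases above, a resolved invoke entry ends up in roughly one
// of three shapes:
//
//   statically bound (vfinal):      _f2 = methodOop,    bytecode_2 = invokevirtual
//   true virtual dispatch:          _f2 = vtable index, bytecode_2 = invokevirtual
//   invokespecial / invokestatic /
//   invokedynamic:                  _f1 = methodOop,    bytecode_1 = invoke_code
//
// in every case with the result TosState and the parameter size packed into
// _flags by as_flags() above.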
void ConstantPoolCacheEntry::set_interface_call(methodHandle method, int index) {
  assert(!is_secondary_entry(), "");
  klassOop interf = method->method_holder();
  assert(instanceKlass::cast(interf)->is_interface(), "must be an interface");
  set_f1(interf);
  set_f2(index);
  set_flags(as_flags(as_TosState(method->result_type()), method->is_final_method(), false, false, false, true) | method()->size_of_parameters());
  set_bytecode_1(Bytecodes::_invokeinterface);
}


void ConstantPoolCacheEntry::initialize_bootstrap_method_index_in_cache(int bsm_cache_index) {
  assert(!is_secondary_entry(), "only for JVM_CONSTANT_InvokeDynamic main entry");
  assert(_f2 == 0, "initialize once");
  assert(bsm_cache_index == (int)(u2)bsm_cache_index, "oob");
  set_f2(bsm_cache_index + constantPoolOopDesc::CPCACHE_INDEX_TAG);
}

int ConstantPoolCacheEntry::bootstrap_method_index_in_cache() {
  assert(!is_secondary_entry(), "only for JVM_CONSTANT_InvokeDynamic main entry");
  intptr_t bsm_cache_index = (intptr_t) _f2 - constantPoolOopDesc::CPCACHE_INDEX_TAG;
  assert(bsm_cache_index == (intptr_t)(u2)bsm_cache_index, "oob");
  return (int) bsm_cache_index;
}
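
// For illustration: for an invokedynamic main entry, _f2 carries the bootstrap
// method's constant pool cache index biased by CPCACHE_INDEX_TAG.  The bias
// keeps a stored index non-zero, so the "initialize once" assert above and the
// "BSM index initialized" assert in set_method can tell an installed index
// apart from the default _f2 value of 0.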
void ConstantPoolCacheEntry::set_dynamic_call(Handle call_site,
                                              methodHandle signature_invoker) {
  assert(is_secondary_entry(), "");
  int param_size = signature_invoker->size_of_parameters();
  assert(param_size >= 1, "method argument size must include MH.this");
  param_size -= 1;              // do not count MH.this; it is not stacked for invokedynamic
  if (Atomic::cmpxchg_ptr(call_site(), &_f1, NULL) == NULL) {
    // racing threads might be trying to install their own favorites
    set_f1(call_site());
  }
  bool is_final = true;
  assert(signature_invoker->is_final_method(), "is_final");
  set_flags(as_flags(as_TosState(signature_invoker->result_type()), is_final, false, false, false, true) | param_size);
  // do not do set_bytecode on a secondary CP cache entry
  //set_bytecode_1(Bytecodes::_invokedynamic);
}


class LocalOopClosure: public OopClosure {
 private:
  void (*_f)(oop*);

 public:
  LocalOopClosure(void f(oop*))        { _f = f; }
  virtual void do_oop(oop* o)          { _f(o); }
  virtual void do_oop(narrowOop *o)    { ShouldNotReachHere(); }
};


void ConstantPoolCacheEntry::oops_do(void f(oop*)) {
  LocalOopClosure blk(f);
  oop_iterate(&blk);
}


void ConstantPoolCacheEntry::oop_iterate(OopClosure* blk) {
  assert(in_words(size()) == 4, "check code below - may need adjustment");
  // field[1] is always oop or NULL
  blk->do_oop((oop*)&_f1);
  if (is_vfinal()) {
    blk->do_oop((oop*)&_f2);
  }
}


void ConstantPoolCacheEntry::oop_iterate_m(OopClosure* blk, MemRegion mr) {
  assert(in_words(size()) == 4, "check code below - may need adjustment");
  // field[1] is always oop or NULL
  if (mr.contains((oop *)&_f1)) blk->do_oop((oop*)&_f1);
  if (is_vfinal()) {
    if (mr.contains((oop *)&_f2)) blk->do_oop((oop*)&_f2);
  }
}


void ConstantPoolCacheEntry::follow_contents() {
  assert(in_words(size()) == 4, "check code below - may need adjustment");
  // field[1] is always oop or NULL
  MarkSweep::mark_and_push((oop*)&_f1);
  if (is_vfinal()) {
    MarkSweep::mark_and_push((oop*)&_f2);
  }
}

#ifndef SERIALGC
void ConstantPoolCacheEntry::follow_contents(ParCompactionManager* cm) {
  assert(in_words(size()) == 4, "check code below - may need adjustment");
  // field[1] is always oop or NULL
  PSParallelCompact::mark_and_push(cm, (oop*)&_f1);
  if (is_vfinal()) {
    PSParallelCompact::mark_and_push(cm, (oop*)&_f2);
  }
}
#endif // SERIALGC

void ConstantPoolCacheEntry::adjust_pointers() {
  assert(in_words(size()) == 4, "check code below - may need adjustment");
  // field[1] is always oop or NULL
  MarkSweep::adjust_pointer((oop*)&_f1);
  if (is_vfinal()) {
    MarkSweep::adjust_pointer((oop*)&_f2);
  }
}

#ifndef SERIALGC
void ConstantPoolCacheEntry::update_pointers() {
  assert(in_words(size()) == 4, "check code below - may need adjustment");
  // field[1] is always oop or NULL
  PSParallelCompact::adjust_pointer((oop*)&_f1);
  if (is_vfinal()) {
    PSParallelCompact::adjust_pointer((oop*)&_f2);
  }
}

void ConstantPoolCacheEntry::update_pointers(HeapWord* beg_addr,
                                             HeapWord* end_addr) {
  assert(in_words(size()) == 4, "check code below - may need adjustment");
  // field[1] is always oop or NULL
  PSParallelCompact::adjust_pointer((oop*)&_f1, beg_addr, end_addr);
  if (is_vfinal()) {
    PSParallelCompact::adjust_pointer((oop*)&_f2, beg_addr, end_addr);
  }
}
#endif // SERIALGC
// RedefineClasses() API support:
// If this constantPoolCacheEntry refers to old_method then update it
// to refer to new_method.
bool ConstantPoolCacheEntry::adjust_method_entry(methodOop old_method,
       methodOop new_method, bool * trace_name_printed) {

  if (is_vfinal()) {
    // virtual and final so f2() contains method ptr instead of vtable index
    if (f2() == (intptr_t)old_method) {
      // match old_method so need an update
      _f2 = (intptr_t)new_method;
      if (RC_TRACE_IN_RANGE(0x00100000, 0x00400000)) {
        if (!(*trace_name_printed)) {
          // RC_TRACE_MESG macro has an embedded ResourceMark
          RC_TRACE_MESG(("adjust: name=%s",
            Klass::cast(old_method->method_holder())->external_name()));
          *trace_name_printed = true;
        }
        // RC_TRACE macro has an embedded ResourceMark
        RC_TRACE(0x00400000, ("cpc vf-entry update: %s(%s)",
          new_method->name()->as_C_string(),
          new_method->signature()->as_C_string()));
      }

      return true;
    }

    // f1() is not used with virtual entries so bail out
    return false;
  }

  if ((oop)_f1 == NULL) {
    // NULL f1() means this is a virtual entry so bail out
    // We are assuming that the vtable index does not need to change.
    return false;
  }

  if ((oop)_f1 == old_method) {
    _f1 = new_method;
    if (RC_TRACE_IN_RANGE(0x00100000, 0x00400000)) {
      if (!(*trace_name_printed)) {
        // RC_TRACE_MESG macro has an embedded ResourceMark
        RC_TRACE_MESG(("adjust: name=%s",
          Klass::cast(old_method->method_holder())->external_name()));
        *trace_name_printed = true;
      }
      // RC_TRACE macro has an embedded ResourceMark
      RC_TRACE(0x00400000, ("cpc entry update: %s(%s)",
        new_method->name()->as_C_string(),
        new_method->signature()->as_C_string()));
    }

    return true;
  }

  return false;
}

bool ConstantPoolCacheEntry::is_interesting_method_entry(klassOop k) {
  if (!is_method_entry()) {
    // not a method entry so not interesting by default
    return false;
  }

  methodOop m = NULL;
  if (is_vfinal()) {
    // virtual and final so _f2 contains method ptr instead of vtable index
    m = (methodOop)_f2;
  } else if ((oop)_f1 == NULL) {
    // NULL _f1 means this is a virtual entry so also not interesting
    return false;
  } else {
    if (!((oop)_f1)->is_method()) {
      // _f1 can also contain a klassOop for an interface
      return false;
    }
    m = (methodOop)_f1;
  }

  assert(m != NULL && m->is_method(), "sanity check");
  if (m == NULL || !m->is_method() || m->method_holder() != k) {
    // robustness for above sanity checks or method is not in
    // the interesting class
    return false;
  }

  // the method is in the interesting class so the entry is interesting
  return true;
}

void ConstantPoolCacheEntry::print(outputStream* st, int index) const {
  // print separator
  if (index == 0) tty->print_cr("                 -------------");
  // print entry
  tty->print("%3d  ("PTR_FORMAT")  ", index, (intptr_t)this);
  if (is_secondary_entry())
    tty->print_cr("[%5d|secondary]", main_entry_index());
  else
    tty->print_cr("[%02x|%02x|%5d]", bytecode_2(), bytecode_1(), constant_pool_index());
  tty->print_cr("                 [   "PTR_FORMAT"]", (intptr_t)(oop)_f1);
  tty->print_cr("                 [   "PTR_FORMAT"]", (intptr_t)_f2);
  tty->print_cr("                 [   "PTR_FORMAT"]", (intptr_t)_flags);
  tty->print_cr("                 -------------");
}

void ConstantPoolCacheEntry::verify(outputStream* st) const {
  // not implemented yet
}

// Implementation of ConstantPoolCache

void constantPoolCacheOopDesc::initialize(intArray& inverse_index_map) {
  assert(inverse_index_map.length() == length(), "inverse index map must have same length as cache");
  for (int i = 0; i < length(); i++) {
    ConstantPoolCacheEntry* e = entry_at(i);
    int original_index = inverse_index_map[i];
    if ((original_index & Rewriter::_secondary_entry_tag) != 0) {
      int main_index = (original_index - Rewriter::_secondary_entry_tag);
      assert(!entry_at(main_index)->is_secondary_entry(), "valid main index");
      e->initialize_secondary_entry(main_index);
    } else {
      e->initialize_entry(original_index);
    }
    assert(entry_at(i) == e, "sanity");
  }
}
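
// For illustration: inverse_index_map (supplied by the rewriter) maps each
// cache slot back to its origin.  A plain slot carries the original constant
// pool index; a slot tagged with Rewriter::_secondary_entry_tag carries the
// index of its main entry instead, matching the two initialize_* variants at
// the top of this file:
//
//   original_index = constant_pool_index                            // main
//   original_index = main_index + Rewriter::_secondary_entry_tag    // secondary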
// RedefineClasses() API support:
// If any entry of this constantPoolCache points to any of
// old_methods, replace it with the corresponding new_method.
void constantPoolCacheOopDesc::adjust_method_entries(methodOop* old_methods, methodOop* new_methods,
                                                     int methods_length, bool * trace_name_printed) {

  if (methods_length == 0) {
    // nothing to do if there are no methods
    return;
  }

  // get shorthand for the interesting class
  klassOop old_holder = old_methods[0]->method_holder();

  for (int i = 0; i < length(); i++) {
    if (!entry_at(i)->is_interesting_method_entry(old_holder)) {
      // skip uninteresting methods
      continue;
    }

    // The constantPoolCache contains entries for several different
    // things, but we only care about methods. In fact, we only care
    // about methods in the same class as the one that contains the
    // old_methods. At this point, we have an interesting entry.

    for (int j = 0; j < methods_length; j++) {
      methodOop old_method = old_methods[j];
      methodOop new_method = new_methods[j];

      if (entry_at(i)->adjust_method_entry(old_method, new_method,
          trace_name_printed)) {
        // current old_method matched this entry and we updated it so
        // break out and get to the next interesting entry if there is one
        break;
      }
    }
  }
}
