src/share/vm/interpreter/rewriter.cpp

changeset:   6081:41cb10cbfb3c
parent:      4712:3efdfd6ddbf2
child:       6121:d61a1a166f44
author:      coleenp
date:        Wed, 13 Nov 2013 16:42:24 -0500
permissions: -rw-r--r--

8025937: assert(existing_f1 == NULL || existing_f1 == f1) failed: illegal field change
Summary: Create extra constant pool cache entries for invokespecial/InterfaceMethodref to hold the alternate resolution.
Reviewed-by: jrose, lfoltan, hseigel

/*
 * Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "interpreter/bytecodes.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/rewriter.hpp"
#include "memory/gcLocker.hpp"
#include "memory/resourceArea.hpp"
#include "oops/generateOopMap.hpp"
#include "prims/methodHandles.hpp"

// Computes a CPC map (new_index -> original_index) for constant pool entries
// that are referred to by the interpreter at runtime via the constant pool cache.
// Also computes a CP map (original_index -> new_index).
// Marks entries in CP which require additional processing.
void Rewriter::compute_index_maps() {
  const int length  = _pool->length();
  init_maps(length);
  bool saw_mh_symbol = false;
  for (int i = 0; i < length; i++) {
    int tag = _pool->tag_at(i).value();
    switch (tag) {
      case JVM_CONSTANT_InterfaceMethodref:
      case JVM_CONSTANT_Fieldref          : // fall through
      case JVM_CONSTANT_Methodref         : // fall through
        add_cp_cache_entry(i);
        break;
      case JVM_CONSTANT_String:
      case JVM_CONSTANT_MethodHandle      : // fall through
      case JVM_CONSTANT_MethodType        : // fall through
        add_resolved_references_entry(i);
        break;
      case JVM_CONSTANT_Utf8:
        if (_pool->symbol_at(i) == vmSymbols::java_lang_invoke_MethodHandle())
          saw_mh_symbol = true;
        break;
    }
  }

  // Record limits of resolved reference map for constant pool cache indices
  record_map_limits();

  guarantee((int)_cp_cache_map.length()-1 <= (int)((u2)-1),
            "all cp cache indexes fit in a u2");

  if (saw_mh_symbol)
    _method_handle_invokers.initialize(length, (int)0);
}

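// The two-way index map that compute_index_maps() builds can be modeled, outside
// of HotSpot, with one table per direction: a forward table keyed by the original
// constant pool index and a compact list of cached indices.  The sketch below is
// a minimal, self-contained illustration only (std:: containers instead of
// HotSpot's growable arrays; all names are invented for the example).
#include <vector>
#include <cassert>

struct IndexMaps {
  std::vector<int> cp_to_cache;   // original CP index -> cache index, -1 if none
  std::vector<int> cache_to_cp;   // cache index -> original CP index

  explicit IndexMaps(int cp_length) : cp_to_cache(cp_length, -1) {}

  // In the spirit of add_cp_cache_entry(): append a cache slot for a CP entry
  // and remember the mapping in both directions.
  int add_cache_entry(int cp_index) {
    int cache_index = (int)cache_to_cp.size();
    cache_to_cp.push_back(cp_index);
    cp_to_cache[cp_index] = cache_index;
    assert(cache_index <= 0xFFFF && "all cache indexes must fit in a u2 operand");
    return cache_index;
  }
};
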
// Unrewrite the bytecodes if an error occurs.
void Rewriter::restore_bytecodes(TRAPS) {
  int len = _methods->length();

  for (int i = len-1; i >= 0; i--) {
    Method* method = _methods->at(i);
    scan_method(method, true, CHECK);
  }
}

// Creates a constant pool cache given a CPC map
void Rewriter::make_constant_pool_cache(TRAPS) {
  ClassLoaderData* loader_data = _pool->pool_holder()->class_loader_data();
  ConstantPoolCache* cache =
      ConstantPoolCache::allocate(loader_data, _cp_cache_map,
                                  _invokedynamic_cp_cache_map,
                                  _invokedynamic_references_map, CHECK);

  // initialize object cache in constant pool
  _pool->initialize_resolved_references(loader_data, _resolved_references_map,
                                        _resolved_reference_limit,
                                        CHECK);
  _pool->set_cache(cache);
  cache->set_constant_pool(_pool());
}

// The new finalization semantics says that registration of
// finalizable objects must be performed on successful return from the
// Object.<init> constructor.  We could implement this trivially if
// <init> were never rewritten but since JVMTI allows this to occur, a
// more complicated solution is required.  A special return bytecode
// is used only by Object.<init> to signal the finalization
// registration point.  Additionally local 0 must be preserved so it's
// available to pass to the registration function.  For simplicity we
// require that local 0 is never overwritten so it's available as an
// argument for registration.

void Rewriter::rewrite_Object_init(methodHandle method, TRAPS) {
  RawBytecodeStream bcs(method);
  while (!bcs.is_last_bytecode()) {
    Bytecodes::Code opcode = bcs.raw_next();
    switch (opcode) {
      case Bytecodes::_return: *bcs.bcp() = Bytecodes::_return_register_finalizer; break;

      case Bytecodes::_istore:
      case Bytecodes::_lstore:
      case Bytecodes::_fstore:
      case Bytecodes::_dstore:
      case Bytecodes::_astore:
        if (bcs.get_index() != 0) continue;

        // fall through
      case Bytecodes::_istore_0:
      case Bytecodes::_lstore_0:
      case Bytecodes::_fstore_0:
      case Bytecodes::_dstore_0:
      case Bytecodes::_astore_0:
        THROW_MSG(vmSymbols::java_lang_IncompatibleClassChangeError(),
                  "can't overwrite local 0 in Object.<init>");
        break;
    }
  }
}

// Rewrite a classfile-order CP index into a native-order CPC index.
void Rewriter::rewrite_member_reference(address bcp, int offset, bool reverse) {
  address p = bcp + offset;
  if (!reverse) {
    int  cp_index    = Bytes::get_Java_u2(p);
    int  cache_index = cp_entry_to_cp_cache(cp_index);
    Bytes::put_native_u2(p, cache_index);
    if (!_method_handle_invokers.is_empty())
      maybe_rewrite_invokehandle(p - 1, cp_index, cache_index, reverse);
  } else {
    int cache_index = Bytes::get_native_u2(p);
    int pool_index = cp_cache_entry_pool_index(cache_index);
    Bytes::put_Java_u2(p, pool_index);
    if (!_method_handle_invokers.is_empty())
      maybe_rewrite_invokehandle(p - 1, pool_index, cache_index, reverse);
  }
}

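// The forward and reverse paths above differ mainly in byte order: class files
// store u2 operands big-endian ("Java order"), while the rewritten operand is
// stored in the CPU's native order so the interpreter can load it directly.
// Below is a minimal, self-contained sketch of that in-place operand swap
// (plain C++, no HotSpot types; the helper names are invented for the example).
#include <cstdint>
#include <cstring>

static uint16_t get_big_endian_u2(const uint8_t* p) {
  return (uint16_t)((p[0] << 8) | p[1]);            // classfile (Java) order
}

static void put_host_order_u2(uint8_t* p, uint16_t v) {
  std::memcpy(p, &v, sizeof(v));                    // whatever the host order is
}

// Usage: given a 3-byte instruction {opcode, indexbyte1, indexbyte2}, replace
// the big-endian constant pool index with a host-order cache index.
static void rewrite_operand(uint8_t* bcp, uint16_t cache_index) {
  uint16_t cp_index = get_big_endian_u2(bcp + 1);   // index as the class file wrote it
  (void)cp_index;                                   // a real rewriter would map it to cache_index
  put_host_order_u2(bcp + 1, cache_index);          // store the new index in host order
}
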
// If the constant pool entry for invokespecial is an InterfaceMethodref,
// we need to add a separate cpCache entry for its resolution, because it is
// different from the resolution of invokeinterface with the same
// InterfaceMethodref.  These cannot share cpCache entries.  It's unclear
// whether all invokespecials of InterfaceMethodrefs would resolve to the same
// thing, so a new cpCache entry is created for each one.  This was added with
// lambda support.
void Rewriter::rewrite_invokespecial(address bcp, int offset, bool reverse, TRAPS) {
  static int count = 0;
  address p = bcp + offset;
  if (!reverse) {
    int cp_index = Bytes::get_Java_u2(p);
    int cache_index = add_invokespecial_cp_cache_entry(cp_index);
    if (cache_index != (int)(jushort) cache_index) {
      THROW_MSG(vmSymbols::java_lang_InternalError(),
                "This classfile overflows invokespecial for interfaces "
                "and cannot be loaded");
    }
    Bytes::put_native_u2(p, cache_index);
  } else {
    int cache_index = Bytes::get_native_u2(p);
    int cp_index = cp_cache_entry_pool_index(cache_index);
    Bytes::put_Java_u2(p, cp_index);
  }
}

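// The guard above rejects a class whose extra invokespecial cache entries push
// the index past what a u2 bytecode operand can hold.  The round-trip cast
// (narrow to 16 bits, widen back, compare) is a compact overflow test.  A
// minimal, self-contained sketch of the same idiom, using standard types only:
#include <cstdint>

static bool fits_in_u2(int index) {
  // True only if narrowing to 16 bits and widening back preserves the value,
  // i.e. index is in [0, 65535].
  return index == (int)(uint16_t)index;
}
// Usage: fits_in_u2(65535) is true; fits_in_u2(65536) and fits_in_u2(-1) are false.
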
// Adjust the invocation bytecode for a signature-polymorphic method (MethodHandle.invoke, etc.)
void Rewriter::maybe_rewrite_invokehandle(address opc, int cp_index, int cache_index, bool reverse) {
  if (!reverse) {
    if ((*opc) == (u1)Bytecodes::_invokevirtual ||
        // allow invokespecial as an alias, although it would be very odd:
        (*opc) == (u1)Bytecodes::_invokespecial) {
      assert(_pool->tag_at(cp_index).is_method(), "wrong index");
      // Determine whether this is a signature-polymorphic method.
      if (cp_index >= _method_handle_invokers.length())  return;
      int status = _method_handle_invokers[cp_index];
      assert(status >= -1 && status <= 1, "oob tri-state");
      if (status == 0) {
        if (_pool->klass_ref_at_noresolve(cp_index) == vmSymbols::java_lang_invoke_MethodHandle() &&
            MethodHandles::is_signature_polymorphic_name(SystemDictionary::MethodHandle_klass(),
                                                         _pool->name_ref_at(cp_index))) {
          // we may need a resolved_refs entry for the appendix
          add_invokedynamic_resolved_references_entries(cp_index, cache_index);
          status = +1;
        } else {
          status = -1;
        }
        _method_handle_invokers[cp_index] = status;
      }
      // We use a special internal bytecode for such methods (if non-static).
      // The basic reason for this is that such methods need an extra "appendix" argument
      // to transmit the call site's intended call type.
      if (status > 0) {
        (*opc) = (u1)Bytecodes::_invokehandle;
      }
    }
  } else {
    // Do not need to look at cp_index.
    if ((*opc) == (u1)Bytecodes::_invokehandle) {
      (*opc) = (u1)Bytecodes::_invokevirtual;
      // Ignore corner case of original _invokespecial instruction.
      // This is safe because (a) the signature polymorphic method was final, and
      // (b) the implementation of MethodHandle will not call invokespecial on it.
    }
  }
}

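// _method_handle_invokers acts as a lazily filled tri-state memo: 0 = not yet
// checked, +1 = signature-polymorphic MethodHandle invoker, -1 = checked and
// ordinary.  Memoizing per CP index means the comparatively expensive name and
// class check runs at most once per constant pool entry, no matter how many
// bytecodes reference it.  A minimal, self-contained sketch of that pattern
// (invented names; the real predicate lives in MethodHandles):
#include <vector>
#include <functional>

class TriStateMemo {
  std::vector<signed char> _status;        // 0 unknown, +1 yes, -1 no
  std::function<bool(int)> _predicate;     // expensive check, run once per index
public:
  TriStateMemo(int length, std::function<bool(int)> pred)
    : _status(length, 0), _predicate(std::move(pred)) {}

  bool is_invoker(int cp_index) {
    signed char s = _status[cp_index];
    if (s == 0) {                          // first query: evaluate and remember
      s = _predicate(cp_index) ? +1 : -1;
      _status[cp_index] = s;
    }
    return s > 0;
  }
};
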
void Rewriter::rewrite_invokedynamic(address bcp, int offset, bool reverse) {
  address p = bcp + offset;
  assert(p[-1] == Bytecodes::_invokedynamic, "not invokedynamic bytecode");
  if (!reverse) {
    int cp_index = Bytes::get_Java_u2(p);
    int cache_index = add_invokedynamic_cp_cache_entry(cp_index);
    int resolved_index = add_invokedynamic_resolved_references_entries(cp_index, cache_index);
    // Replace the trailing four bytes with a CPC index for the dynamic
    // call site.  Unlike other CPC entries, there is one per bytecode,
    // not just one per distinct CP entry.  In other words, the
    // CPC-to-CP relation is many-to-one for invokedynamic entries.
    // This means we must use a larger index size than u2 to address
    // all these entries.  That is the main reason invokedynamic
    // must have a five-byte instruction format.  (Of course, other JVM
    // implementations can use the bytes for other purposes.)
    // Note: We use native_u4 format exclusively for 4-byte indexes.
    Bytes::put_native_u4(p, ConstantPool::encode_invokedynamic_index(cache_index));
    // add the bcp in case we need to patch this bytecode if we also find an
    // invokespecial/InterfaceMethodref in the bytecode stream
    _patch_invokedynamic_bcps->push(p);
    _patch_invokedynamic_refs->push(resolved_index);
  } else {
    int cache_index = ConstantPool::decode_invokedynamic_index(
                        Bytes::get_native_u4(p));
    // We will reverse the bytecode rewriting _after_ adjusting them.
    // Adjust the cache index by offset to the invokedynamic entries in the
    // cpCache plus the delta if the invokedynamic bytecodes were adjusted.
    cache_index = cp_cache_delta() + _first_iteration_cp_cache_limit;
    int cp_index = invokedynamic_cp_cache_entry_pool_index(cache_index);
    assert(_pool->tag_at(cp_index).is_invoke_dynamic(), "wrong index");
    // zero out 4 bytes
    Bytes::put_Java_u4(p, 0);
    Bytes::put_Java_u2(p, cp_index);
  }
}

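// invokedynamic gets a four-byte, native-order operand so that the
// one-cache-entry-per-bytecode scheme cannot run out of index space, and so
// that an encoded cache index can never be confused with an untouched constant
// pool index.  One simple encoding with that property (the real scheme is
// ConstantPool's business and may differ) is the bitwise complement, which
// makes every encoded value negative.  A minimal, self-contained sketch:
#include <cstdint>
#include <cstring>
#include <cassert>

static int32_t encode_indy_index(int32_t cache_index) { return ~cache_index; }
static int32_t decode_indy_index(int32_t encoded)     { return ~encoded; }

// Overwrite the 4 operand bytes of an invokedynamic with the encoded index,
// in host byte order, then read it back and decode it.
static void patch_and_check(uint8_t* operand, int32_t cache_index) {
  int32_t encoded = encode_indy_index(cache_index);
  std::memcpy(operand, &encoded, sizeof(encoded));   // put_native_u4 analogue

  int32_t read_back;
  std::memcpy(&read_back, operand, sizeof(read_back));
  assert(decode_indy_index(read_back) == cache_index);
}
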
void Rewriter::patch_invokedynamic_bytecodes() {
  // If the end of the cp_cache is the same as after initializing with the
  // cpool, nothing needs to be done.  Invokedynamic bytecodes are at the
  // correct offsets, i.e. no invokespecials were added.
  int delta = cp_cache_delta();
  if (delta > 0) {
    int length = _patch_invokedynamic_bcps->length();
    assert(length == _patch_invokedynamic_refs->length(),
           "lengths should match");
    for (int i = 0; i < length; i++) {
      address p = _patch_invokedynamic_bcps->at(i);
      int cache_index = ConstantPool::decode_invokedynamic_index(
                          Bytes::get_native_u4(p));
      Bytes::put_native_u4(p, ConstantPool::encode_invokedynamic_index(cache_index + delta));

      // invokedynamic resolved references map also points to cp cache and must
      // add delta to each.
      int resolved_index = _patch_invokedynamic_refs->at(i);
      for (int entry = 0; entry < ConstantPoolCacheEntry::_indy_resolved_references_entries; entry++) {
        assert(_invokedynamic_references_map[resolved_index+entry] == cache_index,
             "should be the same index");
        _invokedynamic_references_map.at_put(resolved_index+entry,
                                             cache_index + delta);
      }
    }
  }
}

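// When invokespecial/InterfaceMethodref entries are appended after the
// invokedynamic entries, every already-written invokedynamic operand is off by
// the same fixed delta, so the rewriter records each patch site up front and
// slides the indices afterwards.  A minimal, self-contained sketch of that
// record-then-shift pattern (invented names, plain std:: containers, with the
// index stored unencoded to keep the example simple):
#include <vector>
#include <cstdint>

struct PatchSite {
  int32_t* operand;          // where the cache index was written during the scan
};

static void apply_delta(std::vector<PatchSite>& sites, int32_t delta) {
  if (delta == 0) return;    // nothing was appended; the indices are already right
  for (PatchSite& s : sites) {
    *s.operand += delta;     // slide every recorded index by the same amount
  }
}
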
// Rewrite some ldc bytecodes to _fast_aldc
void Rewriter::maybe_rewrite_ldc(address bcp, int offset, bool is_wide,
                                 bool reverse) {
  if (!reverse) {
    assert((*bcp) == (is_wide ? Bytecodes::_ldc_w : Bytecodes::_ldc), "not ldc bytecode");
    address p = bcp + offset;
    int cp_index = is_wide ? Bytes::get_Java_u2(p) : (u1)(*p);
    constantTag tag = _pool->tag_at(cp_index).value();
    if (tag.is_method_handle() || tag.is_method_type() || tag.is_string()) {
      int ref_index = cp_entry_to_resolved_references(cp_index);
      if (is_wide) {
        (*bcp) = Bytecodes::_fast_aldc_w;
        assert(ref_index == (u2)ref_index, "index overflow");
        Bytes::put_native_u2(p, ref_index);
      } else {
        (*bcp) = Bytecodes::_fast_aldc;
        assert(ref_index == (u1)ref_index, "index overflow");
        (*p) = (u1)ref_index;
      }
    }
  } else {
    Bytecodes::Code rewritten_bc =
              (is_wide ? Bytecodes::_fast_aldc_w : Bytecodes::_fast_aldc);
    if ((*bcp) == rewritten_bc) {
      address p = bcp + offset;
      int ref_index = is_wide ? Bytes::get_native_u2(p) : (u1)(*p);
      int pool_index = resolved_references_entry_to_pool_index(ref_index);
      if (is_wide) {
        (*bcp) = Bytecodes::_ldc_w;
        assert(pool_index == (u2)pool_index, "index overflow");
        Bytes::put_Java_u2(p, pool_index);
      } else {
        (*bcp) = Bytecodes::_ldc;
        assert(pool_index == (u1)pool_index, "index overflow");
        (*p) = (u1)pool_index;
      }
    }
  }
}

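// ldc carries a one-byte index and ldc_w a two-byte index, so the replacement
// resolved-references index has to fit the same operand width as the original
// bytecode, which is what the asserts above check.  A minimal, self-contained
// sketch of choosing the write by form (invented names; the u1/u2 widths match
// the JVM specification for ldc and ldc_w):
#include <cstdint>
#include <cstring>
#include <cassert>

static void write_ldc_operand(uint8_t* operand, int ref_index, bool is_wide) {
  if (is_wide) {
    assert(ref_index == (uint16_t)ref_index && "index must fit in a u2");
    uint16_t v = (uint16_t)ref_index;
    std::memcpy(operand, &v, sizeof(v));   // host order, as for the other rewrites
  } else {
    assert(ref_index == (uint8_t)ref_index && "index must fit in a u1");
    operand[0] = (uint8_t)ref_index;
  }
}
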
// Rewrites a method given the index_map information
void Rewriter::scan_method(Method* method, bool reverse, TRAPS) {

  int nof_jsrs = 0;
  bool has_monitor_bytecodes = false;

  {
    // We cannot tolerate a GC in this block, because we've
    // cached the bytecodes in 'code_base'. If the Method*
    // moves, the bytecodes will also move.
    No_Safepoint_Verifier nsv;
    Bytecodes::Code c;

    // Bytecodes and their length
    const address code_base = method->code_base();
    const int code_length = method->code_size();

    int bc_length;
    for (int bci = 0; bci < code_length; bci += bc_length) {
      address bcp = code_base + bci;
      int prefix_length = 0;
      c = (Bytecodes::Code)(*bcp);

      // Since we have the code, see if we can get the length
      // directly. Some more complicated bytecodes will report
      // a length of zero, meaning we need to make another method
      // call to calculate the length.
      bc_length = Bytecodes::length_for(c);
      if (bc_length == 0) {
        bc_length = Bytecodes::length_at(method, bcp);

        // length_at will put us at the bytecode after the one modified
        // by 'wide'. We don't currently examine any of the bytecodes
        // modified by wide, but in case we do in the future...
        if (c == Bytecodes::_wide) {
          prefix_length = 1;
          c = (Bytecodes::Code)bcp[1];
        }
      }

      assert(bc_length != 0, "impossible bytecode length");

      switch (c) {
        case Bytecodes::_lookupswitch   : {
#ifndef CC_INTERP
          Bytecode_lookupswitch bc(method, bcp);
          (*bcp) = (
            bc.number_of_pairs() < BinarySwitchThreshold
            ? Bytecodes::_fast_linearswitch
            : Bytecodes::_fast_binaryswitch
          );
#endif
          break;
        }
        case Bytecodes::_fast_linearswitch:
        case Bytecodes::_fast_binaryswitch: {
#ifndef CC_INTERP
          (*bcp) = Bytecodes::_lookupswitch;
#endif
          break;
        }

        case Bytecodes::_invokespecial  : {
          int offset = prefix_length + 1;
          address p = bcp + offset;
          int cp_index = Bytes::get_Java_u2(p);
          // InterfaceMethodref
          if (_pool->tag_at(cp_index).is_interface_method()) {
            rewrite_invokespecial(bcp, offset, reverse, CHECK);
          } else {
            rewrite_member_reference(bcp, offset, reverse);
          }
          break;
        }

        case Bytecodes::_getstatic      : // fall through
        case Bytecodes::_putstatic      : // fall through
        case Bytecodes::_getfield       : // fall through
        case Bytecodes::_putfield       : // fall through
        case Bytecodes::_invokevirtual  : // fall through
        case Bytecodes::_invokestatic   :
        case Bytecodes::_invokeinterface:
        case Bytecodes::_invokehandle   : // if reverse=true
          rewrite_member_reference(bcp, prefix_length+1, reverse);
          break;
        case Bytecodes::_invokedynamic:
          rewrite_invokedynamic(bcp, prefix_length+1, reverse);
          break;
        case Bytecodes::_ldc:
        case Bytecodes::_fast_aldc:  // if reverse=true
          maybe_rewrite_ldc(bcp, prefix_length+1, false, reverse);
          break;
        case Bytecodes::_ldc_w:
        case Bytecodes::_fast_aldc_w:  // if reverse=true
          maybe_rewrite_ldc(bcp, prefix_length+1, true, reverse);
          break;
        case Bytecodes::_jsr            : // fall through
        case Bytecodes::_jsr_w          : nof_jsrs++;                   break;
        case Bytecodes::_monitorenter   : // fall through
        case Bytecodes::_monitorexit    : has_monitor_bytecodes = true; break;
      }
    }
  }

  // Update access flags
  if (has_monitor_bytecodes) {
    method->set_has_monitor_bytecodes();
  }

  // The presence of a jsr bytecode implies that the method might potentially
  // have to be rewritten, so we run the oopMapGenerator on the method
  if (nof_jsrs > 0) {
    method->set_has_jsrs();
    // Second pass will revisit this method.
    assert(method->has_jsrs(), "didn't we just set this?");
  }
}

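// The scan advances by each instruction's length, taken from a static table
// when the length is fixed and computed on the spot for variable-length or
// 'wide'-prefixed instructions (the table reports 0 for those).  A minimal,
// self-contained sketch of that walk over a raw code array (invented names and
// a toy length table; the real lengths come from Bytecodes):
#include <cstdint>
#include <cstddef>

// Toy table: fixed length per opcode, or 0 meaning "compute it".
static int fixed_length_for(uint8_t opcode) {
  switch (opcode) {
    case 0xb1: return 1;   // return
    case 0xb6: return 3;   // invokevirtual
    case 0xc4: return 0;   // wide: depends on the modified instruction
    default:   return 1;   // everything else is one byte in this toy table
  }
}

static int computed_length_at(const uint8_t* bcp) {
  // For 'wide', the modified instruction follows; wide iinc has two extra operand bytes.
  return (bcp[1] == 0x84 /* iinc */) ? 6 : 4;
}

static void walk(uint8_t* code, size_t code_length) {
  for (size_t bci = 0; bci < code_length; ) {
    uint8_t* bcp = code + bci;
    int len = fixed_length_for(*bcp);
    if (len == 0) len = computed_length_at(bcp);
    // ... inspect or rewrite the instruction at bcp here ...
    bci += (size_t)len;
  }
}
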
// After constant pool is created, revisit methods containing jsrs.
methodHandle Rewriter::rewrite_jsrs(methodHandle method, TRAPS) {
  ResourceMark rm(THREAD);
  ResolveOopMapConflicts romc(method);
  methodHandle original_method = method;
  method = romc.do_potential_rewrite(CHECK_(methodHandle()));
  // Update monitor matching info.
  if (romc.monitor_safe()) {
    method->set_guaranteed_monitor_matching();
  }

  return method;
}

void Rewriter::rewrite(instanceKlassHandle klass, TRAPS) {
  ResourceMark rm(THREAD);
  Rewriter     rw(klass, klass->constants(), klass->methods(), CHECK);
  // (That's all, folks.)
}

Rewriter::Rewriter(instanceKlassHandle klass, constantPoolHandle cpool, Array<Method*>* methods, TRAPS)
  : _klass(klass),
    _pool(cpool),
    _methods(methods)
{
  assert(_pool->cache() == NULL, "constant pool cache must not be set yet");

  // determine index maps for Method* rewriting
  compute_index_maps();

  if (RegisterFinalizersAtInit && _klass->name() == vmSymbols::java_lang_Object()) {
    bool did_rewrite = false;
    int i = _methods->length();
    while (i-- > 0) {
      Method* method = _methods->at(i);
      if (method->intrinsic_id() == vmIntrinsics::_Object_init) {
        // rewrite the return bytecodes of Object.<init> to register the
        // object for finalization if needed.
        methodHandle m(THREAD, method);
        rewrite_Object_init(m, CHECK);
        did_rewrite = true;
        break;
      }
    }
    assert(did_rewrite, "must find Object::<init> to rewrite it");
  }

  // rewrite methods, in two passes
  int len = _methods->length();

  for (int i = len-1; i >= 0; i--) {
    Method* method = _methods->at(i);
    scan_method(method, false, CHECK);  // If an error occurs here, there is
                                        // no reversing of the bytecodes.
  }

  // May have to fix invokedynamic bytecodes if invokespecial/InterfaceMethodref
  // entries had to be added.
  patch_invokedynamic_bytecodes();

  // allocate constant pool cache, now that we've seen all the bytecodes
  make_constant_pool_cache(THREAD);

  // Restore bytecodes to their unrewritten state if there are exceptions
  // rewriting bytecodes or allocating the cpCache
  if (HAS_PENDING_EXCEPTION) {
    restore_bytecodes(CATCH);
    return;
  }

  // Relocate after everything, but still do this under the is_rewritten flag,
  // so that methods with jsrs in custom class lists aren't rewritten in the
  // RO section of the shared archive.
  // Relocated bytecodes don't have to be restored, only the cp cache entries
  for (int i = len-1; i >= 0; i--) {
    methodHandle m(THREAD, _methods->at(i));

    if (m->has_jsrs()) {
      m = rewrite_jsrs(m, THREAD);
      // Restore bytecodes to their unrewritten state if there are exceptions
      // relocating bytecodes.  If some are relocated, that is ok because that
      // doesn't affect constant pool to cpCache rewriting.
      if (HAS_PENDING_EXCEPTION) {
        restore_bytecodes(CATCH);
        return;
      }
      // Method might have gotten rewritten.
      methods->at_put(i, m());
    }
  }
}

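// The constructor's overall shape is "mutate the bytecodes in place, and if
// anything goes wrong, walk everything again in reverse mode to undo".  Because
// the cpCache is only allocated after all bytecodes have been scanned, a
// failure at any point can still be rolled back from the index maps alone.  A
// minimal, self-contained sketch of that rewrite-then-maybe-restore pattern
// (invented names; C++ exceptions stand in for HotSpot's pending-exception
// machinery):
#include <vector>
#include <stdexcept>

struct Item { int value; bool rewritten = false; };

static void rewrite_one(Item& item, bool reverse) {
  if (!reverse)            { item.value += 1000; item.rewritten = true; }
  else if (item.rewritten) { item.value -= 1000; item.rewritten = false; }
}

static void rewrite_all(std::vector<Item>& items) {
  try {
    for (Item& it : items) rewrite_one(it, /*reverse=*/false);
    // ... allocate the cache / do follow-up work that may fail ...
  } catch (const std::exception&) {
    // Undo every rewrite so the items are back in their original state.
    for (Item& it : items) rewrite_one(it, /*reverse=*/true);
    throw;
  }
}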
