src/share/vm/oops/cpCache.cpp

changeset 0
f90c822e73f8
child 6876
710a3c8b516e
     1.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     1.2 +++ b/src/share/vm/oops/cpCache.cpp	Wed Apr 27 01:25:04 2016 +0800
     1.3 @@ -0,0 +1,691 @@
     1.4 +/*
     1.5 + * Copyright (c) 1998, 2014, Oracle and/or its affiliates. All rights reserved.
     1.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     1.7 + *
     1.8 + * This code is free software; you can redistribute it and/or modify it
     1.9 + * under the terms of the GNU General Public License version 2 only, as
    1.10 + * published by the Free Software Foundation.
    1.11 + *
    1.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
    1.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    1.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    1.15 + * version 2 for more details (a copy is included in the LICENSE file that
    1.16 + * accompanied this code).
    1.17 + *
    1.18 + * You should have received a copy of the GNU General Public License version
    1.19 + * 2 along with this work; if not, write to the Free Software Foundation,
    1.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    1.21 + *
    1.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
    1.23 + * or visit www.oracle.com if you need additional information or have any
    1.24 + * questions.
    1.25 + *
    1.26 + */
    1.27 +
    1.28 +#include "precompiled.hpp"
    1.29 +#include "gc_implementation/shared/markSweep.inline.hpp"
    1.30 +#include "interpreter/interpreter.hpp"
    1.31 +#include "interpreter/rewriter.hpp"
    1.32 +#include "memory/universe.inline.hpp"
    1.33 +#include "oops/cpCache.hpp"
    1.34 +#include "oops/objArrayOop.hpp"
    1.35 +#include "oops/oop.inline.hpp"
    1.36 +#include "prims/jvmtiRedefineClassesTrace.hpp"
    1.37 +#include "prims/methodHandles.hpp"
    1.38 +#include "runtime/handles.inline.hpp"
    1.39 +#include "utilities/macros.hpp"
    1.40 +#if INCLUDE_ALL_GCS
    1.41 +# include "gc_implementation/parallelScavenge/psPromotionManager.hpp"
    1.42 +#endif // INCLUDE_ALL_GCS
    1.43 +
    1.44 +PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
    1.45 +
    1.46 +// Implementation of ConstantPoolCacheEntry
    1.47 +
    1.48 +void ConstantPoolCacheEntry::initialize_entry(int index) {
    1.49 +  assert(0 < index && index < 0x10000, "sanity check");
    1.50 +  _indices = index;
    1.51 +  _f1 = NULL;
    1.52 +  _f2 = _flags = 0;
    1.53 +  assert(constant_pool_index() == index, "");
    1.54 +}
    1.55 +
    1.56 +int ConstantPoolCacheEntry::make_flags(TosState state,
    1.57 +                                       int option_bits,
    1.58 +                                       int field_index_or_method_params) {
    1.59 +  assert(state < number_of_states, "Invalid state in make_flags");
    1.60 +  int f = ((int)state << tos_state_shift) | option_bits | field_index_or_method_params;
    1.61 +  // Preserve existing flag bit values
    1.62 +  // The low bits are a field offset, or else the method parameter size.
    1.63 +#ifdef ASSERT
    1.64 +  TosState old_state = flag_state();
    1.65 +  assert(old_state == (TosState)0 || old_state == state,
    1.66 +         "inconsistent cpCache flags state");
    1.67 +#endif
    1.68 +  return (_flags | f) ;
    1.69 +}
    1.70 +
// Publishes the resolved bytecode in slot 1 of _indices.  The release
// store orders all earlier stores (f1, f2, flags) before the bytecode
// becomes visible, so a reader that observes a non-zero bytecode also
// observes a fully initialized entry (see the note above set_field).
void ConstantPoolCacheEntry::set_bytecode_1(Bytecodes::Code code) {
#ifdef ASSERT
  // Read once.
  volatile Bytecodes::Code c = bytecode_1();
  // Concurrent writers may only install the same code; zero means unresolved.
  assert(c == 0 || c == code || code == 0, "update must be consistent");
#endif
  // Need to flush pending stores here before bytecode is written.
  OrderAccess::release_store_ptr(&_indices, _indices | ((u_char)code << bytecode_1_shift));
}
    1.80 +
// Publishes the resolved bytecode in slot 2 of _indices (e.g. the
// invokevirtual half of a shared entry).  Same release-store publication
// protocol as set_bytecode_1.
void ConstantPoolCacheEntry::set_bytecode_2(Bytecodes::Code code) {
#ifdef ASSERT
  // Read once.
  volatile Bytecodes::Code c = bytecode_2();
  // Concurrent writers may only install the same code; zero means unresolved.
  assert(c == 0 || c == code || code == 0, "update must be consistent");
#endif
  // Need to flush pending stores here before bytecode is written.
  OrderAccess::release_store_ptr(&_indices, _indices | ((u_char)code << bytecode_2_shift));
}
    1.90 +
// Sets f1, ordering with previous writes.
// Readers test f1 for non-null before trusting the other entry fields
// (see set_method_handle_common), so f1 must be stored last, with a
// release barrier flushing the earlier stores.
void ConstantPoolCacheEntry::release_set_f1(Metadata* f1) {
  assert(f1 != NULL, "");
  OrderAccess::release_store_ptr((HeapWord*) &_f1, f1);
}
    1.96 +
    1.97 +// Sets flags, but only if the value was previously zero.
    1.98 +bool ConstantPoolCacheEntry::init_flags_atomic(intptr_t flags) {
    1.99 +  intptr_t result = Atomic::cmpxchg_ptr(flags, &_flags, 0);
   1.100 +  return (result == 0);
   1.101 +}
   1.102 +
// Note that concurrent update of both bytecodes can leave one of them
// reset to zero.  This is harmless; the interpreter will simply re-resolve
// the damaged entry.  More seriously, the memory synchronization is needed
// to flush other fields (f1, f2) completely to memory before the bytecodes
// are updated, lest other processors see a non-zero bytecode but zero f1/f2.
//
// Links this entry for a resolved field reference:
//   f1 <- field holder klass, f2 <- field offset,
//   flags <- tos state + volatile/final bits + field index.
// The bytecodes are published last (release stores in set_bytecode_1/2),
// making the entry visible as resolved only once f1/f2/flags are written.
// root_klass is not used by this method.
void ConstantPoolCacheEntry::set_field(Bytecodes::Code get_code,
                                       Bytecodes::Code put_code,
                                       KlassHandle field_holder,
                                       int field_index,
                                       int field_offset,
                                       TosState field_type,
                                       bool is_final,
                                       bool is_volatile,
                                       Klass* root_klass) {
  set_f1(field_holder());
  set_f2(field_offset);
  // The field index must fit in the low flag bits alongside the other flags.
  assert((field_index & field_index_mask) == field_index,
         "field index does not fit in low flag bits");
  set_field_flags(field_type,
                  ((is_volatile ? 1 : 0) << is_volatile_shift) |
                  ((is_final    ? 1 : 0) << is_final_shift),
                  field_index);
  set_bytecode_1(get_code);
  set_bytecode_2(put_code);
  NOT_PRODUCT(verify(tty));
}
   1.129 +
// Installs only the parameter-size bits of _flags, racing safely with
// full initialization by another thread.
void ConstantPoolCacheEntry::set_parameter_size(int value) {
  // This routine is called only in corner cases where the CPCE is not yet initialized.
  // See AbstractInterpreter::deopt_continue_after_entry.
  assert(_flags == 0 || parameter_size() == 0 || parameter_size() == value,
         err_msg("size must not change: parameter_size=%d, value=%d", parameter_size(), value));
  // Setting the parameter size by itself is only safe if the
  // current value of _flags is 0, otherwise another thread may have
  // updated it and we don't want to overwrite that value.  Don't
  // bother trying to update it once it's nonzero but always make
  // sure that the final parameter size agrees with what was passed.
  if (_flags == 0) {
    // CAS from 0 so a concurrent full initialization is never clobbered.
    Atomic::cmpxchg_ptr((value & parameter_size_mask), &_flags, 0);
  }
  // Whether we or another thread wrote _flags, the sizes must agree.
  guarantee(parameter_size() == value,
            err_msg("size must not change: parameter_size=%d, value=%d", parameter_size(), value));
}
   1.146 +
// Links this entry for a resolved invokevirtual/invokespecial/invokestatic
// (or an invokeinterface forced to virtual).  A negative vtable_index marks
// a statically bound (direct) call; a non-negative one selects dispatch
// through the vtable.  The resolution bytecodes are published last, after
// f1/f2 and flags, via the release stores in set_bytecode_1/2.
void ConstantPoolCacheEntry::set_direct_or_vtable_call(Bytecodes::Code invoke_code,
                                                       methodHandle method,
                                                       int vtable_index) {
  bool is_vtable_call = (vtable_index >= 0);  // FIXME: split this method on this boolean
  assert(method->interpreter_entry() != NULL, "should have been set at this point");
  assert(!method->is_obsolete(),  "attempt to write obsolete method to cpCache");

  int byte_no = -1;
  bool change_to_virtual = false;

  switch (invoke_code) {
    case Bytecodes::_invokeinterface:
      // We get here from InterpreterRuntime::resolve_invoke when an invokeinterface
      // instruction somehow links to a non-interface method (in Object).
      // In that case, the method has no itable index and must be invoked as a virtual.
      // Set a flag to keep track of this corner case.
      change_to_virtual = true;

      // ...and fall through as if we were handling invokevirtual:
    case Bytecodes::_invokevirtual:
      {
        if (!is_vtable_call) {
          // Statically bound: store the Method* itself in f2 (vfinal form).
          assert(method->can_be_statically_bound(), "");
          // set_f2_as_vfinal_method checks if is_vfinal flag is true.
          set_method_flags(as_TosState(method->result_type()),
                           (                             1      << is_vfinal_shift) |
                           ((method->is_final_method() ? 1 : 0) << is_final_shift)  |
                           ((change_to_virtual         ? 1 : 0) << is_forced_virtual_shift),
                           method()->size_of_parameters());
          set_f2_as_vfinal_method(method());
        } else {
          // Vtable dispatch: f2 holds the vtable index, not a Method*.
          assert(!method->can_be_statically_bound(), "");
          assert(vtable_index >= 0, "valid index");
          assert(!method->is_final_method(), "sanity");
          set_method_flags(as_TosState(method->result_type()),
                           ((change_to_virtual ? 1 : 0) << is_forced_virtual_shift),
                           method()->size_of_parameters());
          set_f2(vtable_index);
        }
        byte_no = 2;
        break;
      }

    case Bytecodes::_invokespecial:
    case Bytecodes::_invokestatic:
      assert(!is_vtable_call, "");
      // Note:  Read and preserve the value of the is_vfinal flag on any
      // invokevirtual bytecode shared with this constant pool cache entry.
      // It is cheap and safe to consult is_vfinal() at all times.
      // Once is_vfinal is set, it must stay that way, lest we get a dangling oop.
      set_method_flags(as_TosState(method->result_type()),
                       ((is_vfinal()               ? 1 : 0) << is_vfinal_shift) |
                       ((method->is_final_method() ? 1 : 0) << is_final_shift),
                       method()->size_of_parameters());
      set_f1(method());
      byte_no = 1;
      break;
    default:
      ShouldNotReachHere();
      break;
  }

  // Note:  byte_no also appears in TemplateTable::resolve.
  if (byte_no == 1) {
    assert(invoke_code != Bytecodes::_invokevirtual &&
           invoke_code != Bytecodes::_invokeinterface, "");
    set_bytecode_1(invoke_code);
  } else if (byte_no == 2)  {
    if (change_to_virtual) {
      assert(invoke_code == Bytecodes::_invokeinterface, "");
      // NOTE: THIS IS A HACK - BE VERY CAREFUL!!!
      //
      // Workaround for the case where we encounter an invokeinterface, but we
      // should really have an _invokevirtual since the resolved method is a
      // virtual method in java.lang.Object. This is a corner case in the spec
      // but is presumably legal. javac does not generate this code.
      //
      // We set bytecode_1() to _invokeinterface, because that is the
      // bytecode # used by the interpreter to see if it is resolved.
      // We set bytecode_2() to _invokevirtual.
      // See also interpreterRuntime.cpp. (8/25/2000)
      // Only set resolved for the invokeinterface case if method is public.
      // Otherwise, the method needs to be reresolved with caller for each
      // interface call.
      if (method->is_public()) set_bytecode_1(invoke_code);
    } else {
      assert(invoke_code == Bytecodes::_invokevirtual, "");
    }
    // set up for invokevirtual, even if linking for invokeinterface also:
    set_bytecode_2(Bytecodes::_invokevirtual);
  } else {
    ShouldNotReachHere();
  }
  NOT_PRODUCT(verify(tty));
}
   1.242 +
   1.243 +void ConstantPoolCacheEntry::set_direct_call(Bytecodes::Code invoke_code, methodHandle method) {
   1.244 +  int index = Method::nonvirtual_vtable_index;
   1.245 +  // index < 0; FIXME: inline and customize set_direct_or_vtable_call
   1.246 +  set_direct_or_vtable_call(invoke_code, method, index);
   1.247 +}
   1.248 +
   1.249 +void ConstantPoolCacheEntry::set_vtable_call(Bytecodes::Code invoke_code, methodHandle method, int index) {
   1.250 +  // either the method is a miranda or its holder should accept the given index
   1.251 +  assert(method->method_holder()->is_interface() || method->method_holder()->verify_vtable_index(index), "");
   1.252 +  // index >= 0; FIXME: inline and customize set_direct_or_vtable_call
   1.253 +  set_direct_or_vtable_call(invoke_code, method, index);
   1.254 +}
   1.255 +
// Links this entry for a resolved invokeinterface:
//   f1 <- declaring interface klass, f2 <- itable index.
// The bytecode is published last, after f1/f2/flags are stored.
void ConstantPoolCacheEntry::set_itable_call(Bytecodes::Code invoke_code, methodHandle method, int index) {
  assert(method->method_holder()->verify_itable_index(index), "");
  assert(invoke_code == Bytecodes::_invokeinterface, "");
  InstanceKlass* interf = method->method_holder();
  assert(interf->is_interface(), "must be an interface");
  assert(!method->is_final_method(), "interfaces do not have final methods; cannot link to one here");
  set_f1(interf);
  set_f2(index);
  set_method_flags(as_TosState(method->result_type()),
                   0,  // no option bits
                   method()->size_of_parameters());
  set_bytecode_1(Bytecodes::_invokeinterface);
}
   1.269 +
   1.270 +
// Links this entry for an invokehandle call site; shares the common
// publication protocol with invokedynamic (see set_method_handle_common).
void ConstantPoolCacheEntry::set_method_handle(constantPoolHandle cpool, const CallInfo &call_info) {
  set_method_handle_common(cpool, Bytecodes::_invokehandle, call_info);
}
   1.274 +
// Links this entry for an invokedynamic call site; shares the common
// publication protocol with invokehandle (see set_method_handle_common).
void ConstantPoolCacheEntry::set_dynamic_call(constantPoolHandle cpool, const CallInfo &call_info) {
  set_method_handle_common(cpool, Bytecodes::_invokedynamic, call_info);
}
   1.278 +
// Common linkage path for invokehandle and invokedynamic.  Stores the
// adapter method and its optional appendix/MethodType, guarded by the
// constant pool lock, and publishes f1 last with a release store.
void ConstantPoolCacheEntry::set_method_handle_common(constantPoolHandle cpool,
                                                      Bytecodes::Code invoke_code,
                                                      const CallInfo &call_info) {
  // NOTE: This CPCE can be the subject of data races.
  // There are three words to update: flags, refs[f2], f1 (in that order).
  // Writers must store all other values before f1.
  // Readers must test f1 first for non-null before reading other fields.
  // Competing writers must acquire exclusive access via a lock.
  // A losing writer waits on the lock until the winner writes f1 and leaves
  // the lock, so that when the losing writer returns, he can use the linked
  // cache entry.

  MonitorLockerEx ml(cpool->lock());
  if (!is_f1_null()) {
    // Another thread won the race and fully linked this entry already.
    return;
  }

  const methodHandle adapter = call_info.resolved_method();
  const Handle appendix      = call_info.resolved_appendix();
  const Handle method_type   = call_info.resolved_method_type();
  const bool has_appendix    = appendix.not_null();
  const bool has_method_type = method_type.not_null();

  // Write the flags.
  set_method_flags(as_TosState(adapter->result_type()),
                   ((has_appendix    ? 1 : 0) << has_appendix_shift   ) |
                   ((has_method_type ? 1 : 0) << has_method_type_shift) |
                   (                   1      << is_final_shift       ),
                   adapter->size_of_parameters());

  if (TraceInvokeDynamic) {
    tty->print_cr("set_method_handle bc=%d appendix="PTR_FORMAT"%s method_type="PTR_FORMAT"%s method="PTR_FORMAT" ",
                  invoke_code,
                  (void *)appendix(),    (has_appendix    ? "" : " (unused)"),
                  (void *)method_type(), (has_method_type ? "" : " (unused)"),
                  (intptr_t)adapter());
    adapter->print();
    if (has_appendix)  appendix()->print();
  }

  // Method handle invokes and invokedynamic sites use both cp cache words.
  // refs[f2], if not null, contains a value passed as a trailing argument to the adapter.
  // In the general case, this could be the call site's MethodType,
  // for use with java.lang.Invokers.checkExactType, or else a CallSite object.
  // f1 contains the adapter method which manages the actual call.
  // In the general case, this is a compiled LambdaForm.
  // (The Java code is free to optimize these calls by binding other
  // sorts of methods and appendices to call sites.)
  // JVM-level linking is via f1, as if for invokespecial, and signatures are erased.
  // The appendix argument (if any) is added to the signature, and is counted in the parameter_size bits.
  // Even with the appendix, the method will never take more than 255 parameter slots.
  //
  // This means that given a call site like (List)mh.invoke("foo"),
  // the f1 method has signature '(Ljl/Object;Ljl/invoke/MethodType;)Ljl/Object;',
  // not '(Ljava/lang/String;)Ljava/util/List;'.
  // The fact that String and List are involved is encoded in the MethodType in refs[f2].
  // This allows us to create fewer method oops, while keeping type safety.
  //

  objArrayHandle resolved_references = cpool->resolved_references();
  // Store appendix, if any.
  if (has_appendix) {
    const int appendix_index = f2_as_index() + _indy_resolved_references_appendix_offset;
    assert(appendix_index >= 0 && appendix_index < resolved_references->length(), "oob");
    assert(resolved_references->obj_at(appendix_index) == NULL, "init just once");
    resolved_references->obj_at_put(appendix_index, appendix());
  }

  // Store MethodType, if any.
  if (has_method_type) {
    const int method_type_index = f2_as_index() + _indy_resolved_references_method_type_offset;
    assert(method_type_index >= 0 && method_type_index < resolved_references->length(), "oob");
    assert(resolved_references->obj_at(method_type_index) == NULL, "init just once");
    resolved_references->obj_at_put(method_type_index, method_type());
  }

  release_set_f1(adapter());  // This must be the last one to set (see NOTE above)!

  // The interpreter assembly code does not check byte_2,
  // but it is used by is_resolved, method_if_resolved, etc.
  set_bytecode_1(invoke_code);
  NOT_PRODUCT(verify(tty));
  if (TraceInvokeDynamic) {
    this->print(tty, 0);
  }
}
   1.365 +
// Returns the Method* this entry resolves to, or NULL if unresolved.
// Decodes both bytecode slots: slot 1 for interface/static/special/handle/
// dynamic linkage via f1, slot 2 for invokevirtual via f2.
Method* ConstantPoolCacheEntry::method_if_resolved(constantPoolHandle cpool) {
  // Decode the action of set_method and set_interface_call
  Bytecodes::Code invoke_code = bytecode_1();
  if (invoke_code != (Bytecodes::Code)0) {
    Metadata* f1 = f1_ord();
    if (f1 != NULL) {
      switch (invoke_code) {
      case Bytecodes::_invokeinterface:
        // f1 is the interface klass; f2 holds the itable index.
        assert(f1->is_klass(), "");
        return klassItable::method_for_itable_index((Klass*)f1, f2_as_index());
      case Bytecodes::_invokestatic:
      case Bytecodes::_invokespecial:
        assert(!has_appendix(), "");
        // fall through: f1 is the Method* in all remaining cases
      case Bytecodes::_invokehandle:
      case Bytecodes::_invokedynamic:
        assert(f1->is_method(), "");
        return (Method*)f1;
      }
    }
  }
  invoke_code = bytecode_2();
  if (invoke_code != (Bytecodes::Code)0) {
    switch (invoke_code) {
    case Bytecodes::_invokevirtual:
      if (is_vfinal()) {
        // invokevirtual
        Method* m = f2_as_vfinal_method();
        assert(m->is_method(), "");
        return m;
      } else {
        // f2 is a vtable index; look the method up through the holder klass.
        int holder_index = cpool->uncached_klass_ref_index_at(constant_pool_index());
        if (cpool->tag_at(holder_index).is_klass()) {
          Klass* klass = cpool->resolved_klass_at(holder_index);
          if (!klass->oop_is_instance())
            klass = SystemDictionary::Object_klass();
          return InstanceKlass::cast(klass)->method_at_vtable(f2_as_index());
        }
      }
      break;
    }
  }
  return NULL;
}
   1.409 +
   1.410 +
   1.411 +oop ConstantPoolCacheEntry::appendix_if_resolved(constantPoolHandle cpool) {
   1.412 +  if (!has_appendix())
   1.413 +    return NULL;
   1.414 +  const int ref_index = f2_as_index() + _indy_resolved_references_appendix_offset;
   1.415 +  objArrayOop resolved_references = cpool->resolved_references();
   1.416 +  return resolved_references->obj_at(ref_index);
   1.417 +}
   1.418 +
   1.419 +
   1.420 +oop ConstantPoolCacheEntry::method_type_if_resolved(constantPoolHandle cpool) {
   1.421 +  if (!has_method_type())
   1.422 +    return NULL;
   1.423 +  const int ref_index = f2_as_index() + _indy_resolved_references_method_type_offset;
   1.424 +  objArrayOop resolved_references = cpool->resolved_references();
   1.425 +  return resolved_references->obj_at(ref_index);
   1.426 +}
   1.427 +
   1.428 +
#if INCLUDE_JVMTI
// RedefineClasses() API support:
// If this ConstantPoolCacheEntry refers to old_method then update it
// to refer to new_method.
// Returns true if the entry was patched.  *trace_name_printed tracks
// whether the holder-class banner has been emitted yet for tracing.
bool ConstantPoolCacheEntry::adjust_method_entry(Method* old_method,
       Method* new_method, bool * trace_name_printed) {

  if (is_vfinal()) {
    // virtual and final so _f2 contains method ptr instead of vtable index
    if (f2_as_vfinal_method() == old_method) {
      // match old_method so need an update
      // NOTE: can't use set_f2_as_vfinal_method as it asserts on different values
      _f2 = (intptr_t)new_method;
      if (RC_TRACE_IN_RANGE(0x00100000, 0x00400000)) {
        if (!(*trace_name_printed)) {
          // RC_TRACE_MESG macro has an embedded ResourceMark
          RC_TRACE_MESG(("adjust: name=%s",
            old_method->method_holder()->external_name()));
          *trace_name_printed = true;
        }
        // RC_TRACE macro has an embedded ResourceMark
        RC_TRACE(0x00400000, ("cpc vf-entry update: %s(%s)",
          new_method->name()->as_C_string(),
          new_method->signature()->as_C_string()));
      }

      return true;
    }

    // f1() is not used with virtual entries so bail out
    return false;
  }

  if (_f1 == NULL) {
    // NULL f1() means this is a virtual entry so bail out
    // We are assuming that the vtable index does not need change.
    return false;
  }

  if (_f1 == old_method) {
    // Non-virtual entry keeps the Method* in _f1; patch it in place.
    _f1 = new_method;
    if (RC_TRACE_IN_RANGE(0x00100000, 0x00400000)) {
      if (!(*trace_name_printed)) {
        // RC_TRACE_MESG macro has an embedded ResourceMark
        RC_TRACE_MESG(("adjust: name=%s",
          old_method->method_holder()->external_name()));
        *trace_name_printed = true;
      }
      // RC_TRACE macro has an embedded ResourceMark
      RC_TRACE(0x00400000, ("cpc entry update: %s(%s)",
        new_method->name()->as_C_string(),
        new_method->signature()->as_C_string()));
    }

    return true;
  }

  return false;
}
   1.488 +
// a constant pool cache entry should never contain old or obsolete methods
// Returns true when the entry's method slot (f2 for vfinal entries, f1
// otherwise) is either not a method or is a current, non-obsolete method.
bool ConstantPoolCacheEntry::check_no_old_or_obsolete_entries() {
  if (is_vfinal()) {
    // virtual and final so _f2 contains method ptr instead of vtable index
    Metadata* f2 = (Metadata*)_f2;
    // Return false if _f2 refers to an old or an obsolete method.
    // _f2 == NULL || !_f2->is_method() are just as unexpected here.
    return (f2 != NULL NOT_PRODUCT(&& f2->is_valid()) && f2->is_method() &&
            !((Method*)f2)->is_old() && !((Method*)f2)->is_obsolete());
  } else if (_f1 == NULL ||
             (NOT_PRODUCT(_f1->is_valid() &&) !_f1->is_method())) {
    // _f1 == NULL || !_f1->is_method() are OK here
    return true;
  }
  // return false if _f1 refers to an old or an obsolete method
  return (NOT_PRODUCT(_f1->is_valid() &&) _f1->is_method() &&
          !((Method*)_f1)->is_old() && !((Method*)_f1)->is_obsolete());
}
   1.507 +
   1.508 +bool ConstantPoolCacheEntry::is_interesting_method_entry(Klass* k) {
   1.509 +  if (!is_method_entry()) {
   1.510 +    // not a method entry so not interesting by default
   1.511 +    return false;
   1.512 +  }
   1.513 +
   1.514 +  Method* m = NULL;
   1.515 +  if (is_vfinal()) {
   1.516 +    // virtual and final so _f2 contains method ptr instead of vtable index
   1.517 +    m = f2_as_vfinal_method();
   1.518 +  } else if (is_f1_null()) {
   1.519 +    // NULL _f1 means this is a virtual entry so also not interesting
   1.520 +    return false;
   1.521 +  } else {
   1.522 +    if (!(_f1->is_method())) {
   1.523 +      // _f1 can also contain a Klass* for an interface
   1.524 +      return false;
   1.525 +    }
   1.526 +    m = f1_as_method();
   1.527 +  }
   1.528 +
   1.529 +  assert(m != NULL && m->is_method(), "sanity check");
   1.530 +  if (m == NULL || !m->is_method() || (k != NULL && m->method_holder() != k)) {
   1.531 +    // robustness for above sanity checks or method is not in
   1.532 +    // the interesting class
   1.533 +    return false;
   1.534 +  }
   1.535 +
   1.536 +  // the method is in the interesting class so the entry is interesting
   1.537 +  return true;
   1.538 +}
   1.539 +#endif // INCLUDE_JVMTI
   1.540 +
// Prints this entry's raw words (bytecodes, cp index, f1, f2, flags)
// to st, with a separator line before entry 0.
void ConstantPoolCacheEntry::print(outputStream* st, int index) const {
  // print separator
  if (index == 0) st->print_cr("                 -------------");
  // print entry: [bytecode_2|bytecode_1|constant pool index]
  st->print("%3d  ("PTR_FORMAT")  ", index, (intptr_t)this);
  st->print_cr("[%02x|%02x|%5d]", bytecode_2(), bytecode_1(),
               constant_pool_index());
  // then the three resolution words, one per line
  st->print_cr("                 [   "PTR_FORMAT"]", (intptr_t)_f1);
  st->print_cr("                 [   "PTR_FORMAT"]", (intptr_t)_f2);
  st->print_cr("                 [   "PTR_FORMAT"]", (intptr_t)_flags);
  st->print_cr("                 -------------");
}
   1.553 +
// Per-entry verification hook, invoked via NOT_PRODUCT(verify(tty)) after
// an entry is linked.  Currently a no-op.
void ConstantPoolCacheEntry::verify(outputStream* st) const {
  // not implemented yet
}
   1.557 +
   1.558 +// Implementation of ConstantPoolCache
   1.559 +
   1.560 +ConstantPoolCache* ConstantPoolCache::allocate(ClassLoaderData* loader_data,
   1.561 +                                     const intStack& index_map,
   1.562 +                                     const intStack& invokedynamic_index_map,
   1.563 +                                     const intStack& invokedynamic_map, TRAPS) {
   1.564 +
   1.565 +  const int length = index_map.length() + invokedynamic_index_map.length();
   1.566 +  int size = ConstantPoolCache::size(length);
   1.567 +
   1.568 +  return new (loader_data, size, false, MetaspaceObj::ConstantPoolCacheType, THREAD)
   1.569 +    ConstantPoolCache(length, index_map, invokedynamic_index_map, invokedynamic_map);
   1.570 +}
   1.571 +
// Initializes every cache entry with its originating constant pool index
// (regular entries first, then invokedynamic entries), and wires up the
// resolved-references indices for invokedynamic/invokehandle entries.
void ConstantPoolCache::initialize(const intArray& inverse_index_map,
                                   const intArray& invokedynamic_inverse_index_map,
                                   const intArray& invokedynamic_references_map) {
  for (int i = 0; i < inverse_index_map.length(); i++) {
    ConstantPoolCacheEntry* e = entry_at(i);
    int original_index = inverse_index_map[i];
    e->initialize_entry(original_index);
    assert(entry_at(i) == e, "sanity");
  }

  // Append invokedynamic entries at the end
  int invokedynamic_offset = inverse_index_map.length();
  for (int i = 0; i < invokedynamic_inverse_index_map.length(); i++) {
    int offset = i + invokedynamic_offset;
    ConstantPoolCacheEntry* e = entry_at(offset);
    int original_index = invokedynamic_inverse_index_map[i];
    e->initialize_entry(original_index);
    assert(entry_at(offset) == e, "sanity");
  }

  // Each resolved-references group maps back to one cache entry; a
  // negative cpci marks an unused slot.
  for (int ref = 0; ref < invokedynamic_references_map.length(); ref++) {
    const int cpci = invokedynamic_references_map[ref];
    if (cpci >= 0) {
#ifdef ASSERT
      // invokedynamic and invokehandle have more entries; check if they
      // all point to the same constant pool cache entry.
      for (int entry = 1; entry < ConstantPoolCacheEntry::_indy_resolved_references_entries; entry++) {
        const int cpci_next = invokedynamic_references_map[ref + entry];
        assert(cpci == cpci_next, err_msg_res("%d == %d", cpci, cpci_next));
      }
#endif
      entry_at(cpci)->initialize_resolved_reference_index(ref);
      ref += ConstantPoolCacheEntry::_indy_resolved_references_entries - 1;  // skip extra entries
    }
  }
}
   1.608 +
   1.609 +#if INCLUDE_JVMTI
   1.610 +// RedefineClasses() API support:
   1.611 +// If any entry of this ConstantPoolCache points to any of
   1.612 +// old_methods, replace it with the corresponding new_method.
   1.613 +void ConstantPoolCache::adjust_method_entries(Method** old_methods, Method** new_methods,
   1.614 +                                                     int methods_length, bool * trace_name_printed) {
   1.615 +
   1.616 +  if (methods_length == 0) {
   1.617 +    // nothing to do if there are no methods
   1.618 +    return;
   1.619 +  }
   1.620 +
   1.621 +  // get shorthand for the interesting class
   1.622 +  Klass* old_holder = old_methods[0]->method_holder();
   1.623 +
   1.624 +  for (int i = 0; i < length(); i++) {
   1.625 +    if (!entry_at(i)->is_interesting_method_entry(old_holder)) {
   1.626 +      // skip uninteresting methods
   1.627 +      continue;
   1.628 +    }
   1.629 +
   1.630 +    // The ConstantPoolCache contains entries for several different
   1.631 +    // things, but we only care about methods. In fact, we only care
   1.632 +    // about methods in the same class as the one that contains the
   1.633 +    // old_methods. At this point, we have an interesting entry.
   1.634 +
   1.635 +    for (int j = 0; j < methods_length; j++) {
   1.636 +      Method* old_method = old_methods[j];
   1.637 +      Method* new_method = new_methods[j];
   1.638 +
   1.639 +      if (entry_at(i)->adjust_method_entry(old_method, new_method,
   1.640 +          trace_name_printed)) {
   1.641 +        // current old_method matched this entry and we updated it so
   1.642 +        // break out and get to the next interesting entry if there one
   1.643 +        break;
   1.644 +      }
   1.645 +    }
   1.646 +  }
   1.647 +}
   1.648 +
   1.649 +// the constant pool cache should never contain old or obsolete methods
   1.650 +bool ConstantPoolCache::check_no_old_or_obsolete_entries() {
   1.651 +  for (int i = 1; i < length(); i++) {
   1.652 +    if (entry_at(i)->is_interesting_method_entry(NULL) &&
   1.653 +        !entry_at(i)->check_no_old_or_obsolete_entries()) {
   1.654 +      return false;
   1.655 +    }
   1.656 +  }
   1.657 +  return true;
   1.658 +}
   1.659 +
   1.660 +void ConstantPoolCache::dump_cache() {
   1.661 +  for (int i = 1; i < length(); i++) {
   1.662 +    if (entry_at(i)->is_interesting_method_entry(NULL)) {
   1.663 +      entry_at(i)->print(tty, i);
   1.664 +    }
   1.665 +  }
   1.666 +}
   1.667 +#endif // INCLUDE_JVMTI
   1.668 +
   1.669 +
   1.670 +// Printing
   1.671 +
   1.672 +void ConstantPoolCache::print_on(outputStream* st) const {
   1.673 +  assert(is_constantPoolCache(), "obj must be constant pool cache");
   1.674 +  st->print_cr("%s", internal_name());
   1.675 +  // print constant pool cache entries
   1.676 +  for (int i = 0; i < length(); i++) entry_at(i)->print(st, i);
   1.677 +}
   1.678 +
// Prints a one-line summary: entry count, address, and the owning
// constant pool's value representation.
void ConstantPoolCache::print_value_on(outputStream* st) const {
  assert(is_constantPoolCache(), "obj must be constant pool cache");
  st->print("cache [%d]", length());
  print_address_on(st);
  st->print(" for ");
  constant_pool()->print_value_on(st);
}
   1.686 +
   1.687 +
   1.688 +// Verification
   1.689 +
   1.690 +void ConstantPoolCache::verify_on(outputStream* st) {
   1.691 +  guarantee(is_constantPoolCache(), "obj must be constant pool cache");
   1.692 +  // print constant pool cache entries
   1.693 +  for (int i = 0; i < length(); i++) entry_at(i)->verify(st);
   1.694 +}

mercurial