src/share/vm/oops/cpCache.cpp

author:      aoqi
date:        Mon, 28 May 2018 10:33:52 +0800
changeset:   9041:95a08233f46c
parent:      8997:f8a45a60bc6b
parent:      8856:ac27a9c85bea
child:       9122:024be04bb151
permissions: -rw-r--r--
description: Merge

aoqi@0 1 /*
dbuck@8997 2 * Copyright (c) 1998, 2017, Oracle and/or its affiliates. All rights reserved.
aoqi@0 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
aoqi@0 4 *
aoqi@0 5 * This code is free software; you can redistribute it and/or modify it
aoqi@0 6 * under the terms of the GNU General Public License version 2 only, as
aoqi@0 7 * published by the Free Software Foundation.
aoqi@0 8 *
aoqi@0 9 * This code is distributed in the hope that it will be useful, but WITHOUT
aoqi@0 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
aoqi@0 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
aoqi@0 12 * version 2 for more details (a copy is included in the LICENSE file that
aoqi@0 13 * accompanied this code).
aoqi@0 14 *
aoqi@0 15 * You should have received a copy of the GNU General Public License version
aoqi@0 16 * 2 along with this work; if not, write to the Free Software Foundation,
aoqi@0 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
aoqi@0 18 *
aoqi@0 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
aoqi@0 20 * or visit www.oracle.com if you need additional information or have any
aoqi@0 21 * questions.
aoqi@0 22 *
aoqi@0 23 */
aoqi@0 24
aoqi@0 25 #include "precompiled.hpp"
aoqi@0 26 #include "gc_implementation/shared/markSweep.inline.hpp"
aoqi@0 27 #include "interpreter/interpreter.hpp"
aoqi@0 28 #include "interpreter/rewriter.hpp"
aoqi@0 29 #include "memory/universe.inline.hpp"
aoqi@0 30 #include "oops/cpCache.hpp"
aoqi@0 31 #include "oops/objArrayOop.hpp"
aoqi@0 32 #include "oops/oop.inline.hpp"
aoqi@0 33 #include "prims/jvmtiRedefineClassesTrace.hpp"
aoqi@0 34 #include "prims/methodHandles.hpp"
aoqi@0 35 #include "runtime/handles.inline.hpp"
goetz@6911 36 #include "runtime/orderAccess.inline.hpp"
aoqi@0 37 #include "utilities/macros.hpp"
aoqi@0 38 #if INCLUDE_ALL_GCS
aoqi@0 39 # include "gc_implementation/parallelScavenge/psPromotionManager.hpp"
aoqi@0 40 #endif // INCLUDE_ALL_GCS
aoqi@0 41
aoqi@0 42 PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
aoqi@0 43
aoqi@0 44 // Implementation of ConstantPoolCacheEntry
aoqi@0 45
aoqi@0 46 void ConstantPoolCacheEntry::initialize_entry(int index) {
aoqi@0 47 assert(0 < index && index < 0x10000, "sanity check");
aoqi@0 48 _indices = index;
aoqi@0 49 _f1 = NULL;
aoqi@0 50 _f2 = _flags = 0;
aoqi@0 51 assert(constant_pool_index() == index, "");
aoqi@0 52 }
aoqi@0 53
aoqi@0 54 int ConstantPoolCacheEntry::make_flags(TosState state,
aoqi@0 55 int option_bits,
aoqi@0 56 int field_index_or_method_params) {
aoqi@0 57 assert(state < number_of_states, "Invalid state in make_flags");
aoqi@0 58 int f = ((int)state << tos_state_shift) | option_bits | field_index_or_method_params;
aoqi@0 59 // Preserve existing flag bit values
aoqi@0 60 // The low bits are a field index, or else the method parameter size.
aoqi@0 61 #ifdef ASSERT
aoqi@0 62 TosState old_state = flag_state();
aoqi@0 63 assert(old_state == (TosState)0 || old_state == state,
aoqi@0 64 "inconsistent cpCache flags state");
aoqi@0 65 #endif
aoqi@0 66 return (_flags | f) ;
aoqi@0 67 }
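// Editor's note (sketch, not part of the upstream sources): assuming the
// shift/mask constants declared in cpCache.hpp, the _flags word assembled
// above can be pictured roughly as
//
//   [ tos_state | option bits (final, volatile, vfinal, ...) | field index or parameter size ]
//     high bits               middle bits                               low bits
//
// make_flags() simply ORs the three contributions together; the assert above
// guards against clobbering a previously published tos_state.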
aoqi@0 68
aoqi@0 69 void ConstantPoolCacheEntry::set_bytecode_1(Bytecodes::Code code) {
aoqi@0 70 #ifdef ASSERT
aoqi@0 71 // Read once.
aoqi@0 72 volatile Bytecodes::Code c = bytecode_1();
aoqi@0 73 assert(c == 0 || c == code || code == 0, "update must be consistent");
aoqi@0 74 #endif
aoqi@0 75 // Need to flush pending stores here before bytecode is written.
aoqi@0 76 OrderAccess::release_store_ptr(&_indices, _indices | ((u_char)code << bytecode_1_shift));
aoqi@0 77 }
aoqi@0 78
aoqi@0 79 void ConstantPoolCacheEntry::set_bytecode_2(Bytecodes::Code code) {
aoqi@0 80 #ifdef ASSERT
aoqi@0 81 // Read once.
aoqi@0 82 volatile Bytecodes::Code c = bytecode_2();
aoqi@0 83 assert(c == 0 || c == code || code == 0, "update must be consistent");
aoqi@0 84 #endif
aoqi@0 85 // Need to flush pending stores here before bytecode is written.
aoqi@0 86 OrderAccess::release_store_ptr(&_indices, _indices | ((u_char)code << bytecode_2_shift));
aoqi@0 87 }
aoqi@0 88
aoqi@0 89 // Sets f1, ordering with previous writes.
aoqi@0 90 void ConstantPoolCacheEntry::release_set_f1(Metadata* f1) {
aoqi@0 91 assert(f1 != NULL, "");
aoqi@0 92 OrderAccess::release_store_ptr((HeapWord*) &_f1, f1);
aoqi@0 93 }
aoqi@0 94
aoqi@0 95 // Sets flags, but only if the value was previously zero.
aoqi@0 96 bool ConstantPoolCacheEntry::init_flags_atomic(intptr_t flags) {
aoqi@0 97 intptr_t result = Atomic::cmpxchg_ptr(flags, &_flags, 0);
aoqi@0 98 return (result == 0);
aoqi@0 99 }
aoqi@0 100
aoqi@0 101 // Note that concurrent update of both bytecodes can leave one of them
aoqi@0 102 // reset to zero. This is harmless; the interpreter will simply re-resolve
aoqi@0 103 // the damaged entry. More seriously, the memory synchronization is needed
aoqi@0 104 // to flush other fields (f1, f2) completely to memory before the bytecodes
aoqi@0 105 // are updated, lest other processors see a non-zero bytecode but zero f1/f2.
aoqi@0 106 void ConstantPoolCacheEntry::set_field(Bytecodes::Code get_code,
aoqi@0 107 Bytecodes::Code put_code,
aoqi@0 108 KlassHandle field_holder,
aoqi@0 109 int field_index,
aoqi@0 110 int field_offset,
aoqi@0 111 TosState field_type,
aoqi@0 112 bool is_final,
aoqi@0 113 bool is_volatile,
aoqi@0 114 Klass* root_klass) {
aoqi@0 115 set_f1(field_holder());
aoqi@0 116 set_f2(field_offset);
aoqi@0 117 assert((field_index & field_index_mask) == field_index,
aoqi@0 118 "field index does not fit in low flag bits");
aoqi@0 119 set_field_flags(field_type,
aoqi@0 120 ((is_volatile ? 1 : 0) << is_volatile_shift) |
aoqi@0 121 ((is_final ? 1 : 0) << is_final_shift),
aoqi@0 122 field_index);
aoqi@0 123 set_bytecode_1(get_code);
aoqi@0 124 set_bytecode_2(put_code);
aoqi@0 125 NOT_PRODUCT(verify(tty));
aoqi@0 126 }
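// Editor's sketch (hedged, not in the upstream file): the intended store
// order in set_field() is
//
//   f1, f2, flags  --(release_store in set_bytecode_*)-->  bytecode_1 / bytecode_2
//
// so a reader is expected to test the resolved bytecode (or f1) first, e.g.
//
//   if (cpce->bytecode_1() == get_code) {   // hypothetical reader
//     // f1/f2/flags are guaranteed to be visible here
//   }
//
// which is the protocol described in the comment block above set_field().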
aoqi@0 127
aoqi@0 128 void ConstantPoolCacheEntry::set_parameter_size(int value) {
aoqi@0 129 // This routine is called only in corner cases where the CPCE is not yet initialized.
aoqi@0 130 // See AbstractInterpreter::deopt_continue_after_entry.
aoqi@0 131 assert(_flags == 0 || parameter_size() == 0 || parameter_size() == value,
aoqi@0 132 err_msg("size must not change: parameter_size=%d, value=%d", parameter_size(), value));
aoqi@0 133 // Setting the parameter size by itself is only safe if the
aoqi@0 134 // current value of _flags is 0, otherwise another thread may have
aoqi@0 135 // updated it and we don't want to overwrite that value. Don't
aoqi@0 136 // bother trying to update it once it's nonzero but always make
aoqi@0 137 // sure that the final parameter size agrees with what was passed.
aoqi@0 138 if (_flags == 0) {
aoqi@0 139 Atomic::cmpxchg_ptr((value & parameter_size_mask), &_flags, 0);
aoqi@0 140 }
aoqi@0 141 guarantee(parameter_size() == value,
aoqi@0 142 err_msg("size must not change: parameter_size=%d, value=%d", parameter_size(), value));
aoqi@0 143 }
aoqi@0 144
aoqi@0 145 void ConstantPoolCacheEntry::set_direct_or_vtable_call(Bytecodes::Code invoke_code,
aoqi@0 146 methodHandle method,
coleenp@8739 147 int vtable_index,
coleenp@8739 148 bool sender_is_interface) {
aoqi@0 149 bool is_vtable_call = (vtable_index >= 0); // FIXME: split this method on this boolean
aoqi@0 150 assert(method->interpreter_entry() != NULL, "should have been set at this point");
aoqi@0 151 assert(!method->is_obsolete(), "attempt to write obsolete method to cpCache");
aoqi@0 152
aoqi@0 153 int byte_no = -1;
aoqi@0 154 bool change_to_virtual = false;
aoqi@0 155
aoqi@0 156 switch (invoke_code) {
aoqi@0 157 case Bytecodes::_invokeinterface:
aoqi@0 158 // We get here from InterpreterRuntime::resolve_invoke when an invokeinterface
aoqi@0 159 // instruction somehow links to a non-interface method (in Object).
aoqi@0 160 // In that case, the method has no itable index and must be invoked as a virtual.
aoqi@0 161 // Set a flag to keep track of this corner case.
aoqi@0 162 change_to_virtual = true;
aoqi@0 163
aoqi@0 164 // ...and fall through as if we were handling invokevirtual:
aoqi@0 165 case Bytecodes::_invokevirtual:
aoqi@0 166 {
aoqi@0 167 if (!is_vtable_call) {
aoqi@0 168 assert(method->can_be_statically_bound(), "");
aoqi@0 169 // set_f2_as_vfinal_method checks if is_vfinal flag is true.
aoqi@0 170 set_method_flags(as_TosState(method->result_type()),
aoqi@0 171 ( 1 << is_vfinal_shift) |
aoqi@0 172 ((method->is_final_method() ? 1 : 0) << is_final_shift) |
aoqi@0 173 ((change_to_virtual ? 1 : 0) << is_forced_virtual_shift),
aoqi@0 174 method()->size_of_parameters());
aoqi@0 175 set_f2_as_vfinal_method(method());
aoqi@0 176 } else {
aoqi@0 177 assert(!method->can_be_statically_bound(), "");
aoqi@0 178 assert(vtable_index >= 0, "valid index");
aoqi@0 179 assert(!method->is_final_method(), "sanity");
aoqi@0 180 set_method_flags(as_TosState(method->result_type()),
aoqi@0 181 ((change_to_virtual ? 1 : 0) << is_forced_virtual_shift),
aoqi@0 182 method()->size_of_parameters());
aoqi@0 183 set_f2(vtable_index);
aoqi@0 184 }
aoqi@0 185 byte_no = 2;
aoqi@0 186 break;
aoqi@0 187 }
aoqi@0 188
aoqi@0 189 case Bytecodes::_invokespecial:
aoqi@0 190 case Bytecodes::_invokestatic:
aoqi@0 191 assert(!is_vtable_call, "");
aoqi@0 192 // Note: Read and preserve the value of the is_vfinal flag on any
aoqi@0 193 // invokevirtual bytecode shared with this constant pool cache entry.
aoqi@0 194 // It is cheap and safe to consult is_vfinal() at all times.
aoqi@0 195 // Once is_vfinal is set, it must stay that way, lest we get a dangling oop.
aoqi@0 196 set_method_flags(as_TosState(method->result_type()),
aoqi@0 197 ((is_vfinal() ? 1 : 0) << is_vfinal_shift) |
aoqi@0 198 ((method->is_final_method() ? 1 : 0) << is_final_shift),
aoqi@0 199 method()->size_of_parameters());
aoqi@0 200 set_f1(method());
aoqi@0 201 byte_no = 1;
aoqi@0 202 break;
aoqi@0 203 default:
aoqi@0 204 ShouldNotReachHere();
aoqi@0 205 break;
aoqi@0 206 }
aoqi@0 207
aoqi@0 208 // Note: byte_no also appears in TemplateTable::resolve.
aoqi@0 209 if (byte_no == 1) {
aoqi@0 210 assert(invoke_code != Bytecodes::_invokevirtual &&
aoqi@0 211 invoke_code != Bytecodes::_invokeinterface, "");
coleenp@8739 212 // Don't mark invokespecial to method as resolved if sender is an interface. The receiver
coleenp@8739 213 // has to be checked that it is a subclass of the current class every time this bytecode
coleenp@8739 214 // is executed.
coleenp@8739 215 if (invoke_code != Bytecodes::_invokespecial || !sender_is_interface ||
coleenp@8739 216 method->name() == vmSymbols::object_initializer_name()) {
aoqi@0 217 set_bytecode_1(invoke_code);
coleenp@8739 218 }
aoqi@0 219 } else if (byte_no == 2) {
aoqi@0 220 if (change_to_virtual) {
aoqi@0 221 assert(invoke_code == Bytecodes::_invokeinterface, "");
aoqi@0 222 // NOTE: THIS IS A HACK - BE VERY CAREFUL!!!
aoqi@0 223 //
aoqi@0 224 // Workaround for the case where we encounter an invokeinterface, but we
aoqi@0 225 // should really have an _invokevirtual since the resolved method is a
aoqi@0 226 // virtual method in java.lang.Object. This is a corner case in the spec
aoqi@0 227 // but is presumably legal. javac does not generate this code.
aoqi@0 228 //
aoqi@0 229 // We set bytecode_1() to _invokeinterface, because that is the
aoqi@0 230 // bytecode # used by the interpreter to see if it is resolved.
aoqi@0 231 // We set bytecode_2() to _invokevirtual.
aoqi@0 232 // See also interpreterRuntime.cpp. (8/25/2000)
aoqi@0 233 // Only set resolved for the invokeinterface case if method is public.
aoqi@0 234 // Otherwise, the method needs to be reresolved with caller for each
aoqi@0 235 // interface call.
aoqi@0 236 if (method->is_public()) set_bytecode_1(invoke_code);
aoqi@0 237 } else {
aoqi@0 238 assert(invoke_code == Bytecodes::_invokevirtual, "");
aoqi@0 239 }
aoqi@0 240 // set up for invokevirtual, even if linking for invokeinterface also:
aoqi@0 241 set_bytecode_2(Bytecodes::_invokevirtual);
aoqi@0 242 } else {
aoqi@0 243 ShouldNotReachHere();
aoqi@0 244 }
aoqi@0 245 NOT_PRODUCT(verify(tty));
aoqi@0 246 }
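// Editor's summary (derived from the code above, not an upstream comment):
// set_direct_or_vtable_call() leaves the entry encoded as
//
//   statically bound virtual (vfinal)  : _f2 = Method*,      bytecode_2 = _invokevirtual
//   true vtable dispatch               : _f2 = vtable index, bytecode_2 = _invokevirtual
//   invokespecial / invokestatic       : _f1 = Method*,      bytecode_1 = invoke_code
//
// with bytecode_1 deliberately left unset for the interface-sender invokespecial
// case so that the receiver subtype check is repeated on every execution.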
aoqi@0 247
coleenp@8739 248 void ConstantPoolCacheEntry::set_direct_call(Bytecodes::Code invoke_code, methodHandle method,
coleenp@8739 249 bool sender_is_interface) {
aoqi@0 250 int index = Method::nonvirtual_vtable_index;
aoqi@0 251 // index < 0; FIXME: inline and customize set_direct_or_vtable_call
coleenp@8739 252 set_direct_or_vtable_call(invoke_code, method, index, sender_is_interface);
aoqi@0 253 }
aoqi@0 254
aoqi@0 255 void ConstantPoolCacheEntry::set_vtable_call(Bytecodes::Code invoke_code, methodHandle method, int index) {
aoqi@0 256 // either the method is a miranda or its holder should accept the given index
aoqi@0 257 assert(method->method_holder()->is_interface() || method->method_holder()->verify_vtable_index(index), "");
aoqi@0 258 // index >= 0; FIXME: inline and customize set_direct_or_vtable_call
coleenp@8739 259 set_direct_or_vtable_call(invoke_code, method, index, false);
aoqi@0 260 }
aoqi@0 261
dbuck@8997 262 void ConstantPoolCacheEntry::set_itable_call(Bytecodes::Code invoke_code,
dbuck@8997 263 KlassHandle referenced_klass,
dbuck@8997 264 methodHandle method, int index) {
aoqi@0 265 assert(method->method_holder()->verify_itable_index(index), "");
aoqi@0 266 assert(invoke_code == Bytecodes::_invokeinterface, "");
aoqi@0 267 InstanceKlass* interf = method->method_holder();
aoqi@0 268 assert(interf->is_interface(), "must be an interface");
aoqi@0 269 assert(!method->is_final_method(), "interfaces do not have final methods; cannot link to one here");
dbuck@8997 270 set_f1(referenced_klass());
dbuck@8997 271 set_f2((intx)method());
aoqi@0 272 set_method_flags(as_TosState(method->result_type()),
aoqi@0 273 0, // no option bits
aoqi@0 274 method()->size_of_parameters());
aoqi@0 275 set_bytecode_1(Bytecodes::_invokeinterface);
aoqi@0 276 }
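// Editor's note (sketch based on the stores above): for invokeinterface the
// entry ends up with _f1 = the referenced (resolved) Klass* and _f2 = the
// interface Method*, matching the "_f1 is a Klass* for an interface, _f2 is
// the method" case handled by adjust_method_entry() further down in this file.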
aoqi@0 277
aoqi@0 278
aoqi@0 279 void ConstantPoolCacheEntry::set_method_handle(constantPoolHandle cpool, const CallInfo &call_info) {
aoqi@0 280 set_method_handle_common(cpool, Bytecodes::_invokehandle, call_info);
aoqi@0 281 }
aoqi@0 282
aoqi@0 283 void ConstantPoolCacheEntry::set_dynamic_call(constantPoolHandle cpool, const CallInfo &call_info) {
aoqi@0 284 set_method_handle_common(cpool, Bytecodes::_invokedynamic, call_info);
aoqi@0 285 }
aoqi@0 286
aoqi@0 287 void ConstantPoolCacheEntry::set_method_handle_common(constantPoolHandle cpool,
aoqi@0 288 Bytecodes::Code invoke_code,
aoqi@0 289 const CallInfo &call_info) {
aoqi@0 290 // NOTE: This CPCE can be the subject of data races.
aoqi@0 291 // There are three words to update: flags, refs[f2], f1 (in that order).
aoqi@0 292 // Writers must store all other values before f1.
aoqi@0 293 // Readers must test f1 first for non-null before reading other fields.
aoqi@0 294 // Competing writers must acquire exclusive access via a lock.
aoqi@0 295 // A losing writer waits on the lock until the winner writes f1 and leaves
aoqi@0 296 // the lock, so that when the losing writer returns, he can use the linked
aoqi@0 297 // cache entry.
aoqi@0 298
aoqi@0 299 MonitorLockerEx ml(cpool->lock());
aoqi@0 300 if (!is_f1_null()) {
aoqi@0 301 return;
aoqi@0 302 }
aoqi@0 303
aoqi@0 304 const methodHandle adapter = call_info.resolved_method();
aoqi@0 305 const Handle appendix = call_info.resolved_appendix();
aoqi@0 306 const Handle method_type = call_info.resolved_method_type();
aoqi@0 307 const bool has_appendix = appendix.not_null();
aoqi@0 308 const bool has_method_type = method_type.not_null();
aoqi@0 309
aoqi@0 310 // Write the flags.
aoqi@0 311 set_method_flags(as_TosState(adapter->result_type()),
aoqi@0 312 ((has_appendix ? 1 : 0) << has_appendix_shift ) |
aoqi@0 313 ((has_method_type ? 1 : 0) << has_method_type_shift) |
aoqi@0 314 ( 1 << is_final_shift ),
aoqi@0 315 adapter->size_of_parameters());
aoqi@0 316
aoqi@0 317 if (TraceInvokeDynamic) {
aoqi@0 318 tty->print_cr("set_method_handle bc=%d appendix="PTR_FORMAT"%s method_type="PTR_FORMAT"%s method="PTR_FORMAT" ",
aoqi@0 319 invoke_code,
aoqi@0 320 (void *)appendix(), (has_appendix ? "" : " (unused)"),
aoqi@0 321 (void *)method_type(), (has_method_type ? "" : " (unused)"),
aoqi@0 322 (intptr_t)adapter());
aoqi@0 323 adapter->print();
aoqi@0 324 if (has_appendix) appendix()->print();
aoqi@0 325 }
aoqi@0 326
aoqi@0 327 // Method handle invokes and invokedynamic sites use both cp cache words.
aoqi@0 328 // refs[f2], if not null, contains a value passed as a trailing argument to the adapter.
aoqi@0 329 // In the general case, this could be the call site's MethodType,
aoqi@0 330 // for use with java.lang.Invokers.checkExactType, or else a CallSite object.
aoqi@0 331 // f1 contains the adapter method which manages the actual call.
aoqi@0 332 // In the general case, this is a compiled LambdaForm.
aoqi@0 333 // (The Java code is free to optimize these calls by binding other
aoqi@0 334 // sorts of methods and appendices to call sites.)
aoqi@0 335 // JVM-level linking is via f1, as if for invokespecial, and signatures are erased.
aoqi@0 336 // The appendix argument (if any) is added to the signature, and is counted in the parameter_size bits.
aoqi@0 337 // Even with the appendix, the method will never take more than 255 parameter slots.
aoqi@0 338 //
aoqi@0 339 // This means that given a call site like (List)mh.invoke("foo"),
aoqi@0 340 // the f1 method has signature '(Ljl/Object;Ljl/invoke/MethodType;)Ljl/Object;',
aoqi@0 341 // not '(Ljava/lang/String;)Ljava/util/List;'.
aoqi@0 342 // The fact that String and List are involved is encoded in the MethodType in refs[f2].
aoqi@0 343 // This allows us to create fewer method oops, while keeping type safety.
aoqi@0 344 //
aoqi@0 345
aoqi@0 346 objArrayHandle resolved_references = cpool->resolved_references();
aoqi@0 347 // Store appendix, if any.
aoqi@0 348 if (has_appendix) {
aoqi@0 349 const int appendix_index = f2_as_index() + _indy_resolved_references_appendix_offset;
aoqi@0 350 assert(appendix_index >= 0 && appendix_index < resolved_references->length(), "oob");
aoqi@0 351 assert(resolved_references->obj_at(appendix_index) == NULL, "init just once");
aoqi@0 352 resolved_references->obj_at_put(appendix_index, appendix());
aoqi@0 353 }
aoqi@0 354
aoqi@0 355 // Store MethodType, if any.
aoqi@0 356 if (has_method_type) {
aoqi@0 357 const int method_type_index = f2_as_index() + _indy_resolved_references_method_type_offset;
aoqi@0 358 assert(method_type_index >= 0 && method_type_index < resolved_references->length(), "oob");
aoqi@0 359 assert(resolved_references->obj_at(method_type_index) == NULL, "init just once");
aoqi@0 360 resolved_references->obj_at_put(method_type_index, method_type());
aoqi@0 361 }
aoqi@0 362
aoqi@0 363 release_set_f1(adapter()); // This must be the last one to set (see NOTE above)!
aoqi@0 364
aoqi@0 365 // The interpreter assembly code does not check byte_2,
aoqi@0 366 // but it is used by is_resolved, method_if_resolved, etc.
aoqi@0 367 set_bytecode_1(invoke_code);
aoqi@0 368 NOT_PRODUCT(verify(tty));
aoqi@0 369 if (TraceInvokeDynamic) {
aoqi@0 370 this->print(tty, 0);
aoqi@0 371 }
aoqi@0 372 }
aoqi@0 373
aoqi@0 374 Method* ConstantPoolCacheEntry::method_if_resolved(constantPoolHandle cpool) {
aoqi@0 375 // Decode the action of set_method and set_interface_call
aoqi@0 376 Bytecodes::Code invoke_code = bytecode_1();
aoqi@0 377 if (invoke_code != (Bytecodes::Code)0) {
aoqi@0 378 Metadata* f1 = f1_ord();
aoqi@0 379 if (f1 != NULL) {
aoqi@0 380 switch (invoke_code) {
aoqi@0 381 case Bytecodes::_invokeinterface:
aoqi@0 382 assert(f1->is_klass(), "");
aoqi@0 383 return klassItable::method_for_itable_index((Klass*)f1, f2_as_index());
aoqi@0 384 case Bytecodes::_invokestatic:
aoqi@0 385 case Bytecodes::_invokespecial:
aoqi@0 386 assert(!has_appendix(), "");
aoqi@0 387 case Bytecodes::_invokehandle:
aoqi@0 388 case Bytecodes::_invokedynamic:
aoqi@0 389 assert(f1->is_method(), "");
aoqi@0 390 return (Method*)f1;
aoqi@0 391 }
aoqi@0 392 }
aoqi@0 393 }
aoqi@0 394 invoke_code = bytecode_2();
aoqi@0 395 if (invoke_code != (Bytecodes::Code)0) {
aoqi@0 396 switch (invoke_code) {
aoqi@0 397 case Bytecodes::_invokevirtual:
aoqi@0 398 if (is_vfinal()) {
aoqi@0 399 // invokevirtual
aoqi@0 400 Method* m = f2_as_vfinal_method();
aoqi@0 401 assert(m->is_method(), "");
aoqi@0 402 return m;
aoqi@0 403 } else {
aoqi@0 404 int holder_index = cpool->uncached_klass_ref_index_at(constant_pool_index());
aoqi@0 405 if (cpool->tag_at(holder_index).is_klass()) {
aoqi@0 406 Klass* klass = cpool->resolved_klass_at(holder_index);
aoqi@0 407 if (!klass->oop_is_instance())
aoqi@0 408 klass = SystemDictionary::Object_klass();
aoqi@0 409 return InstanceKlass::cast(klass)->method_at_vtable(f2_as_index());
aoqi@0 410 }
aoqi@0 411 }
aoqi@0 412 break;
aoqi@0 413 }
aoqi@0 414 }
aoqi@0 415 return NULL;
aoqi@0 416 }
aoqi@0 417
aoqi@0 418
aoqi@0 419 oop ConstantPoolCacheEntry::appendix_if_resolved(constantPoolHandle cpool) {
aoqi@0 420 if (!has_appendix())
aoqi@0 421 return NULL;
aoqi@0 422 const int ref_index = f2_as_index() + _indy_resolved_references_appendix_offset;
aoqi@0 423 objArrayOop resolved_references = cpool->resolved_references();
aoqi@0 424 return resolved_references->obj_at(ref_index);
aoqi@0 425 }
aoqi@0 426
aoqi@0 427
aoqi@0 428 oop ConstantPoolCacheEntry::method_type_if_resolved(constantPoolHandle cpool) {
aoqi@0 429 if (!has_method_type())
aoqi@0 430 return NULL;
aoqi@0 431 const int ref_index = f2_as_index() + _indy_resolved_references_method_type_offset;
aoqi@0 432 objArrayOop resolved_references = cpool->resolved_references();
aoqi@0 433 return resolved_references->obj_at(ref_index);
aoqi@0 434 }
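// Editor's sketch (hedged): for an invokehandle/invokedynamic entry,
// f2_as_index() is the base of a small cluster of consecutive slots in
// cpool->resolved_references(), roughly
//
//   base + _indy_resolved_references_appendix_offset     -> appendix (if any)
//   base + _indy_resolved_references_method_type_offset  -> MethodType (if any)
//
// which is why both accessors above add a fixed offset to the same base index.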
aoqi@0 435
aoqi@0 436
aoqi@0 437 #if INCLUDE_JVMTI
dbuck@8997 438
dbuck@8997 439 void log_adjust(const char* entry_type, Method* old_method, Method* new_method, bool* trace_name_printed) {
dbuck@8997 440 if (RC_TRACE_IN_RANGE(0x00100000, 0x00400000)) {
dbuck@8997 441 if (!(*trace_name_printed)) {
dbuck@8997 442 // RC_TRACE_MESG macro has an embedded ResourceMark
dbuck@8997 443 RC_TRACE_MESG(("adjust: name=%s",
dbuck@8997 444 old_method->method_holder()->external_name()));
dbuck@8997 445 *trace_name_printed = true;
dbuck@8997 446 }
dbuck@8997 447 // RC_TRACE macro has an embedded ResourceMark
dbuck@8997 448 RC_TRACE(0x00400000, ("cpc %s entry update: %s(%s)",
dbuck@8997 449 entry_type,
dbuck@8997 450 new_method->name()->as_C_string(),
dbuck@8997 451 new_method->signature()->as_C_string()));
dbuck@8997 452 }
dbuck@8997 453 }
dbuck@8997 454
aoqi@0 455 // RedefineClasses() API support:
aoqi@0 456 // If this ConstantPoolCacheEntry refers to old_method then update it
aoqi@0 457 // to refer to new_method.
dbuck@8997 458 void ConstantPoolCacheEntry::adjust_method_entry(Method* old_method,
aoqi@0 459 Method* new_method, bool * trace_name_printed) {
aoqi@0 460
aoqi@0 461 if (is_vfinal()) {
aoqi@0 462 // virtual and final so _f2 contains method ptr instead of vtable index
aoqi@0 463 if (f2_as_vfinal_method() == old_method) {
aoqi@0 464 // match old_method so need an update
aoqi@0 465 // NOTE: can't use set_f2_as_vfinal_method as it asserts on different values
aoqi@0 466 _f2 = (intptr_t)new_method;
aoqi@0 467 }
dbuck@8997 468 return;
aoqi@0 469 }
aoqi@0 470
dbuck@8997 471 assert (_f1 != NULL, "should not call with uninteresting entry");
dbuck@8997 472
dbuck@8997 473 if (!(_f1->is_method())) {
dbuck@8997 474 // _f1 is a Klass* for an interface, _f2 is the method
dbuck@8997 475 if (f2_as_interface_method() == old_method) {
dbuck@8997 476 _f2 = (intptr_t)new_method;
dbuck@8997 477 log_adjust("interface", old_method, new_method, trace_name_printed);
dbuck@8997 478 }
dbuck@8997 479 } else if (_f1 == old_method) {
dbuck@8997 480 _f1 = new_method;
dbuck@8997 481 log_adjust("special, static or dynamic", old_method, new_method, trace_name_printed);
aoqi@0 482 }
aoqi@0 483 }
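// Editor's summary (not an upstream comment): which word adjust_method_entry()
// patches depends on how the call was linked, mirroring the encodings set up
// earlier in this file:
//
//   vfinal entry                       -> _f2 holds the Method*
//   interface entry (_f1 is a Klass*)  -> _f2 holds the Method*
//   special / static / dynamic         -> _f1 holds the Method*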
aoqi@0 484
aoqi@0 485 // a constant pool cache entry should never contain old or obsolete methods
aoqi@0 486 bool ConstantPoolCacheEntry::check_no_old_or_obsolete_entries() {
dbuck@8997 487 Method* m = get_interesting_method_entry(NULL);
dbuck@8997 488 // return false if m refers to a non-deleted old or obsolete method
dbuck@8997 489 if (m != NULL) {
dbuck@8997 490 assert(m->is_valid() && m->is_method(), "m is a valid method");
dbuck@8997 491 return !m->is_old() && !m->is_obsolete(); // old is always set for old and obsolete
dbuck@8997 492 } else {
aoqi@0 493 return true;
aoqi@0 494 }
aoqi@0 495 }
aoqi@0 496
sspitsyn@7636 497 Method* ConstantPoolCacheEntry::get_interesting_method_entry(Klass* k) {
aoqi@0 498 if (!is_method_entry()) {
aoqi@0 499 // not a method entry so not interesting by default
sspitsyn@7636 500 return NULL;
aoqi@0 501 }
aoqi@0 502 Method* m = NULL;
aoqi@0 503 if (is_vfinal()) {
aoqi@0 504 // virtual and final so _f2 contains method ptr instead of vtable index
aoqi@0 505 m = f2_as_vfinal_method();
aoqi@0 506 } else if (is_f1_null()) {
aoqi@0 507 // NULL _f1 means this is a virtual entry so also not interesting
sspitsyn@7636 508 return NULL;
aoqi@0 509 } else {
aoqi@0 510 if (!(_f1->is_method())) {
dbuck@8997 511 // _f1 is a Klass* for an interface
dbuck@8997 512 m = f2_as_interface_method();
dbuck@8997 513 } else {
dbuck@8997 514 m = f1_as_method();
aoqi@0 515 }
aoqi@0 516 }
aoqi@0 517 assert(m != NULL && m->is_method(), "sanity check");
aoqi@0 518 if (m == NULL || !m->is_method() || (k != NULL && m->method_holder() != k)) {
aoqi@0 519 // robustness for above sanity checks or method is not in
aoqi@0 520 // the interesting class
sspitsyn@7636 521 return NULL;
aoqi@0 522 }
aoqi@0 523 // the method is in the interesting class so the entry is interesting
sspitsyn@7636 524 return m;
aoqi@0 525 }
aoqi@0 526 #endif // INCLUDE_JVMTI
aoqi@0 527
aoqi@0 528 void ConstantPoolCacheEntry::print(outputStream* st, int index) const {
aoqi@0 529 // print separator
aoqi@0 530 if (index == 0) st->print_cr(" -------------");
aoqi@0 531 // print entry
aoqi@0 532 st->print("%3d ("PTR_FORMAT") ", index, (intptr_t)this);
aoqi@0 533 st->print_cr("[%02x|%02x|%5d]", bytecode_2(), bytecode_1(),
aoqi@0 534 constant_pool_index());
aoqi@0 535 st->print_cr(" [ "PTR_FORMAT"]", (intptr_t)_f1);
aoqi@0 536 st->print_cr(" [ "PTR_FORMAT"]", (intptr_t)_f2);
aoqi@0 537 st->print_cr(" [ "PTR_FORMAT"]", (intptr_t)_flags);
aoqi@0 538 st->print_cr(" -------------");
aoqi@0 539 }
aoqi@0 540
aoqi@0 541 void ConstantPoolCacheEntry::verify(outputStream* st) const {
aoqi@0 542 // not implemented yet
aoqi@0 543 }
aoqi@0 544
aoqi@0 545 // Implementation of ConstantPoolCache
aoqi@0 546
aoqi@0 547 ConstantPoolCache* ConstantPoolCache::allocate(ClassLoaderData* loader_data,
aoqi@0 548 const intStack& index_map,
aoqi@0 549 const intStack& invokedynamic_index_map,
aoqi@0 550 const intStack& invokedynamic_map, TRAPS) {
aoqi@0 551
aoqi@0 552 const int length = index_map.length() + invokedynamic_index_map.length();
aoqi@0 553 int size = ConstantPoolCache::size(length);
aoqi@0 554
aoqi@0 555 return new (loader_data, size, false, MetaspaceObj::ConstantPoolCacheType, THREAD)
aoqi@0 556 ConstantPoolCache(length, index_map, invokedynamic_index_map, invokedynamic_map);
aoqi@0 557 }
aoqi@0 558
aoqi@0 559 void ConstantPoolCache::initialize(const intArray& inverse_index_map,
aoqi@0 560 const intArray& invokedynamic_inverse_index_map,
aoqi@0 561 const intArray& invokedynamic_references_map) {
aoqi@0 562 for (int i = 0; i < inverse_index_map.length(); i++) {
aoqi@0 563 ConstantPoolCacheEntry* e = entry_at(i);
aoqi@0 564 int original_index = inverse_index_map[i];
aoqi@0 565 e->initialize_entry(original_index);
aoqi@0 566 assert(entry_at(i) == e, "sanity");
aoqi@0 567 }
aoqi@0 568
aoqi@0 569 // Append invokedynamic entries at the end
aoqi@0 570 int invokedynamic_offset = inverse_index_map.length();
aoqi@0 571 for (int i = 0; i < invokedynamic_inverse_index_map.length(); i++) {
aoqi@0 572 int offset = i + invokedynamic_offset;
aoqi@0 573 ConstantPoolCacheEntry* e = entry_at(offset);
aoqi@0 574 int original_index = invokedynamic_inverse_index_map[i];
aoqi@0 575 e->initialize_entry(original_index);
aoqi@0 576 assert(entry_at(offset) == e, "sanity");
aoqi@0 577 }
aoqi@0 578
aoqi@0 579 for (int ref = 0; ref < invokedynamic_references_map.length(); ref++) {
aoqi@0 580 const int cpci = invokedynamic_references_map[ref];
aoqi@0 581 if (cpci >= 0) {
aoqi@0 582 #ifdef ASSERT
aoqi@0 583 // invokedynamic and invokehandle have more entries; check if they
aoqi@0 584 // all point to the same constant pool cache entry.
aoqi@0 585 for (int entry = 1; entry < ConstantPoolCacheEntry::_indy_resolved_references_entries; entry++) {
aoqi@0 586 const int cpci_next = invokedynamic_references_map[ref + entry];
aoqi@0 587 assert(cpci == cpci_next, err_msg_res("%d == %d", cpci, cpci_next));
aoqi@0 588 }
aoqi@0 589 #endif
aoqi@0 590 entry_at(cpci)->initialize_resolved_reference_index(ref);
aoqi@0 591 ref += ConstantPoolCacheEntry::_indy_resolved_references_entries - 1; // skip extra entries
aoqi@0 592 }
aoqi@0 593 }
aoqi@0 594 }
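// Editor's sketch (hedged): after initialize() the cache is laid out roughly as
//
//   entries [0 .. inverse_index_map.length())          regular cp cache entries
//   entries [inverse_index_map.length() .. length())   invokedynamic/invokehandle entries
//
// and each cluster of _indy_resolved_references_entries slots in the
// resolved-references array is tied back to one of those entries via
// initialize_resolved_reference_index().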
aoqi@0 595
aoqi@0 596 #if INCLUDE_JVMTI
aoqi@0 597 // RedefineClasses() API support:
aoqi@0 598 // If any entry of this ConstantPoolCache points to any of
aoqi@0 599 // old_methods, replace it with the corresponding new_method.
sspitsyn@7636 600 void ConstantPoolCache::adjust_method_entries(InstanceKlass* holder, bool * trace_name_printed) {
aoqi@0 601 for (int i = 0; i < length(); i++) {
sspitsyn@7636 602 ConstantPoolCacheEntry* entry = entry_at(i);
sspitsyn@7636 603 Method* old_method = entry->get_interesting_method_entry(holder);
sspitsyn@7636 604 if (old_method == NULL || !old_method->is_old()) {
sspitsyn@7636 605 continue; // skip uninteresting entries
sspitsyn@7636 606 }
sspitsyn@7636 607 if (old_method->is_deleted()) {
sspitsyn@7636 608 // clean up entries with deleted methods
sspitsyn@7636 609 entry->initialize_entry(entry->constant_pool_index());
aoqi@0 610 continue;
aoqi@0 611 }
sspitsyn@7636 612 Method* new_method = holder->method_with_idnum(old_method->orig_method_idnum());
aoqi@0 613
sspitsyn@7636 614 assert(new_method != NULL, "method_with_idnum() should not be NULL");
sspitsyn@7636 615 assert(old_method != new_method, "sanity check");
aoqi@0 616
sspitsyn@7636 617 entry_at(i)->adjust_method_entry(old_method, new_method, trace_name_printed);
aoqi@0 618 }
aoqi@0 619 }
aoqi@0 620
aoqi@0 621 // the constant pool cache should never contain old or obsolete methods
aoqi@0 622 bool ConstantPoolCache::check_no_old_or_obsolete_entries() {
aoqi@0 623 for (int i = 1; i < length(); i++) {
sspitsyn@7636 624 if (entry_at(i)->get_interesting_method_entry(NULL) != NULL &&
aoqi@0 625 !entry_at(i)->check_no_old_or_obsolete_entries()) {
aoqi@0 626 return false;
aoqi@0 627 }
aoqi@0 628 }
aoqi@0 629 return true;
aoqi@0 630 }
aoqi@0 631
aoqi@0 632 void ConstantPoolCache::dump_cache() {
aoqi@0 633 for (int i = 1; i < length(); i++) {
sspitsyn@7636 634 if (entry_at(i)->get_interesting_method_entry(NULL) != NULL) {
aoqi@0 635 entry_at(i)->print(tty, i);
aoqi@0 636 }
aoqi@0 637 }
aoqi@0 638 }
aoqi@0 639 #endif // INCLUDE_JVMTI
aoqi@0 640
aoqi@0 641
aoqi@0 642 // Printing
aoqi@0 643
aoqi@0 644 void ConstantPoolCache::print_on(outputStream* st) const {
aoqi@0 645 assert(is_constantPoolCache(), "obj must be constant pool cache");
aoqi@0 646 st->print_cr("%s", internal_name());
aoqi@0 647 // print constant pool cache entries
aoqi@0 648 for (int i = 0; i < length(); i++) entry_at(i)->print(st, i);
aoqi@0 649 }
aoqi@0 650
aoqi@0 651 void ConstantPoolCache::print_value_on(outputStream* st) const {
aoqi@0 652 assert(is_constantPoolCache(), "obj must be constant pool cache");
aoqi@0 653 st->print("cache [%d]", length());
aoqi@0 654 print_address_on(st);
aoqi@0 655 st->print(" for ");
aoqi@0 656 constant_pool()->print_value_on(st);
aoqi@0 657 }
aoqi@0 658
aoqi@0 659
aoqi@0 660 // Verification
aoqi@0 661
aoqi@0 662 void ConstantPoolCache::verify_on(outputStream* st) {
aoqi@0 663 guarantee(is_constantPoolCache(), "obj must be constant pool cache");
aoqi@0 664 // print constant pool cache entries
aoqi@0 665 for (int i = 0; i < length(); i++) entry_at(i)->verify(st);
aoqi@0 666 }
