src/share/vm/interpreter/rewriter.cpp

author       jrose
date         Thu, 17 Mar 2011 18:29:18 -0700
changeset    2641:d2134498fd3f
parent       2533:c5a923563727
child        2742:ed69575596ac
permissions  -rw-r--r--

7011865: JSR 292 CTW fails: !THREAD->is_Compiler_thread() failed: Can not load classes with the Compiler thread
Reviewed-by: kvn, never

duke@435 1 /*
never@2462 2 * Copyright (c) 1998, 2011, Oracle and/or its affiliates. All rights reserved.
duke@435 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
duke@435 4 *
duke@435 5 * This code is free software; you can redistribute it and/or modify it
duke@435 6 * under the terms of the GNU General Public License version 2 only, as
duke@435 7 * published by the Free Software Foundation.
duke@435 8 *
duke@435 9 * This code is distributed in the hope that it will be useful, but WITHOUT
duke@435 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
duke@435 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
duke@435 12 * version 2 for more details (a copy is included in the LICENSE file that
duke@435 13 * accompanied this code).
duke@435 14 *
duke@435 15 * You should have received a copy of the GNU General Public License version
duke@435 16 * 2 along with this work; if not, write to the Free Software Foundation,
duke@435 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
duke@435 18 *
trims@1907 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
trims@1907 20 * or visit www.oracle.com if you need additional information or have any
trims@1907 21 * questions.
duke@435 22 *
duke@435 23 */
duke@435 24
stefank@2314 25 #include "precompiled.hpp"
stefank@2314 26 #include "interpreter/bytecodes.hpp"
stefank@2314 27 #include "interpreter/interpreter.hpp"
stefank@2314 28 #include "interpreter/rewriter.hpp"
stefank@2314 29 #include "memory/gcLocker.hpp"
stefank@2314 30 #include "memory/oopFactory.hpp"
stefank@2314 31 #include "memory/resourceArea.hpp"
stefank@2314 32 #include "oops/generateOopMap.hpp"
stefank@2314 33 #include "oops/objArrayOop.hpp"
stefank@2314 34 #include "oops/oop.inline.hpp"
stefank@2314 35 #include "prims/methodComparator.hpp"
duke@435 36
jrose@1161 37 // Computes a CPC map (new_index -> original_index) for constant pool entries
duke@435 38 // that are referred to by the interpreter at runtime via the constant pool cache.
jrose@1161 39 // Also computes a CP map (original_index -> new_index).
jrose@1161 40 // Marks entries in CP which require additional processing.
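// (Each constant pool index added here receives a slot in the constant pool
//  cache; scan_method() later rewrites bytecode operands from the original
//  CP index to the corresponding cache index.)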
jrose@1161 41 void Rewriter::compute_index_maps() {
jrose@1161 42 const int length = _pool->length();
jrose@1161 43 init_cp_map(length);
jrose@2015 44 jint tag_mask = 0;
duke@435 45 for (int i = 0; i < length; i++) {
jrose@1161 46 int tag = _pool->tag_at(i).value();
jrose@2015 47 tag_mask |= (1 << tag);
jrose@1161 48 switch (tag) {
jrose@1161 49 case JVM_CONSTANT_InterfaceMethodref:
duke@435 50 case JVM_CONSTANT_Fieldref : // fall through
duke@435 51 case JVM_CONSTANT_Methodref : // fall through
jrose@1957 52 case JVM_CONSTANT_MethodHandle : // fall through
jrose@1957 53 case JVM_CONSTANT_MethodType : // fall through
jrose@2015 54 case JVM_CONSTANT_InvokeDynamic : // fall through
jrose@2353 55 case JVM_CONSTANT_InvokeDynamicTrans: // fall through
jrose@1161 56 add_cp_cache_entry(i);
jrose@1161 57 break;
duke@435 58 }
duke@435 59 }
jrose@1161 60
jrose@1161 61 guarantee((int)_cp_cache_map.length()-1 <= (int)((u2)-1),
jrose@1161 62 "all cp cache indexes fit in a u2");
jrose@2015 63
jrose@2015 64 _have_invoke_dynamic = ((tag_mask & (1 << JVM_CONSTANT_InvokeDynamic)) != 0);
jrose@2353 65 _have_invoke_dynamic |= ((tag_mask & (1 << JVM_CONSTANT_InvokeDynamicTrans)) != 0);
duke@435 66 }
duke@435 67
duke@435 68
jrose@1161 69 // Creates a constant pool cache given a CPC map
jrose@1161 70 void Rewriter::make_constant_pool_cache(TRAPS) {
jrose@1161 71 const int length = _cp_cache_map.length();
jrose@1161 72 constantPoolCacheOop cache =
ysr@2533 73 oopFactory::new_constantPoolCache(length, CHECK);
ysr@2533 74 No_Safepoint_Verifier nsv;
jrose@1161 75 cache->initialize(_cp_cache_map);
jrose@2015 76
jrose@2353 77 // Don't bother with the next pass if there is no JVM_CONSTANT_InvokeDynamic.
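// This pass records, in each invokedynamic cache entry, the cache index of
// its bootstrap method constant, when the BSM is itself a constant pool entry.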
jrose@2015 78 if (_have_invoke_dynamic) {
jrose@2015 79 for (int i = 0; i < length; i++) {
jrose@2015 80 int pool_index = cp_cache_entry_pool_index(i);
jrose@2015 81 if (pool_index >= 0 &&
jrose@2015 82 _pool->tag_at(pool_index).is_invoke_dynamic()) {
jrose@2015 83 int bsm_index = _pool->invoke_dynamic_bootstrap_method_ref_index_at(pool_index);
jrose@2015 84 if (bsm_index != 0) {
jrose@2015 85 assert(_pool->tag_at(bsm_index).is_method_handle(), "must be a MH constant");
jrose@2015 86 // There is a CP cache entry holding the BSM for these calls.
jrose@2015 87 int bsm_cache_index = cp_entry_to_cp_cache(bsm_index);
jrose@2015 88 cache->entry_at(i)->initialize_bootstrap_method_index_in_cache(bsm_cache_index);
jrose@2015 89 } else {
jrose@2015 90 // There is no CP cache entry holding the BSM for these calls.
jrose@2015 91 // We will need to look for a class-global BSM, later.
jrose@2015 92 guarantee(AllowTransitionalJSR292, "");
jrose@2015 93 }
jrose@2015 94 }
jrose@2015 95 }
jrose@2015 96 }
jrose@2015 97
jrose@1161 98 _pool->set_cache(cache);
jrose@1161 99 cache->set_constant_pool(_pool());
duke@435 100 }
duke@435 101
duke@435 102
duke@435 103
duke@435 104 // The new finalization semantics says that registration of
duke@435 105 // finalizable objects must be performed on successful return from the
duke@435 106 // Object.<init> constructor. We could implement this trivially if
duke@435 107 // <init> were never rewritten but since JVMTI allows this to occur, a
duke@435 108 // more complicated solution is required. A special return bytecode
duke@435 109 // is used only by Object.<init> to signal the finalization
duke@435 110 // registration point. Additionally, local 0 must be preserved so it's
duke@435 111 // available to pass to the registration function. For simplicity we
duke@435 112 // require that local 0 is never overwritten so it's available as an
duke@435 113 // argument for registration.
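// (Concretely, in the code below a 'return' in Object.<init> is rewritten to
//  '_return_register_finalizer', and any store into local 0 is rejected with
//  an IncompatibleClassChangeError.)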
duke@435 114
duke@435 115 void Rewriter::rewrite_Object_init(methodHandle method, TRAPS) {
duke@435 116 RawBytecodeStream bcs(method);
duke@435 117 while (!bcs.is_last_bytecode()) {
duke@435 118 Bytecodes::Code opcode = bcs.raw_next();
duke@435 119 switch (opcode) {
duke@435 120 case Bytecodes::_return: *bcs.bcp() = Bytecodes::_return_register_finalizer; break;
duke@435 121
duke@435 122 case Bytecodes::_istore:
duke@435 123 case Bytecodes::_lstore:
duke@435 124 case Bytecodes::_fstore:
duke@435 125 case Bytecodes::_dstore:
duke@435 126 case Bytecodes::_astore:
duke@435 127 if (bcs.get_index() != 0) continue;
duke@435 128
duke@435 129 // fall through
duke@435 130 case Bytecodes::_istore_0:
duke@435 131 case Bytecodes::_lstore_0:
duke@435 132 case Bytecodes::_fstore_0:
duke@435 133 case Bytecodes::_dstore_0:
duke@435 134 case Bytecodes::_astore_0:
duke@435 135 THROW_MSG(vmSymbols::java_lang_IncompatibleClassChangeError(),
duke@435 136 "can't overwrite local 0 in Object.<init>");
duke@435 137 break;
duke@435 138 }
duke@435 139 }
duke@435 140 }
duke@435 141
duke@435 142
jrose@1161 143 // Rewrite a classfile-order CP index into a native-order CPC index.
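// (The rewritten index is stored in native byte order, presumably so the
//  interpreter can load it without byte-swapping; see the native_u4 note in
//  rewrite_invokedynamic below.)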
jrose@1920 144 void Rewriter::rewrite_member_reference(address bcp, int offset) {
jrose@1161 145 address p = bcp + offset;
jrose@1161 146 int cp_index = Bytes::get_Java_u2(p);
jrose@1161 147 int cache_index = cp_entry_to_cp_cache(cp_index);
jrose@1161 148 Bytes::put_native_u2(p, cache_index);
jrose@1161 149 }
jrose@1161 150
jrose@1161 151
jrose@1920 152 void Rewriter::rewrite_invokedynamic(address bcp, int offset) {
jrose@1161 153 address p = bcp + offset;
jrose@1161 154 assert(p[-1] == Bytecodes::_invokedynamic, "");
jrose@1161 155 int cp_index = Bytes::get_Java_u2(p);
jrose@1161 156 int cpc = maybe_add_cp_cache_entry(cp_index); // add lazily
jrose@1494 157 int cpc2 = add_secondary_cp_cache_entry(cpc);
jrose@1161 158
jrose@1161 159 // Replace the trailing four bytes with a CPC index for the dynamic
jrose@1161 160 // call site. Unlike other CPC entries, there is one per bytecode,
jrose@1161 161 // not just one per distinct CP entry. In other words, the
jrose@1161 162 // CPC-to-CP relation is many-to-one for invokedynamic entries.
jrose@1161 163 // This means we must use a larger index size than u2 to address
jrose@1161 164 // all these entries. That is the main reason invokedynamic
jrose@1161 165 // must have a five-byte instruction format. (Of course, other JVM
jrose@1161 166 // implementations can use the bytes for other purposes.)
jrose@1494 167 Bytes::put_native_u4(p, constantPoolCacheOopDesc::encode_secondary_index(cpc2));
jrose@1161 168 // Note: We use native_u4 format exclusively for 4-byte indexes.
jrose@1161 169 }
jrose@1161 170
jrose@1161 171
jrose@1957 172 // Rewrite some ldc bytecodes to _fast_aldc
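// Only CONSTANT_MethodHandle and CONSTANT_MethodType operands are rewritten
// here; ldc of other constant types is left untouched.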
jrose@1957 173 void Rewriter::maybe_rewrite_ldc(address bcp, int offset, bool is_wide) {
jrose@1957 174 assert((*bcp) == (is_wide ? Bytecodes::_ldc_w : Bytecodes::_ldc), "");
jrose@1957 175 address p = bcp + offset;
jrose@1957 176 int cp_index = is_wide ? Bytes::get_Java_u2(p) : (u1)(*p);
jrose@1957 177 constantTag tag = _pool->tag_at(cp_index).value();
jrose@1957 178 if (tag.is_method_handle() || tag.is_method_type()) {
jrose@1957 179 int cache_index = cp_entry_to_cp_cache(cp_index);
jrose@1957 180 if (is_wide) {
jrose@1957 181 (*bcp) = Bytecodes::_fast_aldc_w;
jrose@1957 182 assert(cache_index == (u2)cache_index, "");
jrose@1957 183 Bytes::put_native_u2(p, cache_index);
jrose@1957 184 } else {
jrose@1957 185 (*bcp) = Bytecodes::_fast_aldc;
jrose@1957 186 assert(cache_index == (u1)cache_index, "");
jrose@1957 187 (*p) = (u1)cache_index;
jrose@1957 188 }
jrose@1957 189 }
jrose@1957 190 }
jrose@1957 191
jrose@1957 192
duke@435 193 // Rewrites a method given the index_map information
jrose@1161 194 void Rewriter::scan_method(methodOop method) {
duke@435 195
duke@435 196 int nof_jsrs = 0;
duke@435 197 bool has_monitor_bytecodes = false;
duke@435 198
duke@435 199 {
duke@435 200 // We cannot tolerate a GC in this block, because we've
duke@435 201 // cached the bytecodes in 'code_base'. If the methodOop
duke@435 202 // moves, the bytecodes will also move.
duke@435 203 No_Safepoint_Verifier nsv;
duke@435 204 Bytecodes::Code c;
duke@435 205
duke@435 206 // Bytecodes and their length
duke@435 207 const address code_base = method->code_base();
duke@435 208 const int code_length = method->code_size();
duke@435 209
duke@435 210 int bc_length;
duke@435 211 for (int bci = 0; bci < code_length; bci += bc_length) {
duke@435 212 address bcp = code_base + bci;
jrose@1161 213 int prefix_length = 0;
duke@435 214 c = (Bytecodes::Code)(*bcp);
duke@435 215
duke@435 216 // Since we have the code, see if we can get the length
duke@435 217 // directly. Some more complicated bytecodes will report
duke@435 218 // a length of zero, meaning we need to make another method
duke@435 219 // call to calculate the length.
duke@435 220 bc_length = Bytecodes::length_for(c);
duke@435 221 if (bc_length == 0) {
never@2462 222 bc_length = Bytecodes::length_at(method, bcp);
duke@435 223
duke@435 224 // length_at will put us at the bytecode after the one modified
duke@435 225 // by 'wide'. We don't currently examine any of the bytecodes
duke@435 226 // modified by wide, but in case we do in the future...
duke@435 227 if (c == Bytecodes::_wide) {
jrose@1161 228 prefix_length = 1;
duke@435 229 c = (Bytecodes::Code)bcp[1];
duke@435 230 }
duke@435 231 }
duke@435 232
duke@435 233 assert(bc_length != 0, "impossible bytecode length");
duke@435 234
duke@435 235 switch (c) {
duke@435 236 case Bytecodes::_lookupswitch : {
duke@435 237 #ifndef CC_INTERP
never@2462 238 Bytecode_lookupswitch bc(method, bcp);
jrose@1920 239 (*bcp) = (
never@2462 240 bc.number_of_pairs() < BinarySwitchThreshold
duke@435 241 ? Bytecodes::_fast_linearswitch
duke@435 242 : Bytecodes::_fast_binaryswitch
duke@435 243 );
duke@435 244 #endif
duke@435 245 break;
duke@435 246 }
duke@435 247 case Bytecodes::_getstatic : // fall through
duke@435 248 case Bytecodes::_putstatic : // fall through
duke@435 249 case Bytecodes::_getfield : // fall through
duke@435 250 case Bytecodes::_putfield : // fall through
duke@435 251 case Bytecodes::_invokevirtual : // fall through
duke@435 252 case Bytecodes::_invokespecial : // fall through
jrose@1161 253 case Bytecodes::_invokestatic :
jrose@1161 254 case Bytecodes::_invokeinterface:
jrose@1161 255 rewrite_member_reference(bcp, prefix_length+1);
duke@435 256 break;
jrose@1161 257 case Bytecodes::_invokedynamic:
jrose@1920 258 rewrite_invokedynamic(bcp, prefix_length+1);
jrose@1161 259 break;
jrose@1957 260 case Bytecodes::_ldc:
jrose@1957 261 maybe_rewrite_ldc(bcp, prefix_length+1, false);
jrose@1957 262 break;
jrose@1957 263 case Bytecodes::_ldc_w:
jrose@1957 264 maybe_rewrite_ldc(bcp, prefix_length+1, true);
jrose@1957 265 break;
duke@435 266 case Bytecodes::_jsr : // fall through
duke@435 267 case Bytecodes::_jsr_w : nof_jsrs++; break;
duke@435 268 case Bytecodes::_monitorenter : // fall through
duke@435 269 case Bytecodes::_monitorexit : has_monitor_bytecodes = true; break;
duke@435 270 }
duke@435 271 }
duke@435 272 }
duke@435 273
duke@435 274 // Update access flags
duke@435 275 if (has_monitor_bytecodes) {
duke@435 276 method->set_has_monitor_bytecodes();
duke@435 277 }
duke@435 278
duke@435 279 // The presence of a jsr bytecode implies that the method might have to be
duke@435 280 // rewritten, so we run the oopMapGenerator on the method
duke@435 281 if (nof_jsrs > 0) {
duke@435 282 method->set_has_jsrs();
jrose@1161 283 // Second pass will revisit this method.
jrose@1161 284 assert(method->has_jsrs(), "");
jrose@1161 285 }
jrose@1161 286 }
duke@435 287
jrose@1161 288 // After constant pool is created, revisit methods containing jsrs.
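// ResolveOopMapConflicts may hand back a rewritten copy of the method; if it
// does, the original is poisoned below so that stray execution of it is easy
// to spot.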
jrose@1161 289 methodHandle Rewriter::rewrite_jsrs(methodHandle method, TRAPS) {
jrose@1161 290 ResolveOopMapConflicts romc(method);
jrose@1161 291 methodHandle original_method = method;
jrose@1161 292 method = romc.do_potential_rewrite(CHECK_(methodHandle()));
jrose@1161 293 if (method() != original_method()) {
jrose@1161 294 // Insert invalid bytecode into original methodOop and set
jrose@1161 295 // interpreter entrypoint, so that executing this method
jrose@1161 296 // will manifest itself in an easily recognizable form.
jrose@1161 297 address bcp = original_method->bcp_from(0);
jrose@1161 298 *bcp = (u1)Bytecodes::_shouldnotreachhere;
jrose@1161 299 int kind = Interpreter::method_kind(original_method);
jrose@1161 300 original_method->set_interpreter_kind(kind);
duke@435 301 }
duke@435 302
jrose@1161 303 // Update monitor matching info.
jrose@1161 304 if (romc.monitor_safe()) {
jrose@1161 305 method->set_guaranteed_monitor_matching();
jrose@1161 306 }
duke@435 307
duke@435 308 return method;
duke@435 309 }
duke@435 310
duke@435 311
duke@435 312 void Rewriter::rewrite(instanceKlassHandle klass, TRAPS) {
duke@435 313 ResourceMark rm(THREAD);
twisti@1573 314 Rewriter rw(klass, klass->constants(), klass->methods(), CHECK);
jrose@1161 315 // (That's all, folks.)
jrose@1161 316 }
jrose@1161 317
twisti@1573 318
twisti@1573 319 void Rewriter::rewrite(instanceKlassHandle klass, constantPoolHandle cpool, objArrayHandle methods, TRAPS) {
twisti@1573 320 ResourceMark rm(THREAD);
twisti@1573 321 Rewriter rw(klass, cpool, methods, CHECK);
twisti@1573 322 // (That's all, folks.)
twisti@1573 323 }
twisti@1573 324
twisti@1573 325
twisti@1573 326 Rewriter::Rewriter(instanceKlassHandle klass, constantPoolHandle cpool, objArrayHandle methods, TRAPS)
jrose@1161 327 : _klass(klass),
twisti@1573 328 _pool(cpool),
twisti@1573 329 _methods(methods)
jrose@1161 330 {
jrose@1161 331 assert(_pool->cache() == NULL, "constant pool cache must not be set yet");
duke@435 332
duke@435 333 // determine index maps for methodOop rewriting
jrose@1161 334 compute_index_maps();
duke@435 335
jrose@1161 336 if (RegisterFinalizersAtInit && _klass->name() == vmSymbols::java_lang_Object()) {
jrose@1291 337 bool did_rewrite = false;
jrose@1161 338 int i = _methods->length();
duke@435 339 while (i-- > 0) {
jrose@1161 340 methodOop method = (methodOop)_methods->obj_at(i);
duke@435 341 if (method->intrinsic_id() == vmIntrinsics::_Object_init) {
duke@435 342 // rewrite the return bytecodes of Object.<init> to register the
duke@435 343 // object for finalization if needed.
duke@435 344 methodHandle m(THREAD, method);
duke@435 345 rewrite_Object_init(m, CHECK);
jrose@1291 346 did_rewrite = true;
duke@435 347 break;
duke@435 348 }
duke@435 349 }
jrose@1291 350 assert(did_rewrite, "must find Object::<init> to rewrite it");
duke@435 351 }
duke@435 352
jrose@1161 353 // rewrite methods, in two passes
jrose@1161 354 int i, len = _methods->length();
jrose@1161 355
jrose@1161 356 for (i = len; --i >= 0; ) {
jrose@1161 357 methodOop method = (methodOop)_methods->obj_at(i);
jrose@1161 358 scan_method(method);
jrose@1161 359 }
jrose@1161 360
jrose@1161 361 // allocate constant pool cache, now that we've seen all the bytecodes
jrose@1161 362 make_constant_pool_cache(CHECK);
jrose@1161 363
jrose@1161 364 for (i = len; --i >= 0; ) {
jrose@1161 365 methodHandle m(THREAD, (methodOop)_methods->obj_at(i));
jrose@1161 366
jrose@1161 367 if (m->has_jsrs()) {
jrose@1161 368 m = rewrite_jsrs(m, CHECK);
duke@435 369 // Method might have gotten rewritten.
jrose@1161 370 _methods->obj_at_put(i, m());
duke@435 371 }
jrose@1161 372
jrose@1161 373 // Set up method entry points for compiler and interpreter.
jrose@1161 374 m->link_method(m, CHECK);
jrose@1929 375
jrose@1929 376 #ifdef ASSERT
jrose@1929 377 if (StressMethodComparator) {
jrose@1929 378 static int nmc = 0;
jrose@1929 379 for (int j = i; j >= 0 && j >= i-4; j--) {
jrose@1929 380 if ((++nmc % 1000) == 0) tty->print_cr("Have run MethodComparator %d times...", nmc);
jrose@1929 381 bool z = MethodComparator::methods_EMCP(m(), (methodOop)_methods->obj_at(j));
jrose@1929 382 if (j == i && !z) {
jrose@1929 383 tty->print("MethodComparator FAIL: "); m->print(); m->print_codes();
jrose@1929 384 assert(z, "method must compare equal to itself");
jrose@1929 385 }
jrose@1929 386 }
jrose@1929 387 }
jrose@1929 388 #endif //ASSERT
duke@435 389 }
duke@435 390 }
