src/share/vm/interpreter/rewriter.cpp

Wed, 28 May 2008 21:06:24 -0700

author
coleenp
date
Wed, 28 May 2008 21:06:24 -0700
changeset 602
feeb96a45707
parent 435
a61af66fc99e
child 977
9a25e0c45327
permissions
-rw-r--r--

6696264: assert("narrow oop can never be zero") for GCBasher & ParNewGC
Summary: decouple set_klass() with zeroing the gap when compressed.
Reviewed-by: kvn, ysr, jrose

duke@435 1 /*
duke@435 2 * Copyright 1998-2005 Sun Microsystems, Inc. All Rights Reserved.
duke@435 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
duke@435 4 *
duke@435 5 * This code is free software; you can redistribute it and/or modify it
duke@435 6 * under the terms of the GNU General Public License version 2 only, as
duke@435 7 * published by the Free Software Foundation.
duke@435 8 *
duke@435 9 * This code is distributed in the hope that it will be useful, but WITHOUT
duke@435 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
duke@435 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
duke@435 12 * version 2 for more details (a copy is included in the LICENSE file that
duke@435 13 * accompanied this code).
duke@435 14 *
duke@435 15 * You should have received a copy of the GNU General Public License version
duke@435 16 * 2 along with this work; if not, write to the Free Software Foundation,
duke@435 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
duke@435 18 *
duke@435 19 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
duke@435 20 * CA 95054 USA or visit www.sun.com if you need additional information or
duke@435 21 * have any questions.
duke@435 22 *
duke@435 23 */
duke@435 24
duke@435 25 # include "incls/_precompiled.incl"
duke@435 26 # include "incls/_rewriter.cpp.incl"
duke@435 27
duke@435 28
duke@435 29 // Computes an index_map (new_index -> original_index) for contant pool entries
duke@435 30 // that are referred to by the interpreter at runtime via the constant pool cache.
duke@435 31 void Rewriter::compute_index_maps(constantPoolHandle pool, intArray*& index_map, intStack*& inverse_index_map) {
duke@435 32 const int length = pool->length();
duke@435 33 index_map = new intArray(length, -1);
duke@435 34 // Choose an initial value large enough that we don't get frequent
duke@435 35 // calls to grow().
duke@435 36 inverse_index_map = new intStack(length / 2);
duke@435 37 for (int i = 0; i < length; i++) {
duke@435 38 switch (pool->tag_at(i).value()) {
duke@435 39 case JVM_CONSTANT_Fieldref : // fall through
duke@435 40 case JVM_CONSTANT_Methodref : // fall through
duke@435 41 case JVM_CONSTANT_InterfaceMethodref: {
duke@435 42 index_map->at_put(i, inverse_index_map->length());
duke@435 43 inverse_index_map->append(i);
duke@435 44 }
duke@435 45 }
duke@435 46 }
duke@435 47 }
duke@435 48
duke@435 49
duke@435 50 // Creates a constant pool cache given an inverse_index_map
duke@435 51 constantPoolCacheHandle Rewriter::new_constant_pool_cache(intArray& inverse_index_map, TRAPS) {
duke@435 52 const int length = inverse_index_map.length();
duke@435 53 constantPoolCacheOop cache = oopFactory::new_constantPoolCache(length, CHECK_(constantPoolCacheHandle()));
duke@435 54 cache->initialize(inverse_index_map);
duke@435 55 return constantPoolCacheHandle(THREAD, cache);
duke@435 56 }
duke@435 57
duke@435 58
duke@435 59
// The new finalization semantics says that registration of
// finalizable objects must be performed on successful return from the
// Object.<init> constructor. We could implement this trivially if
// <init> were never rewritten but since JVMTI allows this to occur, a
// more complicated solution is required. A special return bytecode
// is used only by Object.<init> to signal the finalization
// registration point. Additionally local 0 must be preserved so it's
// available to pass to the registration function. For simplicity we
// require that local 0 is never overwritten so it's available as an
// argument for registration.
duke@435 70
// Rewrites Object.<init> so that every plain _return becomes the special
// _return_register_finalizer bytecode (the finalization registration point),
// and rejects any method that stores into local 0, since the receiver in
// local 0 must survive to be passed to the registration function.
void Rewriter::rewrite_Object_init(methodHandle method, TRAPS) {
  RawBytecodeStream bcs(method);
  while (!bcs.is_last_bytecode()) {
    Bytecodes::Code opcode = bcs.raw_next();
    switch (opcode) {
      // Patch the bytecode in place; operand layout is unchanged.
      case Bytecodes::_return: *bcs.bcp() = Bytecodes::_return_register_finalizer; break;

      case Bytecodes::_istore:
      case Bytecodes::_lstore:
      case Bytecodes::_fstore:
      case Bytecodes::_dstore:
      case Bytecodes::_astore:
        // Stores to locals other than 0 are harmless -- keep scanning.
        // (continue advances the enclosing while loop, not the switch.)
        if (bcs.get_index() != 0) continue;

        // fall through
      case Bytecodes::_istore_0:
      case Bytecodes::_lstore_0:
      case Bytecodes::_fstore_0:
      case Bytecodes::_dstore_0:
      case Bytecodes::_astore_0:
        // Any store into local 0 would clobber the receiver needed for
        // finalizer registration; reject the method. THROW_MSG returns.
        THROW_MSG(vmSymbols::java_lang_IncompatibleClassChangeError(),
                  "can't overwrite local 0 in Object.<init>");
        break;
    }
  }
}
duke@435 97
duke@435 98
// Rewrites a method given the index_map information: patches field/method
// access bytecodes to refer to constant pool cache entries (in native byte
// order), specializes lookupswitch, and records jsr/monitor usage on the
// methodOop. May return a different methodOop if jsr rewriting occurred.
methodHandle Rewriter::rewrite_method(methodHandle method, intArray& index_map, TRAPS) {

  int nof_jsrs = 0;
  bool has_monitor_bytecodes = false;

  {
    // We cannot tolerate a GC in this block, because we've
    // cached the bytecodes in 'code_base'. If the methodOop
    // moves, the bytecodes will also move.
    No_Safepoint_Verifier nsv;
    Bytecodes::Code c;

    // Bytecodes and their length
    const address code_base = method->code_base();
    const int code_length = method->code_size();

    int bc_length;
    for (int bci = 0; bci < code_length; bci += bc_length) {
      address bcp = code_base + bci;
      c = (Bytecodes::Code)(*bcp);

      // Since we have the code, see if we can get the length
      // directly. Some more complicated bytecodes will report
      // a length of zero, meaning we need to make another method
      // call to calculate the length.
      bc_length = Bytecodes::length_for(c);
      if (bc_length == 0) {
        bc_length = Bytecodes::length_at(bcp);

        // length_at will put us at the bytecode after the one modified
        // by 'wide'. We don't currently examine any of the bytecodes
        // modified by wide, but in case we do in the future...
        if (c == Bytecodes::_wide) {
          c = (Bytecodes::Code)bcp[1];
        }
      }

      assert(bc_length != 0, "impossible bytecode length");

      switch (c) {
        case Bytecodes::_lookupswitch : {
#ifndef CC_INTERP
          // Pick the interpreter-specific variant: linear scan for small
          // switches, binary search once the pair count reaches the threshold.
          Bytecode_lookupswitch* bc = Bytecode_lookupswitch_at(bcp);
          bc->set_code(
            bc->number_of_pairs() < BinarySwitchThreshold
            ? Bytecodes::_fast_linearswitch
            : Bytecodes::_fast_binaryswitch
          );
#endif
          break;
        }
        case Bytecodes::_getstatic      : // fall through
        case Bytecodes::_putstatic      : // fall through
        case Bytecodes::_getfield       : // fall through
        case Bytecodes::_putfield       : // fall through
        case Bytecodes::_invokevirtual  : // fall through
        case Bytecodes::_invokespecial  : // fall through
        case Bytecodes::_invokestatic   : // fall through
        case Bytecodes::_invokeinterface: {
          // Replace the 2-byte Java-order constant pool index with the
          // corresponding cache index, stored in native byte order for
          // fast access by the interpreter.
          address p = bcp + 1;
          Bytes::put_native_u2(p, index_map[Bytes::get_Java_u2(p)]);
          break;
        }
        case Bytecodes::_jsr            : // fall through
        case Bytecodes::_jsr_w          : nof_jsrs++; break;
        case Bytecodes::_monitorenter   : // fall through
        case Bytecodes::_monitorexit    : has_monitor_bytecodes = true; break;
      }
    }
  }

  // Update access flags
  if (has_monitor_bytecodes) {
    method->set_has_monitor_bytecodes();
  }

  // The presence of a jsr bytecode implies that the method might potentially
  // have to be rewritten, so we run the oopMapGenerator on the method
  if (nof_jsrs > 0) {
    method->set_has_jsrs();
    ResolveOopMapConflicts romc(method);
    methodHandle original_method = method;
    method = romc.do_potential_rewrite(CHECK_(methodHandle()));
    if (method() != original_method()) {
      // Insert invalid bytecode into original methodOop and set
      // interpreter entrypoint, so that executing this method
      // will manifest itself in an easily recognizable form.
      address bcp = original_method->bcp_from(0);
      *bcp = (u1)Bytecodes::_shouldnotreachhere;
      int kind = Interpreter::method_kind(original_method);
      original_method->set_interpreter_kind(kind);
    }

    // Update monitor matching info.
    if (romc.monitor_safe()) {
      method->set_guaranteed_monitor_matching();
    }
  }

  // Setup method entrypoints for compiler and interpreter
  method->link_method(method, CHECK_(methodHandle()));

  return method;
}
duke@435 204
duke@435 205
duke@435 206 void Rewriter::rewrite(instanceKlassHandle klass, TRAPS) {
duke@435 207 // gather starting points
duke@435 208 ResourceMark rm(THREAD);
duke@435 209 constantPoolHandle pool (THREAD, klass->constants());
duke@435 210 objArrayHandle methods (THREAD, klass->methods());
duke@435 211 assert(pool->cache() == NULL, "constant pool cache must not be set yet");
duke@435 212
duke@435 213 // determine index maps for methodOop rewriting
duke@435 214 intArray* index_map = NULL;
duke@435 215 intStack* inverse_index_map = NULL;
duke@435 216 compute_index_maps(pool, index_map, inverse_index_map);
duke@435 217
duke@435 218 // allocate constant pool cache
duke@435 219 constantPoolCacheHandle cache = new_constant_pool_cache(*inverse_index_map, CHECK);
duke@435 220 pool->set_cache(cache());
duke@435 221 cache->set_constant_pool(pool());
duke@435 222
duke@435 223 if (RegisterFinalizersAtInit && klass->name() == vmSymbols::java_lang_Object()) {
duke@435 224 int i = methods->length();
duke@435 225 while (i-- > 0) {
duke@435 226 methodOop method = (methodOop)methods->obj_at(i);
duke@435 227 if (method->intrinsic_id() == vmIntrinsics::_Object_init) {
duke@435 228 // rewrite the return bytecodes of Object.<init> to register the
duke@435 229 // object for finalization if needed.
duke@435 230 methodHandle m(THREAD, method);
duke@435 231 rewrite_Object_init(m, CHECK);
duke@435 232 break;
duke@435 233 }
duke@435 234 }
duke@435 235 }
duke@435 236
duke@435 237 // rewrite methods
duke@435 238 { int i = methods->length();
duke@435 239 while (i-- > 0) {
duke@435 240 methodHandle m(THREAD, (methodOop)methods->obj_at(i));
duke@435 241 m = rewrite_method(m, *index_map, CHECK);
duke@435 242 // Method might have gotten rewritten.
duke@435 243 methods->obj_at_put(i, m());
duke@435 244 }
duke@435 245 }
duke@435 246 }

mercurial