--- a/src/cpu/sparc/vm/interp_masm_sparc.cpp	Fri Aug 31 16:39:35 2012 -0700
+++ b/src/cpu/sparc/vm/interp_masm_sparc.cpp	Sat Sep 01 13:25:18 2012 -0400
@@ -28,8 +28,8 @@
 #include "interpreter/interpreterRuntime.hpp"
 #include "oops/arrayOop.hpp"
 #include "oops/markOop.hpp"
-#include "oops/methodDataOop.hpp"
-#include "oops/methodOop.hpp"
+#include "oops/methodData.hpp"
+#include "oops/method.hpp"
 #include "prims/jvmtiExport.hpp"
 #include "prims/jvmtiRedefineClassesTrace.hpp"
 #include "prims/jvmtiThreadState.hpp"
@@ -514,17 +514,16 @@
 
   // Reset SP by subtracting more space from Lesp.
   Label done;
-  verify_oop(Lmethod);
   assert(G4_scratch != Gframe_size, "Only you can prevent register aliasing!");
 
   // A native does not need to do this, since its callee does not change SP.
-  ld(Lmethod, methodOopDesc::access_flags_offset(), Gframe_size);  // Load access flags.
+  ld(Lmethod, Method::access_flags_offset(), Gframe_size);  // Load access flags.
   btst(JVM_ACC_NATIVE, Gframe_size);
   br(Assembler::notZero, false, Assembler::pt, done);
   delayed()->nop();
 
   // Compute max expression stack+register save area
-  lduh(Lmethod, in_bytes(methodOopDesc::max_stack_offset()), Gframe_size);  // Load max stack.
+  lduh(Lmethod, in_bytes(Method::max_stack_offset()), Gframe_size);  // Load max stack.
   add( Gframe_size, frame::memory_parameter_word_sp_offset, Gframe_size );
 
   //
@@ -610,7 +609,7 @@
 
   // Assume we want to go compiled if available
 
-  ld_ptr(G5_method, in_bytes(methodOopDesc::from_interpreted_offset()), target);
+  ld_ptr(G5_method, in_bytes(Method::from_interpreted_offset()), target);
 
   if (JvmtiExport::can_post_interpreter_events()) {
     // JVMTI events, such as single-stepping, are implemented partly by avoiding running
@@ -622,11 +621,11 @@
     const Address interp_only(G2_thread, JavaThread::interp_only_mode_offset());
     ld(interp_only, scratch);
     cmp_zero_and_br(Assembler::notZero, scratch, skip_compiled_code, true, Assembler::pn);
-    delayed()->ld_ptr(G5_method, in_bytes(methodOopDesc::interpreter_entry_offset()), target);
+    delayed()->ld_ptr(G5_method, in_bytes(Method::interpreter_entry_offset()), target);
     bind(skip_compiled_code);
   }
 
-  // the i2c_adapters need methodOop in G5_method (right? %%%)
+  // the i2c_adapters need Method* in G5_method (right? %%%)
   // do the call
 #ifdef ASSERT
   {
@@ -725,20 +724,18 @@
   if (should_set_CC == set_CC) tst(Rdst);
 }
 
-
-void InterpreterMacroAssembler::get_cache_index_at_bcp(Register cache, Register tmp,
+void InterpreterMacroAssembler::get_cache_index_at_bcp(Register temp, Register index,
                                                         int bcp_offset, size_t index_size) {
   assert(bcp_offset > 0, "bcp is still pointing to start of bytecode");
   if (index_size == sizeof(u2)) {
-    get_2_byte_integer_at_bcp(bcp_offset, cache, tmp, Unsigned);
+    get_2_byte_integer_at_bcp(bcp_offset, temp, index, Unsigned);
   } else if (index_size == sizeof(u4)) {
     assert(EnableInvokeDynamic, "giant index used only for JSR 292");
-    get_4_byte_integer_at_bcp(bcp_offset, cache, tmp);
-    assert(constantPoolCacheOopDesc::decode_secondary_index(~123) == 123, "else change next line");
-    xor3(tmp, -1, tmp);  // convert to plain index
+    get_4_byte_integer_at_bcp(bcp_offset, temp, index);
+    assert(ConstantPool::decode_invokedynamic_index(~123) == 123, "else change next line");
+    xor3(index, -1, index);  // convert to plain index
   } else if (index_size == sizeof(u1)) {
-    assert(EnableInvokeDynamic, "tiny index used only for JSR 292");
-    ldub(Lbcp, bcp_offset, tmp);
+    ldub(Lbcp, bcp_offset, index);
   } else {
     ShouldNotReachHere();
   }
@@ -765,7 +762,7 @@
                                                              int bcp_offset,
                                                              size_t index_size) {
   get_cache_and_index_at_bcp(cache, temp, bcp_offset, index_size);
-  ld_ptr(cache, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::indices_offset(), bytecode);
+  ld_ptr(cache, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::indices_offset(), bytecode);
   const int shift_count = (1 + byte_no) * BitsPerByte;
   assert((byte_no == TemplateTable::f1_byte && shift_count == ConstantPoolCacheEntry::bytecode_1_shift) ||
          (byte_no == TemplateTable::f2_byte && shift_count == ConstantPoolCacheEntry::bytecode_2_shift),
@@ -790,12 +787,32 @@
   // and from word index to byte offset
   sll(tmp, exact_log2(in_words(ConstantPoolCacheEntry::size()) * BytesPerWord), tmp);
   // skip past the header
-  add(tmp, in_bytes(constantPoolCacheOopDesc::base_offset()), tmp);
+  add(tmp, in_bytes(ConstantPoolCache::base_offset()), tmp);
   // construct pointer to cache entry
   add(LcpoolCache, tmp, cache);
 }
 
 
+// Load object from cpool->resolved_references(index)
+void InterpreterMacroAssembler::load_resolved_reference_at_index(
+                                           Register result, Register index) {
+  assert_different_registers(result, index);
+  assert_not_delayed();
+  // convert from field index to resolved_references() index and from
+  // word index to byte offset. Since this is a java object, it can be compressed
+  Register tmp = index;  // reuse
+  sll(index, LogBytesPerHeapOop, tmp);
+  get_constant_pool(result);
+  // load pointer for resolved_references[] objArray
+  ld_ptr(result, ConstantPool::resolved_references_offset_in_bytes(), result);
+  // JNIHandles::resolve(result)
+  ld_ptr(result, 0, result);
+  // Add in the index
+  add(result, tmp, result);
+  load_heap_oop(result, arrayOopDesc::base_offset_in_bytes(T_OBJECT), result);
+}
+
+
 // Generate a subtype check: branch to ok_is_subtype if sub_klass is
 // a subtype of super_klass.  Blows registers Rsuper_klass, Rsub_klass, tmp1, tmp2.
 void InterpreterMacroAssembler::gen_subtype_check(Register Rsub_klass,
@@ -939,25 +956,25 @@
 
 
 void InterpreterMacroAssembler::get_const(Register Rdst) {
-  ld_ptr(Lmethod, in_bytes(methodOopDesc::const_offset()), Rdst);
+  ld_ptr(Lmethod, in_bytes(Method::const_offset()), Rdst);
 }
 
 
 void InterpreterMacroAssembler::get_constant_pool(Register Rdst) {
   get_const(Rdst);
-  ld_ptr(Rdst, in_bytes(constMethodOopDesc::constants_offset()), Rdst);
+  ld_ptr(Rdst, in_bytes(ConstMethod::constants_offset()), Rdst);
 }
 
 
 void InterpreterMacroAssembler::get_constant_pool_cache(Register Rdst) {
   get_constant_pool(Rdst);
-  ld_ptr(Rdst, constantPoolOopDesc::cache_offset_in_bytes(), Rdst);
+  ld_ptr(Rdst, ConstantPool::cache_offset_in_bytes(), Rdst);
 }
 
 
 void InterpreterMacroAssembler::get_cpool_and_tags(Register Rcpool, Register Rtags) {
   get_constant_pool(Rcpool);
-  ld_ptr(Rcpool, constantPoolOopDesc::tags_offset_in_bytes(), Rtags);
+  ld_ptr(Rcpool, ConstantPool::tags_offset_in_bytes(), Rtags);
 }
 
 
@@ -985,7 +1002,7 @@
   stbool(G0, do_not_unlock_if_synchronized); // reset the flag
 
   // check if synchronized method
-  const Address access_flags(Lmethod, methodOopDesc::access_flags_offset());
+  const Address access_flags(Lmethod, Method::access_flags_offset());
   interp_verify_oop(Otos_i, state, __FILE__, __LINE__);
   push(state); // save tos
   ld(access_flags, G3_scratch); // Load access flags.
@@ -1121,7 +1138,6 @@
   notify_method_exit(false, state, NotifyJVMTI);
 
   interp_verify_oop(Otos_i, state, __FILE__, __LINE__);
-  verify_oop(Lmethod);
   verify_thread();
 
   // return tos
@@ -1295,16 +1311,16 @@
 
 #ifndef CC_INTERP
 
-// Get the method data pointer from the methodOop and set the
+// Get the method data pointer from the Method* and set the
 // specified register to its value.
 
 void InterpreterMacroAssembler::set_method_data_pointer() {
   assert(ProfileInterpreter, "must be profiling interpreter");
   Label get_continue;
 
-  ld_ptr(Lmethod, in_bytes(methodOopDesc::method_data_offset()), ImethodDataPtr);
+  ld_ptr(Lmethod, in_bytes(Method::method_data_offset()), ImethodDataPtr);
   test_method_data_pointer(get_continue);
-  add(ImethodDataPtr, in_bytes(methodDataOopDesc::data_offset()), ImethodDataPtr);
+  add(ImethodDataPtr, in_bytes(MethodData::data_offset()), ImethodDataPtr);
   bind(get_continue);
 }
 
@@ -1315,10 +1331,10 @@
   Label zero_continue;
 
   // Test MDO to avoid the call if it is NULL.
-  ld_ptr(Lmethod, in_bytes(methodOopDesc::method_data_offset()), ImethodDataPtr);
+  ld_ptr(Lmethod, in_bytes(Method::method_data_offset()), ImethodDataPtr);
   test_method_data_pointer(zero_continue);
   call_VM_leaf(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::bcp_to_di), Lmethod, Lbcp);
-  add(ImethodDataPtr, in_bytes(methodDataOopDesc::data_offset()), ImethodDataPtr);
+  add(ImethodDataPtr, in_bytes(MethodData::data_offset()), ImethodDataPtr);
   add(ImethodDataPtr, O0, ImethodDataPtr);
   bind(zero_continue);
 }
@@ -1339,8 +1355,8 @@
   // If the mdp is valid, it will point to a DataLayout header which is
   // consistent with the bcp.  The converse is highly probable also.
   lduh(ImethodDataPtr, in_bytes(DataLayout::bci_offset()), G3_scratch);
-  ld_ptr(Lmethod, methodOopDesc::const_offset(), O5);
-  add(G3_scratch, in_bytes(constMethodOopDesc::codes_offset()), G3_scratch);
+  ld_ptr(Lmethod, Method::const_offset(), O5);
+  add(G3_scratch, in_bytes(ConstMethod::codes_offset()), G3_scratch);
   add(G3_scratch, O5, G3_scratch);
   cmp(Lbcp, G3_scratch);
   brx(Assembler::equal, false, Assembler::pt, verify_continue);
@@ -2072,14 +2088,14 @@
 void InterpreterMacroAssembler::increment_invocation_counter( Register Rtmp, Register Rtmp2 ) {
   assert(UseCompiler, "incrementing must be useful");
 #ifdef CC_INTERP
-  Address inv_counter(G5_method, methodOopDesc::invocation_counter_offset() +
+  Address inv_counter(G5_method, Method::invocation_counter_offset() +
                                  InvocationCounter::counter_offset());
-  Address be_counter (G5_method, methodOopDesc::backedge_counter_offset() +
+  Address be_counter (G5_method, Method::backedge_counter_offset() +
                                  InvocationCounter::counter_offset());
 #else
-  Address inv_counter(Lmethod, methodOopDesc::invocation_counter_offset() +
+  Address inv_counter(Lmethod, Method::invocation_counter_offset() +
                                InvocationCounter::counter_offset());
-  Address be_counter (Lmethod, methodOopDesc::backedge_counter_offset() +
+  Address be_counter (Lmethod, Method::backedge_counter_offset() +
                                InvocationCounter::counter_offset());
 #endif /* CC_INTERP */
   int delta = InvocationCounter::count_increment;
@@ -2108,14 +2124,14 @@
 void InterpreterMacroAssembler::increment_backedge_counter( Register Rtmp, Register Rtmp2 ) {
   assert(UseCompiler, "incrementing must be useful");
 #ifdef CC_INTERP
-  Address be_counter (G5_method, methodOopDesc::backedge_counter_offset() +
+  Address be_counter (G5_method, Method::backedge_counter_offset() +
                                  InvocationCounter::counter_offset());
-  Address inv_counter(G5_method, methodOopDesc::invocation_counter_offset() +
+  Address inv_counter(G5_method, Method::invocation_counter_offset() +
                                  InvocationCounter::counter_offset());
#else
-  Address be_counter (Lmethod, methodOopDesc::backedge_counter_offset() +
+  Address be_counter (Lmethod, Method::backedge_counter_offset() +
                               InvocationCounter::counter_offset());
-  Address inv_counter(Lmethod, methodOopDesc::invocation_counter_offset() +
+  Address inv_counter(Lmethod, Method::invocation_counter_offset() +
                               InvocationCounter::counter_offset());
 #endif /* CC_INTERP */
   int delta = InvocationCounter::count_increment;
@@ -2152,7 +2168,7 @@
   cmp_and_br_short(backedge_count, Rtmp, Assembler::lessUnsigned, Assembler::pt, did_not_overflow);
 
   // When ProfileInterpreter is on, the backedge_count comes from the
-  // methodDataOop, which value does not get reset on the call to
+  // MethodData*, which value does not get reset on the call to
   // frequency_counter_overflow().  To avoid excessive calls to the overflow
   // routine while the method is being compiled, add a second test to make sure
   // the overflow function is called only once every overflow_frequency.
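The "second test" described in the comment above is the usual power-of-two mask check: with a counter that is never reset, the expensive overflow path is taken only on every overflow_frequency-th backedge. A minimal standalone sketch of that idea (the frequency value 1024 is an example assumption, not a value taken from this file):

    #include <cstdio>

    // Illustrative sketch only, not the SPARC code emitted above: call the
    // overflow routine only when the low bits of the ever-growing backedge
    // counter are all zero.  Requires overflow_frequency to be a power of two.
    int main() {
      const unsigned overflow_frequency = 1024;  // example assumption
      for (unsigned backedge_count = 1; backedge_count <= 4096; ++backedge_count) {
        if ((backedge_count & (overflow_frequency - 1)) == 0) {
          // This is the point where the interpreter would call its
          // frequency_counter_overflow() routine.
          std::printf("backedge_count=%u -> overflow routine invoked\n", backedge_count);
        }
      }
      return 0;
    }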
@@ -2212,10 +2228,10 @@
 
 
 // local helper function for the verify_oop_or_return_address macro
-static bool verify_return_address(methodOopDesc* m, int bci) {
+static bool verify_return_address(Method* m, int bci) {
 #ifndef PRODUCT
   address pc = (address)(m->constMethod())
-             + in_bytes(constMethodOopDesc::codes_offset()) + bci;
+             + in_bytes(ConstMethod::codes_offset()) + bci;
   // assume it is a valid return address if it is inside m and is preceded by a jsr
   if (!m->contains(pc)) return false;
   address jsr_pc;
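For readers new to the resolved_references mechanism this change introduces: the load_resolved_reference_at_index stub added earlier in this patch follows the constant pool's resolved_references handle to a Java object array and loads the element at the given index, so the only arithmetic involved is the usual array-element offset computation. That computation is sketched below as standalone C++; the two constants are example assumptions standing in for arrayOopDesc::base_offset_in_bytes(T_OBJECT) and LogBytesPerHeapOop, not values read from HotSpot headers.

    #include <cstdint>
    #include <cstdio>

    // Standalone sketch of the offset math in load_resolved_reference_at_index:
    // scale the element index by the heap-oop size, then add the objArray header.
    int main() {
      const std::intptr_t kArrayBaseOffset = 16;  // assumed objArray header size
      const int           kLogHeapOopSize  = 2;   // assumed 4-byte compressed oops
      for (int index = 0; index < 4; ++index) {
        // sll(index, LogBytesPerHeapOop, tmp), then the base offset applied by load_heap_oop
        std::intptr_t byte_offset =
            (static_cast<std::intptr_t>(index) << kLogHeapOopSize) + kArrayBaseOffset;
        std::printf("resolved_references[%d] is read from array base + %ld bytes\n",
                    index, (long)byte_offset);
      }
      return 0;
    }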