--- a/src/cpu/mips/vm/templateTable_mips_64.cpp	Mon Sep 18 13:48:37 2017 +0800
+++ b/src/cpu/mips/vm/templateTable_mips_64.cpp	Mon Sep 18 16:49:35 2017 +0800
@@ -2670,27 +2670,34 @@
     // Check to see if a field access watch has been set before we
     // take the time to call into the VM.
     Label L1;
-    assert_different_registers(cache, index, FSR);
+    // kill FSR
+    Register tmp1 = T2;
+    Register tmp2 = T1;
+    Register tmp3 = T3;
+    assert_different_registers(cache, index, AT);
     __ li(AT, (intptr_t)JvmtiExport::get_field_access_count_addr());
-    __ lw(FSR, AT, 0);
-    __ beq(FSR, R0, L1);
+    __ lw(AT, AT, 0);
+    __ beq(AT, R0, L1);
     __ delayed()->nop();
 
-    // We rely on the bytecode being resolved and the cpCache entry filled in.
+    __ get_cache_and_index_at_bcp(tmp2, tmp3, 1);
+
     // cache entry pointer
-    __ daddi(cache, cache, in_bytes(ConstantPoolCache::base_offset()));
-    __ shl(index, 4);
-    __ dadd(cache, cache, index);
+    __ daddi(tmp2, tmp2, in_bytes(ConstantPoolCache::base_offset()));
+    __ shl(tmp3, LogBytesPerWord);
+    __ dadd(tmp2, tmp2, tmp3);
     if (is_static) {
-      __ move(FSR, R0);
+      __ move(tmp1, R0);
     } else {
-      __ lw(FSR, SP, 0);
-      __ verify_oop(FSR);
+      __ ld(tmp1, SP, 0);
+      __ verify_oop(tmp1);
     }
-    // FSR: object pointer or NULL
-    // cache: cache entry pointer
+    // tmp1: object pointer or NULL
+    // tmp2: cache entry pointer
+    // tmp3: jvalue object on the stack
     __ call_VM(NOREG, CAST_FROM_FN_PTR(address,
-               InterpreterRuntime::post_field_access), FSR, cache);
+                                       InterpreterRuntime::post_field_access),
+               tmp1, tmp2, tmp3);
     __ get_cache_and_index_at_bcp(cache, index, 1);
     __ bind(L1);
   }
@@ -2729,7 +2736,7 @@
   const Register off = T2;
   const Register flags = T1;
   resolve_cache_and_index(byte_no, cache, index, sizeof(u2));
-  //jvmti_post_field_access(cache, index, is_static, false);
+  jvmti_post_field_access(cache, index, is_static, false);
   load_field_cp_cache_entry(obj, cache, index, off, flags, is_static);
 
   if (!is_static) pop_and_check_object(obj);
@@ -2885,74 +2892,75 @@
 // The registers cache and index expected to be set before call.
 // The function may destroy various registers, just not the cache and index registers.
 void TemplateTable::jvmti_post_field_mod(Register cache, Register index, bool is_static) {
+  transition(vtos, vtos);
+
   ByteSize cp_base_offset = ConstantPoolCache::base_offset();
 
   if (JvmtiExport::can_post_field_modification()) {
-    // Check to see if a field modification watch has been set before we take
-    // the time to call into the VM.
+    // Check to see if a field modification watch has been set before
+    // we take the time to call into the VM.
     Label L1;
-    assert_different_registers(cache, index, AT);
+    //kill AT, T1, T2, T3, T9
+    Register tmp1 = T2;
+    Register tmp2 = T1;
+    Register tmp3 = T3;
+    Register tmp4 = T9;
+    assert_different_registers(cache, index, tmp4);
 
     __ li(AT, JvmtiExport::get_field_modification_count_addr());
-    __ lw(FSR, AT, 0);
-    __ beq(FSR, R0, L1);
+    __ lw(AT, AT, 0);
+    __ beq(AT, R0, L1);
     __ delayed()->nop();
 
-    /* // We rely on the bytecode being resolved and the cpCache entry filled in.
-    resolve_cache_and_index(byte_no, T1, T1);
-    */
-    // The cache and index registers have been already set.
-    // This allows to eliminate this call but the cache and index
-    // registers have to be correspondingly used after this line.
-    __ get_cache_and_index_at_bcp(T1, T9, 1);
+    __ get_cache_and_index_at_bcp(tmp2, tmp4, 1);
 
     if (is_static) {
-      __ move(T2, R0);
+      __ move(tmp1, R0);
     } else {
-      // Life is harder. The stack holds the value on top,
-      // followed by the object.
-      // We don't know the size of the value, though;
-      // it could be one or two words
-      // depending on its type. As a result, we must find
-      // the type to determine where the object is.
+      // Life is harder. The stack holds the value on top, followed by
+      // the object. We don't know the size of the value, though; it
+      // could be one or two words depending on its type. As a result,
+      // we must find the type to determine where the object is.
       Label two_word, valsize_known;
-      __ dsll(AT, T1, 4);
-      __ dadd(AT, T1, AT);
-      __ lw(T3, AT, in_bytes(cp_base_offset
-                             + ConstantPoolCacheEntry::flags_offset()));
-      __ move(T2, SP);
-      __ shr(T3, ConstantPoolCacheEntry::tos_state_shift);
+      __ dsll(AT, tmp4, Address::times_8);
+      __ dadd(AT, tmp2, AT);
+      __ ld(tmp3, AT, in_bytes(cp_base_offset +
+                               ConstantPoolCacheEntry::flags_offset()));
+      __ shr(tmp3, ConstantPoolCacheEntry::tos_state_shift);
 
       // Make sure we don't need to mask ecx for tos_state_shift
       // after the above shift
       ConstantPoolCacheEntry::verify_tos_state_shift();
+      __ move(tmp1, SP);
      __ move(AT, ltos);
-      __ beq(T3, AT, two_word);
+      __ beq(tmp3, AT, two_word);
       __ delayed()->nop();
       __ move(AT, dtos);
-      __ beq(T3, AT, two_word);
+      __ beq(tmp3, AT, two_word);
       __ delayed()->nop();
       __ b(valsize_known);
-      __ delayed()->daddi(T2, T2,Interpreter::expr_offset_in_bytes(1) );
+      __ delayed()->daddi(tmp1, tmp1, Interpreter::expr_offset_in_bytes(1) );
 
       __ bind(two_word);
-      __ daddi(T2, T2,Interpreter::expr_offset_in_bytes(2));
+      __ daddi(tmp1, tmp1, Interpreter::expr_offset_in_bytes(2));
 
       __ bind(valsize_known);
       // setup object pointer
-      __ lw(T2, T2, 0*wordSize);
+      __ ld(tmp1, tmp1, 0*wordSize);
     }
     // cache entry pointer
-    __ daddi(T1, T1, in_bytes(cp_base_offset));
-    __ shl(T1, 4);
-    __ daddu(T1, T1, T1);
+    __ daddi(tmp2, tmp2, in_bytes(cp_base_offset));
+    __ shl(tmp4, LogBytesPerWord);
+    __ daddu(tmp2, tmp2, tmp4);
     // object (tos)
-    __ move(T3, SP);
-    // T2: object pointer set up above (NULL if static)
-    // T1: cache entry pointer
-    // T3: jvalue object on the stack
-    __ call_VM(NOREG, CAST_FROM_FN_PTR(address,
-               InterpreterRuntime::post_field_modification), T2, T1, T3);
+    __ move(tmp3, SP);
+    // tmp1: object pointer set up above (NULL if static)
+    // tmp2: cache entry pointer
+    // tmp3: jvalue object on the stack
+    __ call_VM(NOREG,
+               CAST_FROM_FN_PTR(address,
+                                InterpreterRuntime::post_field_modification),
+               tmp1, tmp2, tmp3);
     __ get_cache_and_index_at_bcp(cache, index, 1);
     __ bind(L1);
   }
@@ -2975,7 +2983,7 @@
   const Register bc = T3;
 
   resolve_cache_and_index(byte_no, cache, index, sizeof(u2));
-  //jvmti_post_field_mod(cache, index, is_static);
+  jvmti_post_field_mod(cache, index, is_static);
   load_field_cp_cache_entry(obj, cache, index, off, flags, is_static);
 
   Label notVolatile, Done;
@@ -3196,62 +3204,51 @@
     // Check to see if a field modification watch has been set before
     // we take the time to call into the VM.
     Label L2;
+    //kill AT, T1, T2, T3, T9
+    Register tmp1 = T2;
+    Register tmp2 = T1;
+    Register tmp3 = T3;
+    Register tmp4 = T9;
     __ li(AT, JvmtiExport::get_field_modification_count_addr());
-    __ lw(T3, AT, 0);
-    __ beq(T3, R0, L2);
+    __ lw(tmp3, AT, 0);
+    __ beq(tmp3, R0, L2);
     __ delayed()->nop();
-    __ pop_ptr(T2);
-    __ verify_oop(T2);
-    __ push_ptr(T2);
-    __ li(AT, -sizeof(jvalue));
-    __ daddu(SP, SP, AT);
-    __ move(T3, SP);
-
+    __ pop_ptr(tmp1);
+    __ verify_oop(tmp1);
+    __ push_ptr(tmp1);
     switch (bytecode()) {          // load values into the jvalue object
-    case Bytecodes::_fast_bputfield:
-      __ sb(FSR, SP, 0);
-      break;
-    case Bytecodes::_fast_sputfield:
-      __ sh(FSR, SP, 0);
-      break;
-    case Bytecodes::_fast_cputfield:
-      __ sh(FSR, SP, 0);
-      break;
-    case Bytecodes::_fast_iputfield:
-      __ sw(FSR, SP, 0);
-      break;
-    case Bytecodes::_fast_lputfield:
-      __ sd(FSR, SP, 0);
-      break;
-    case Bytecodes::_fast_fputfield:
-      __ swc1(FSF, SP, 0);
-      break;
-    case Bytecodes::_fast_dputfield:
-      __ sdc1(FSF, SP, 0);
-      break;
-    case Bytecodes::_fast_aputfield:
-      __ sd(FSR, SP, 0);
-      break;
+    case Bytecodes::_fast_aputfield: __ push_ptr(FSR); break;
+    case Bytecodes::_fast_bputfield: // fall through
+    case Bytecodes::_fast_sputfield: // fall through
+    case Bytecodes::_fast_cputfield: // fall through
+    case Bytecodes::_fast_iputfield: __ push_i(FSR); break;
+    case Bytecodes::_fast_dputfield: __ push_d(FSF); break;
+    case Bytecodes::_fast_fputfield: __ push_f(); break;
+    case Bytecodes::_fast_lputfield: __ push_l(FSR); break;
    default:  ShouldNotReachHere();
    }
-
-    // Save eax and sometimes edx because call_VM() will clobber them,
-    // then use them for JVM/DI purposes
-    __ push(FSR);
-    if (bytecode() == Bytecodes::_fast_lputfield) __ push(SSR);
+    __ move(tmp3, SP);
    // access constant pool cache entry
-    __ get_cache_entry_pointer_at_bcp(T1, T2, 1);
-    // no need, verified ahead
-    __ verify_oop(T2);
-
-    // ebx: object pointer copied above
-    // eax: cache entry pointer
-    // ecx: jvalue object on the stack
-    __ call_VM(NOREG, CAST_FROM_FN_PTR(address,
-               InterpreterRuntime::post_field_modification), T2, T1, T3);
-    if (bytecode() == Bytecodes::_fast_lputfield) __ pop(SSR);  // restore high value
-    __ lw(FSR, SP, 0);
-    __ daddiu(SP, SP, sizeof(jvalue) + 1 * wordSize);
+    __ get_cache_entry_pointer_at_bcp(tmp2, FSR, 1);
+    __ verify_oop(tmp1);
+    // tmp1: object pointer copied above
+    // tmp2: cache entry pointer
+    // tmp3: jvalue object on the stack
+    __ call_VM(NOREG,
+               CAST_FROM_FN_PTR(address,
+                                InterpreterRuntime::post_field_modification),
+               tmp1, tmp2, tmp3);
+
+    switch (bytecode()) {             // restore tos values
+    case Bytecodes::_fast_aputfield: __ pop_ptr(FSR); break;
+    case Bytecodes::_fast_bputfield: // fall through
+    case Bytecodes::_fast_sputfield: // fall through
+    case Bytecodes::_fast_cputfield: // fall through
+    case Bytecodes::_fast_iputfield: __ pop_i(FSR); break;
+    case Bytecodes::_fast_dputfield: __ pop_d(); break;
+    case Bytecodes::_fast_fputfield: __ pop_f(); break;
+    case Bytecodes::_fast_lputfield: __ pop_l(FSR); break;
+    }
     __ bind(L2);
   }
 }