--- a/src/cpu/mips/vm/c1_LIRAssembler_mips.cpp	Sat Jan 06 16:30:58 2018 +0800
+++ b/src/cpu/mips/vm/c1_LIRAssembler_mips.cpp	Thu May 24 19:49:50 2018 +0800
@@ -316,8 +316,8 @@
 
 void LIR_Assembler::jobject2reg_with_patching(Register reg, CodeEmitInfo* info) {
   jobject o = NULL;
-  PatchingStub* patch = new PatchingStub(_masm, PatchingStub::load_klass_id);
   int oop_index = __ oop_recorder()->allocate_oop_index(o);
+  PatchingStub* patch = new PatchingStub(_masm, patching_id(info), oop_index);
   RelocationHolder rspec = oop_Relocation::spec(oop_index);
   __ relocate(rspec);
 #ifndef _LP64
@@ -326,57 +326,21 @@
   __ addiu(reg, reg, Assembler::split_low((int)o));
 #else
   //li may not pass NativeMovConstReg::verify. see nativeMovConstReg_at(pc_start()); in PatchingStub::install. by aoqi
+//  __ li48(reg, (long)o);
   __ li48(reg, (long)o);
 #endif
 //  patching_epilog(patch, LIR_Op1::patch_normal, noreg, info);
   patching_epilog(patch, lir_patch_normal, reg, info);
 }
 
-
-void LIR_Assembler::monitorexit(LIR_Opr obj_opr, LIR_Opr lock_opr, Register unused, int monitor_no, Register exception) {
-
-  if (exception->is_valid()) {
-    // preserve exception
-    // note: the monitor_exit runtime call is a leaf routine
-    //       and cannot block => no GC can happen
-    // The slow case (MonitorAccessStub) uses the first two stack slots
-    // ([SP+0] and [SP+4]), therefore we store the exception at [esp+8]
-    __ st_ptr(exception, SP, 2 * wordSize);
-  }
-
-  Register obj_reg  = obj_opr->as_register();
-  Register lock_reg = lock_opr->as_register();
-
-  // compute pointer to BasicLock
-  //Address lock_addr = frame_map()->address_for_monitor_lock_index(monitor_no);
-  Address lock_addr = frame_map()->address_for_monitor_lock(monitor_no);
-  __ lea(lock_reg, lock_addr);
-  // unlock object
-  MonitorAccessStub* slow_case = new MonitorExitStub(lock_opr, true, monitor_no);
-  // temporary fix: must be created after exceptionhandler, therefore as call stub
-  _slow_case_stubs->append(slow_case);
-  if (UseFastLocking) {
-    // try inlined fast unlocking first, revert to slow locking if it fails
-    // note: lock_reg points to the displaced header since the displaced header offset is 0!
-    assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
-    __ unlock_object(NOREG, obj_reg, lock_reg, *slow_case->entry());
-  } else {
-    // always do slow unlocking
-    // note: the slow unlocking code could be inlined here, however if we use
-    //       slow unlocking, speed doesn't matter anyway and this solution is
-    //       simpler and requires less duplicated code - additionally, the
-    //       slow unlocking code is the same in either case which simplifies
-    //       debugging
-    __ b_far(*slow_case->entry());
-    __ delayed()->nop();
-  }
-  // done
-  __ bind(*slow_case->continuation());
-
-  if (exception->is_valid()) {
-    // restore exception
-    __ ld_ptr(exception, SP, 2 * wordSize);
-  }
+void LIR_Assembler::klass2reg_with_patching(Register reg, CodeEmitInfo* info) {
+  Metadata *o = NULL;
+  int index = __ oop_recorder()->allocate_metadata_index(o);
+  PatchingStub* patch = new PatchingStub(_masm, PatchingStub::load_klass_id, index);
+  RelocationHolder rspec = metadata_Relocation::spec(index);
+  __ relocate(rspec);
+  __ li48(reg, (long)o);
+  patching_epilog(patch, lir_patch_normal, reg, info);
 }
 
 // This specifies the esp decrement needed to build the frame
@@ -405,61 +369,28 @@
   // generate code for exception handler
   address handler_base = __ start_a_stub(exception_handler_size);
   if (handler_base == NULL) {
-    //no enough space
+    // not enough space
     bailout("exception handler overflow");
     return -1;
   }
 
-
-
-  //compilation()->offsets()->set_value(CodeOffsets::Exceptions, code_offset());
-  // if the method does not have an exception handler, then there is
-  // no reason to search for one
-  //if (compilation()->has_exception_handlers() || JvmtiExport::can_post_exceptions()) {
-  // the exception oop and pc are in V0 and V1
-  // no other registers need to be preserved, so invalidate them
-  // check that there is really an exception
-//  __ verify_not_null_oop(V0);
-
-  // search an exception handler (V0: exception oop, V1: throwing pc)
-//  __ call(Runtime1::entry_for(Runtime1::handle_exception_nofpu_id),
-//          relocInfo::runtime_call_type);
-//  __ delayed()->nop();
-  // if the call returns here, then the exception handler for particular
-  // exception doesn't exist -> unwind activation and forward exception to caller
-  //}
-  int offset = code_offset();
-
-  // the exception oop is in V0
+  int offset = code_offset();
+
+  // the exception oop and pc are in V0 and V1
   // no other registers need to be preserved, so invalidate them
+  //__ invalidate_registers(false, true, true, false, true, true);
+
   // check that there is really an exception
   __ verify_not_null_oop(V0);
-  //FIXME:wuhui??
-  //__ call(RuntimeAddress(Runtime1::entry_for(Runtime1::handle_exception_from_callee_id)));
-  //__ delayed()->nop();
-  __ should_not_reach_here();
-  guarantee(code_offset() - offset <= exception_handler_size, "overflow");
-  __ end_a_stub();
-  return offset;
-
-  // unlock the receiver/klass if necessary
-  // V0: exception
-//  ciMethod* method = compilation()->method();
-//  if (method->is_synchronized() && GenerateSynchronizationCode) {
-//#ifndef _LP64
-//by_css
-//    monitorexit(FrameMap::_t0_oop_opr, FrameMap::_t6_opr, NOREG, 0, V0);
-//#else
-//    monitorexit(FrameMap::_t0_oop_opr, FrameMap::_a6_opr, NOREG, 0, V0);
-//#endif
-//  }
-
-  // unwind activation and forward exception to caller
-  // V0: exception
-//  __ jmp(Runtime1::entry_for(Runtime1::unwind_exception_id),
-//         relocInfo::runtime_call_type);
-//  __ delayed()->nop();
-//  __ end_a_stub();
+
+  // search an exception handler (V0: exception oop, V1: throwing pc)
+  __ call(Runtime1::entry_for(Runtime1::handle_exception_from_callee_id));
+  __ delayed()->nop();
+  __ should_not_reach_here();
+  guarantee(code_offset() - offset <= exception_handler_size, "overflow");
+  __ end_a_stub();
+
+  return offset;
 }
 
 // Emit the code to remove the frame from the stack in the exception
@@ -472,46 +403,52 @@
 #endif
 
   int offset = code_offset();
-/*  // Fetch the exception from TLS and clear out exception related thread state
-  __ get_thread(rsi);
-  __ movptr(rax, Address(rsi, JavaThread::exception_oop_offset()));
-  __ movptr(Address(rsi, JavaThread::exception_oop_offset()), (intptr_t)NULL_WORD);
-  __ movptr(Address(rsi, JavaThread::exception_pc_offset()), (intptr_t)NULL_WORD);
+  // Fetch the exception from TLS and clear out exception related thread state
+  Register thread = TREG;
+#ifndef OPT_THREAD
+  __ get_thread(thread);
+#endif
+  __ ld_ptr(V0, Address(thread, JavaThread::exception_oop_offset()));
+  __ st_ptr(R0, Address(thread, JavaThread::exception_oop_offset()));
+  __ st_ptr(R0, Address(thread, JavaThread::exception_pc_offset()));
 
   __ bind(_unwind_handler_entry);
-  __ verify_not_null_oop(rax);
+  __ verify_not_null_oop(V0);
   if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
-    __ mov(rsi, rax);  // Preserve the exception
+    __ move(S0, V0);  // Preserve the exception (S0 is callee-saved)
   }
-  // Preform needed unlocking
-  MonitorExitStub* stub = NULL;
+
+  // Perform needed unlocking
+  MonitorExitStub* stub = NULL;
   if (method()->is_synchronized()) {
-    monitor_address(0, FrameMap::rax_opr);
-    stub = new MonitorExitStub(FrameMap::rax_opr, true, 0);
-    __ unlock_object(rdi, rbx, rax, *stub->entry());
+    monitor_address(0, FrameMap::_v0_opr);
+    stub = new MonitorExitStub(FrameMap::_v0_opr, true, 0);
+    __ unlock_object(A0, A1, V0, *stub->entry());
     __ bind(*stub->continuation());
   }
 
   if (compilation()->env()->dtrace_method_probes()) {
-    __ get_thread(rax);
-    __ movptr(Address(rsp, 0), rax);
-    __ mov_metadata(Address(rsp, sizeof(void*)), method()->constant_encoding());
-    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit)));
+    __ move(A0, thread);
+    __ mov_metadata(A1, method()->constant_encoding());
+    __ patchable_call(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit));
   }
 
   if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
-    __ mov(rax, rsi);  // Restore the exception
+    __ move(V0, S0);  // Restore the exception
   }
 
   // remove the activation and dispatch to the unwind handler
-  __ remove_frame(initial_frame_size_in_bytes());
-  __ jump(RuntimeAddress(Runtime1::entry_for(Runtime1::unwind_exception_id)));
+  // leave activation of nmethod
+  __ remove_frame(initial_frame_size_in_bytes());
+
+  __ jmp(Runtime1::entry_for(Runtime1::unwind_exception_id));
+  __ delayed()->nop();
 
   // Emit the slow path assembly
-  if (stub != NULL) {
+  if (stub != NULL) {
     stub->emit_code(this);
   }
-*/
+
   return offset;
 }
 
@@ -653,7 +590,7 @@
 void LIR_Assembler::return_op(LIR_Opr result) {
   assert(result->is_illegal() || !result->is_single_cpu() || result->as_register() == V0, "word returns are in V0");
   // Pop the stack before the safepoint code
-  __ leave();
+  __ remove_frame(initial_frame_size_in_bytes());
 #ifndef _LP64
 //by aoqi
   __ lui(AT, Assembler::split_high((intptr_t)os::get_polling_page()
@@ -674,6 +611,7 @@
 #endif
 #endif
 
+  __ pop(RA);
   __ jr(RA);
   __ delayed()->nop();
 }
@@ -724,16 +662,15 @@
   assert(dest->is_register(), "should not call otherwise");
   LIR_Const* c = src->as_constant_ptr();
   switch (c->type()) {
+    case T_ADDRESS: {
+      assert(patch_code == lir_patch_none, "no patching handled here");
+      __ move(dest->as_register(), c->as_jint()); // FIXME
+      break;
+    }
+
     case T_INT: {
-      jint con = c->as_jint();
-      if (dest->is_single_cpu()) {
-        assert(patch_code == lir_patch_none, "no patching handled here");
-        __ move(dest->as_register(), con);
-      } else {
-        assert(dest->is_single_fpu(), "wrong register kind");
-        __ move(AT, con);
-        __ mtc1(AT, dest->as_float_reg());
-      }
+      assert(patch_code == lir_patch_none, "no patching handled here");
+      __ move(dest->as_register(), c->as_jint());
       break;
     }
 
@@ -772,6 +709,15 @@
       break;
     }
 
+    case T_METADATA: {
+      if (patch_code != lir_patch_none) {
+        klass2reg_with_patching(dest->as_register(), info);
+      } else {
+        __ mov_metadata(dest->as_register(), c->as_metadata());
+      }
+      break;
+    }
+
     case T_FLOAT: {
       address const_addr = float_constant(c->as_jfloat());
       assert (const_addr != NULL, "must create float constant in the constant table");
@@ -855,6 +801,11 @@
       __ sw(AT, frame_map()->address_for_slot(dest->single_stack_ix()));
       break;
 
+    case T_ADDRESS:
+      __ move(AT, c->as_jint_bits());
+      __ st_ptr(AT, frame_map()->address_for_slot(dest->single_stack_ix()));
+      break;
+
     case T_OBJECT:
       if (c->as_jobject() == NULL) {
        __ st_ptr(R0, frame_map()->address_for_slot(dest->single_stack_ix()));
@@ -957,6 +908,10 @@
      } else
        __ sw(R0, as_Address(addr));
      break;
+    case T_ADDRESS:
+      __ move(AT, c->as_jint_bits());
+      __ st_ptr(AT, as_Address(addr));
+      break;
    case T_BOOLEAN: // fall through
    case T_BYTE:
      if(c->as_jint() != 0) {
@@ -1217,7 +1172,7 @@
      break;

    case T_LONG: {
-      Register from_lo = src->as_register_lo();
+      Register from_lo = src->as_register_lo();
      Register from_hi = src->as_register_hi();
 #ifdef _LP64
      if (needs_patching) {
@@ -1535,42 +1490,55 @@
    case T_ARRAY:
      if (UseCompressedOops && !wide) {
        if (disp_reg == noreg) {
-          __ lw(dest->as_register(), src_reg, disp_value);
+          __ lwu(dest->as_register(), src_reg, disp_value);
        } else if (needs_patching) {
-          __ dadd(AT, src_reg, disp_reg);
-          offset = code_offset();
-          __ lw(dest->as_register(), AT, 0);
+          __ dadd(AT, src_reg, disp_reg);
+          offset = code_offset();
+          __ lwu(dest->as_register(), AT, 0);
        } else {
-          __ dadd(AT, src_reg, disp_reg);
-          offset = code_offset();
-          __ lw(dest->as_register(), AT, Assembler::split_low(disp_value));
+          __ dadd(AT, src_reg, disp_reg);
+          offset = code_offset();
+          __ lwu(dest->as_register(), AT, Assembler::split_low(disp_value));
        }
-
      } else {
        if (disp_reg == noreg) {
-          __ ld_ptr(dest->as_register(), src_reg, disp_value);
+          __ ld_ptr(dest->as_register(), src_reg, disp_value);
        } else if (needs_patching) {
-          __ dadd(AT, src_reg, disp_reg);
-          offset = code_offset();
-          __ ld_ptr(dest->as_register(), AT, 0);
+          __ dadd(AT, src_reg, disp_reg);
+          offset = code_offset();
+          __ ld_ptr(dest->as_register(), AT, 0);
        } else {
-          __ dadd(AT, src_reg, disp_reg);
-          offset = code_offset();
-          __ ld_ptr(dest->as_register(), AT, Assembler::split_low(disp_value));
+          __ dadd(AT, src_reg, disp_reg);
+          offset = code_offset();
+          __ ld_ptr(dest->as_register(), AT, Assembler::split_low(disp_value));
        }
      }
      break;
    case T_ADDRESS:
-      if (disp_reg == noreg) {
+      if (UseCompressedClassPointers && addr->disp() == oopDesc::klass_offset_in_bytes()) {
+        if (disp_reg == noreg) {
+          __ lwu(dest->as_register(), src_reg, disp_value);
+        } else if (needs_patching) {
+          __ dadd(AT, src_reg, disp_reg);
+          offset = code_offset();
+          __ lwu(dest->as_register(), AT, 0);
+        } else {
+          __ dadd(AT, src_reg, disp_reg);
+          offset = code_offset();
+          __ lwu(dest->as_register(), AT, Assembler::split_low(disp_value));
+        }
+      } else {
+        if (disp_reg == noreg) {
          __ ld_ptr(dest->as_register(), src_reg, disp_value);
-      } else if (needs_patching) {
+        } else if (needs_patching) {
          __ dadd(AT, src_reg, disp_reg);
          offset = code_offset();
          __ ld_ptr(dest->as_register(), AT, 0);
-      } else {
+        } else {
          __ dadd(AT, src_reg, disp_reg);
          offset = code_offset();
          __ ld_ptr(dest->as_register(), AT, Assembler::split_low(disp_value));
+        }
      }
      break;
    case T_INT: {
@@ -1704,6 +1672,10 @@
    }
 #endif
    __ verify_oop(dest->as_register());
+  } else if (type == T_ADDRESS && addr->disp() == oopDesc::klass_offset_in_bytes()) {
+    if (UseCompressedClassPointers) {
+      __ decode_klass_not_null(dest->as_register());
+    }
  }
  if (info != NULL) add_debug_info_for_null_check(offset, info);
 }
@@ -3088,11 +3060,197 @@
  __ bind(*op->stub()->continuation());
 }
 
+void LIR_Assembler::type_profile_helper(Register mdo,
+                                        ciMethodData *md, ciProfileData *data,
+                                        Register recv, Label* update_done) {
+  for (uint i = 0; i < ReceiverTypeData::row_limit(); i++) {
+    Label next_test;
+    // See if the receiver is receiver[n].
+    __ ld_ptr(AT, Address(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i))));
+    __ bne(AT, recv, next_test);
+    __ delayed()->nop();
+    Address data_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i)));
+    __ ld_ptr(AT, data_addr);
+    __ addi(AT, AT, DataLayout::counter_increment);
+    __ st_ptr(AT, data_addr);
+    __ b(*update_done);
+    __ delayed()->nop();
+    __ bind(next_test);
+  }
+
+  // Didn't find receiver; find next empty slot and fill it in
+  for (uint i = 0; i < ReceiverTypeData::row_limit(); i++) {
+    Label next_test;
+    Address recv_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i)));
+    __ ld_ptr(AT, recv_addr);
+    __ bne(AT, R0, next_test);
+    __ delayed()->nop();
+    __ st_ptr(recv, recv_addr);
+    __ move(AT, DataLayout::counter_increment);
+    __ st_ptr(AT, Address(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i))));
+    __ b(*update_done);
+    __ delayed()->nop();
+    __ bind(next_test);
+  }
+}
+
+void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, Label* failure, Label* obj_is_null) {
+  // we always need a stub for the failure case.
+  CodeStub* stub = op->stub();
+  Register obj = op->object()->as_register();
+  Register k_RInfo = op->tmp1()->as_register();
+  Register klass_RInfo = op->tmp2()->as_register();
+  Register dst = op->result_opr()->as_register();
+  ciKlass* k = op->klass();
+  Register Rtmp1 = noreg;
+
+  // check if it needs to be profiled
+  ciMethodData* md;
+  ciProfileData* data;
+
+  if (op->should_profile()) {
+    ciMethod* method = op->profiled_method();
+    assert(method != NULL, "Should have method");
+    int bci = op->profiled_bci();
+    md = method->method_data_or_null();
+    assert(md != NULL, "Sanity");
+    data = md->bci_to_data(bci);
+    assert(data != NULL, "need data for type check");
+    assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
+  }
+  Label profile_cast_success, profile_cast_failure;
+  Label *success_target = op->should_profile() ? &profile_cast_success : success;
+  Label *failure_target = op->should_profile() ? &profile_cast_failure : failure;
+
+  if (obj == k_RInfo) {
+    k_RInfo = dst;
+  } else if (obj == klass_RInfo) {
+    klass_RInfo = dst;
+  }
+  if (k->is_loaded() && !UseCompressedClassPointers) {
+    select_different_registers(obj, dst, k_RInfo, klass_RInfo);
+  } else {
+    Rtmp1 = op->tmp3()->as_register();
+    select_different_registers(obj, dst, k_RInfo, klass_RInfo, Rtmp1);
+  }
+
+  assert_different_registers(obj, k_RInfo, klass_RInfo);
+
+  if (op->should_profile()) {
+    Label not_null;
+    __ bne(obj, R0, not_null);
+    __ delayed()->nop();
+    // Object is null; update MDO and exit
+    Register mdo = klass_RInfo;
+    __ mov_metadata(mdo, md->constant_encoding());
+    Address data_addr(mdo, md->byte_offset_of_slot(data, DataLayout::header_offset()));
+    int header_bits = DataLayout::flag_mask_to_header_mask(BitData::null_seen_byte_constant());
+    __ lw(AT, data_addr);
+    __ ori(AT, AT, header_bits);
+    __ sw(AT, data_addr);
+    __ b(*obj_is_null);
+    __ delayed()->nop();
+    __ bind(not_null);
+  } else {
+    __ beq(obj, R0, *obj_is_null);
+    __ delayed()->nop();
+  }
+
+  if (!k->is_loaded()) {
+    klass2reg_with_patching(k_RInfo, op->info_for_patch());
+  } else {
+#ifdef _LP64
+    __ mov_metadata(k_RInfo, k->constant_encoding());
+#endif // _LP64
+  }
+  __ verify_oop(obj);
+
+  if (op->fast_check()) {
+    // get object class
+    // not a safepoint as obj null check happens earlier
+    if (UseCompressedClassPointers) {
+      __ load_klass(Rtmp1, obj);
+      __ bne(k_RInfo, Rtmp1, *failure_target);
+      __ delayed()->nop();
+    } else {
+      __ ld(AT, Address(obj, oopDesc::klass_offset_in_bytes()));
+      __ bne(k_RInfo, AT, *failure_target);
+      __ delayed()->nop();
+    }
+    // successful cast, fall through to profile or jump
+  } else {
+    // get object class
+    // not a safepoint as obj null check happens earlier
+    __ load_klass(klass_RInfo, obj);
+    if (k->is_loaded()) {
+      // See if we get an immediate positive hit
+      __ ld(AT, Address(klass_RInfo, k->super_check_offset()));
+      if ((juint)in_bytes(Klass::secondary_super_cache_offset()) != k->super_check_offset()) {
+        __ bne(k_RInfo, AT, *failure_target);
+        __ delayed()->nop();
+        // successful cast, fall through to profile or jump
+      } else {
+        // See if we get an immediate positive hit
+        __ beq(k_RInfo, AT, *success_target);
+        __ delayed()->nop();
+        // check for self
+        __ beq(k_RInfo, klass_RInfo, *success_target);
+        __ delayed()->nop();
+
+        __ push(klass_RInfo);
+        __ push(k_RInfo);
+        __ call(Runtime1::entry_for(Runtime1::slow_subtype_check_id), relocInfo::runtime_call_type);
+        __ pop(klass_RInfo);
+        __ pop(klass_RInfo);
+        // result is a boolean
+        __ beq(klass_RInfo, R0, *failure_target);
+        __ delayed()->nop();
+        // successful cast, fall through to profile or jump
+      }
+    } else {
+      // perform the fast part of the checking logic
+      __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, NULL);
+      // call out-of-line instance of __ check_klass_subtype_slow_path(...):
+      __ push(klass_RInfo);
+      __ push(k_RInfo);
+      __ call(Runtime1::entry_for(Runtime1::slow_subtype_check_id), relocInfo::runtime_call_type);
+      __ pop(klass_RInfo);
+      __ pop(k_RInfo);
+      // result is a boolean
+      __ beq(k_RInfo, R0, *failure_target);
+      __ delayed()->nop();
+      // successful cast, fall through to profile or jump
+    }
+  }
+  if (op->should_profile()) {
+    Register mdo = klass_RInfo, recv = k_RInfo;
+    __ bind(profile_cast_success);
+    __ mov_metadata(mdo, md->constant_encoding());
+    __ load_klass(recv, obj);
+    Label update_done;
+    type_profile_helper(mdo, md, data, recv, success);
+    __ b(*success);
+    __ delayed()->nop();
+
+    __ bind(profile_cast_failure);
+    __ mov_metadata(mdo, md->constant_encoding());
+    Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));
+    __ ld_ptr(AT, counter_addr);
+    __ addi(AT, AT, -DataLayout::counter_increment);
+    __ st_ptr(AT, counter_addr);
+
+    __ b(*failure);
+    __ delayed()->nop();
+  }
+  __ b(*success);
+  __ delayed()->nop();
+}
+
 
 
 void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
   LIR_Code code = op->code();
-//  if (code == lir_store_check) {
+  if (code == lir_store_check) {
     Register value = op->object()->as_register();
     Register array = op->array()->as_register();
     Register k_RInfo = op->tmp1()->as_register();
@@ -3100,133 +3258,109 @@
     Register tmp = op->tmp3()->as_register();
 
     CodeStub* stub = op->stub();
+
     //check if it needs to be profiled
     ciMethodData* md;
     ciProfileData* data;
+
     if (op->should_profile()) {
       ciMethod* method = op->profiled_method();
       assert(method != NULL, "Should have method");
-      int bci = op->profiled_bci();
+      int bci = op->profiled_bci();
       md = method->method_data_or_null();
       assert(md != NULL, "Sanity");
       data = md->bci_to_data(bci);
-      assert(data != NULL, "need data for type check");
+      assert(data != NULL, "need data for type check");
       assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
     }
     Label profile_cast_success, profile_cast_failure, done;
     Label *success_target = op->should_profile() ? &profile_cast_success : &done;
-    Label *failure_target = op->should_profile() ? &profile_cast_failure : &done;
-    //__ cmpptr(value, (int32_t)NULL_WORD);
+    Label *failure_target = op->should_profile() ? &profile_cast_failure : stub->entry();
+
     if(op->should_profile()) {
-      Label not_null;
-      __ bne(value, R0, not_null);
-      __ delayed()->nop();
-
-      //  __ jcc(Assembler::notEqual, profile_done);
-      //  __ bne(obj, R0, profile_done);
-      //__ delayed()->nop();
-
-      // Object is null; update methodDataOop
-      //ciMethodData* md = method->method_data();
-      //if (md == NULL) {
-//      bailout("out of memory building methodDataOop");
-//      return;
-      // }
-      // ciProfileData* data = md->bci_to_data(bci);
-      //assert(data != NULL, "need data for checkcast");
-      // assert(data->is_BitData(), "need BitData for checkcast");
-      Register mdo = klass_RInfo;
-      int oop_index = __ oop_recorder()->find_index(md->constant_encoding());
-      RelocationHolder rspec = oop_Relocation::spec(oop_index);
-      __ relocate(rspec);
-#ifndef _LP64
-      //by_css
-      __ lui(mdo, Assembler::split_high((int)md->constant_encoding()));
-      __ addiu(mdo, mdo, Assembler::split_low((int)md->consant_encoding()));
-#else
-      __ li48(mdo, (long)md->constant_encoding());
-#endif
-
-      Address data_addr(mdo, md->byte_offset_of_slot(data, DataLayout::header_offset()));
-      //FIXME, it very ineffictive to replace orl with 3 mips instruction @jerome, 12/27,06
-      //__ orl(data_addr, BitData::null_flag_constant());
-      int header_bits = DataLayout::flag_mask_to_header_mask(BitData::null_seen_byte_constant());
-      __ lw(AT, data_addr);
-      __ ori(AT, AT, header_bits);
-      __ sw(AT,data_addr);
-      __ b(done);
-      __ delayed()->nop();
-      __ bind(not_null);
-    } else {
-      __ beq(value, R0, done);
-      __ delayed()->nop();
-    }
-    //__ verify_oop(obj);
+      Label not_null;
+      __ bne(value, R0, not_null);
+      __ delayed()->nop();
+
+      Register mdo = klass_RInfo;
+      __ mov_metadata(mdo, md->constant_encoding());
+      Address data_addr(mdo, md->byte_offset_of_slot(data, DataLayout::header_offset()));
+      int header_bits = DataLayout::flag_mask_to_header_mask(BitData::null_seen_byte_constant());
+      __ lw(AT, data_addr);
+      __ ori(AT, AT, header_bits);
+      __ sw(AT, data_addr);
+      __ b(done);
+      __ delayed()->nop();
+      __ bind(not_null);
+    } else {
+      __ beq(value, R0, done);
+      __ delayed()->nop();
    }
+
    add_debug_info_for_null_check_here(op->info_for_exception());
    __ load_klass(k_RInfo, array);
    __ load_klass(klass_RInfo, value);
-    // get instance klass (it's already uncompressed)
-    //__ movptr(k_RInfo, Address(k_RInfo, ObjArrayKlass::element_klass_offset()));
-    __ daddi (k_RInfo, k_RInfo, in_bytes(ObjArrayKlass::element_klass_offset()));
-    // perform the fast part of the checking logic
-    //__ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, NULL);
-    // call out-of-line instance of __ check_klass_subtype_slow_path(...):
-//1899     __ push(klass_RInfo);
-//1900     __ push(k_RInfo);
-//1901     __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
-//1902     __ pop(klass_RInfo);
-//1903     __ pop(k_RInfo);
-//1904     // result is a boolean
-//1905     __ cmpl(k_RInfo, 0);
-//1906     __ jcc(Assembler::equal, *failure_target);
-//1907     // fall through to the success case
-//1908
-//1909     if (op->should_profile()) {
-//1910       Register mdo = klass_RInfo, recv = k_RInfo;
-//1911       __ bind(profile_cast_success);
-//1912       __ mov_metadata(mdo, md->constant_encoding());
-//1913       __ load_klass(recv, value);
-//1914       Label update_done;
-//1915       type_profile_helper(mdo, md, data, recv, &done);
-//1916       __ jmpb(done);
-//1917
-//1918       __ bind(profile_cast_failure);
-//1919       __ mov_metadata(mdo, md->constant_encoding());
-//1920       Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));
-//1921       __ subptr(counter_addr, DataLayout::counter_increment);
-//1922       __ jmp(*stub->entry());
-//1923     }
-//1925     __ bind(done);
-//1926   } else
-//1927     if (code == lir_checkcast) {
-//1928       Register obj = op->object()->as_register();
-//1929       Register dst = op->result_opr()->as_register();
-//1930       Label success;
-//1931       emit_typecheck_helper(op, &success, op->stub()->entry(), &success);
-//1932       __ bind(success);
-//1933       if (dst != obj) {
-//1934         __ mov(dst, obj);
-//1935       }
-//1936     } else
-//1937       if (code == lir_instanceof) {
-//1938         Register obj = op->object()->as_register();
-//1939         Register dst = op->result_opr()->as_register();
-//1940         Label success, failure, done;
-//1941         emit_typecheck_helper(op, &success, &failure, &failure);
-//1942         __ bind(failure);
-//1943         __ xorptr(dst, dst);
-//1944         __ jmpb(done);
-//1945         __ bind(success);
-//1946         __ movptr(dst, 1);
-//1947         __ bind(done);
-//1948       } else {
-//1949         ShouldNotReachHere();
-//1950       }
-//FIXME:wuhui.
-
+    // get instance klass (it's already uncompressed)
+    __ ld_ptr(k_RInfo, Address(k_RInfo, ObjArrayKlass::element_klass_offset()));
+    // perform the fast part of the checking logic
+    __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, tmp, success_target, failure_target, NULL);
+    __ push(klass_RInfo);
+    __ push(k_RInfo);
+    __ call(Runtime1::entry_for(Runtime1::slow_subtype_check_id), relocInfo::runtime_call_type);
+    __ pop(klass_RInfo);
+    __ pop(k_RInfo);
+    // result is a boolean
+    __ beq(k_RInfo, R0, *failure_target);
+    __ delayed()->nop();
+    // fall through to the success case
+
+    if (op->should_profile()) {
+      Register mdo = klass_RInfo, recv = k_RInfo;
+      __ bind(profile_cast_success);
+      __ mov_metadata(mdo, md->constant_encoding());
+      __ load_klass(recv, value);
+      Label update_done;
+      type_profile_helper(mdo, md, data, recv, &done);
+      __ b(done);
+      __ delayed()->nop();
+
+      __ bind(profile_cast_failure);
+      __ mov_metadata(mdo, md->constant_encoding());
+      Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));
+      __ ld_ptr(AT, counter_addr);
+      __ addi(AT, AT, -DataLayout::counter_increment);
+      __ st_ptr(AT, counter_addr);
+      __ b(*stub->entry());
+      __ delayed()->nop();
+    }
+
+    __ bind(done);
+  } else if (code == lir_checkcast) {
+    Register obj = op->object()->as_register();
+    Register dst = op->result_opr()->as_register();
+    Label success;
+    emit_typecheck_helper(op, &success, op->stub()->entry(), &success);
+    __ bind(success);
+    if (dst != obj) {
+      __ move(dst, obj);
+    }
+  } else if (code == lir_instanceof) {
+    Register obj = op->object()->as_register();
+    Register dst = op->result_opr()->as_register();
+    Label success, failure, done;
+    emit_typecheck_helper(op, &success, &failure, &failure);
+    __ bind(failure);
+    __ move(dst, R0);
+    __ b(done);
+    __ delayed()->nop();
+    __ bind(success);
+    __ addi(dst, R0, 1);
+    __ bind(done);
+  } else {
+    ShouldNotReachHere();
+  }
 }
 
-
 void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) {
   if (op->code() == lir_cas_long) {
 #ifdef _LP64
@@ -3370,8 +3504,18 @@
     Unimplemented();
   } else if (right->is_constant()) {
     // cpu register - constant
-    Register res = dest->as_register();
-    jint c = right->as_constant_ptr()->as_jint();
+    Register res;
+    if (dest->is_double_cpu()) {
+      res = dest->as_register_lo();
+    } else {
+      res = dest->as_register();
+    }
+    jint c;
+    if (right->type() == T_INT) {
+      c = right->as_constant_ptr()->as_jint();
+    } else {
+      c = right->as_constant_ptr()->as_jlong();
+    }
 
     switch (code) {
       case lir_mul_strictfp:
@@ -3763,43 +3907,43 @@
      int val = right->as_constant_ptr()->as_jint();
      __ move(AT, val);
      switch (code) {
-      case lir_logic_and:
-        __ andr (dstreg, reg, AT);
-        break;
-      case lir_logic_or:
-        __ orr(dstreg, reg, AT);
-        break;
-      case lir_logic_xor:
-        __ xorr(dstreg, reg, AT);
-        break;
-      default: ShouldNotReachHere();
+        case lir_logic_and:
+          __ andr (dstreg, reg, AT);
+          break;
+        case lir_logic_or:
+          __ orr(dstreg, reg, AT);
+          break;
+        case lir_logic_xor:
+          __ xorr(dstreg, reg, AT);
+          break;
+        default: ShouldNotReachHere();
      }
    } else if (right->is_stack()) {
      // added support for stack operands
      Address raddr = frame_map()->address_for_slot(right->single_stack_ix());
      switch (code) {
-      case lir_logic_and:
-        //FIXME. lw or ld_ptr?
-        __ lw(AT, raddr);
-        __ andr(reg, reg,AT);
-        break;
-      case lir_logic_or:
-        __ lw(AT, raddr);
-        __ orr(reg, reg, AT);
-        break;
-      case lir_logic_xor:
-        __ lw(AT, raddr);
-        __ xorr(reg, reg, AT);
-        break;
-      default: ShouldNotReachHere();
+        case lir_logic_and:
+          //FIXME. lw or ld_ptr?
+          __ lw(AT, raddr);
+          __ andr(reg, reg, AT);
+          break;
+        case lir_logic_or:
+          __ lw(AT, raddr);
+          __ orr(reg, reg, AT);
+          break;
+        case lir_logic_xor:
+          __ lw(AT, raddr);
+          __ xorr(reg, reg, AT);
+          break;
+        default: ShouldNotReachHere();
      }
    } else {
      Register rright = right->as_register();
      switch (code) {
-      case lir_logic_and: __ andr (dstreg, reg, rright); break;
-      case lir_logic_or : __ orr (dstreg, reg, rright); break;
-      case lir_logic_xor: __ xorr (dstreg, reg, rright); break;
-      default: ShouldNotReachHere();
+        case lir_logic_and: __ andr (dstreg, reg, rright); break;
+        case lir_logic_or : __ orr  (dstreg, reg, rright); break;
+        case lir_logic_xor: __ xorr (dstreg, reg, rright); break;
+        default: ShouldNotReachHere();
      }
    }
  } else {
@@ -3817,46 +3961,46 @@
      int r_hi = right->as_constant_ptr()->as_jint_hi();

      switch (code) {
-      case lir_logic_and:
-        __ move(AT, r_lo);
-        __ andr(dst_lo, l_lo, AT);
-        __ move(AT, r_hi);
-        __ andr(dst_hi, l_hi, AT);
-        break;
-
-      case lir_logic_or:
-        __ move(AT, r_lo);
-        __ orr(dst_lo, l_lo, AT);
-        __ move(AT, r_hi);
-        __ orr(dst_hi, l_hi, AT);
-        break;
-
-      case lir_logic_xor:
-        __ move(AT, r_lo);
-        __ xorr(dst_lo, l_lo, AT);
-        __ move(AT, r_hi);
-        __ xorr(dst_hi, l_hi, AT);
-        break;
-
-      default: ShouldNotReachHere();
+        case lir_logic_and:
+          __ move(AT, r_lo);
+          __ andr(dst_lo, l_lo, AT);
+          __ move(AT, r_hi);
+          __ andr(dst_hi, l_hi, AT);
+          break;
+
+        case lir_logic_or:
+          __ move(AT, r_lo);
+          __ orr(dst_lo, l_lo, AT);
+          __ move(AT, r_hi);
+          __ orr(dst_hi, l_hi, AT);
+          break;
+
+        case lir_logic_xor:
+          __ move(AT, r_lo);
+          __ xorr(dst_lo, l_lo, AT);
+          __ move(AT, r_hi);
+          __ xorr(dst_hi, l_hi, AT);
+          break;
+
+        default: ShouldNotReachHere();
      }
 #else
      __ li(AT, right->as_constant_ptr()->as_jlong());

      switch (code) {
-      case lir_logic_and:
-        __ andr(dst_lo, l_lo, AT);
-        break;
-
-      case lir_logic_or:
-        __ orr(dst_lo, l_lo, AT);
-        break;
-
-      case lir_logic_xor:
-        __ xorr(dst_lo, l_lo, AT);
-        break;
-
-      default: ShouldNotReachHere();
+        case lir_logic_and:
+          __ andr(dst_lo, l_lo, AT);
+          break;
+
+        case lir_logic_or:
+          __ orr(dst_lo, l_lo, AT);
+          break;
+
+        case lir_logic_xor:
+          __ xorr(dst_lo, l_lo, AT);
+          break;
+
+        default: ShouldNotReachHere();
      }
 #endif

@@ -3865,19 +4009,19 @@
      Register r_hi = right->as_register_hi();

      switch (code) {
-      case lir_logic_and:
-        __ andr(dst_lo, l_lo, r_lo);
-        NOT_LP64(__ andr(dst_hi, l_hi, r_hi);)
-        break;
-      case lir_logic_or:
-        __ orr(dst_lo, l_lo, r_lo);
-        NOT_LP64(__ orr(dst_hi, l_hi, r_hi);)
-        break;
-      case lir_logic_xor:
-        __ xorr(dst_lo, l_lo, r_lo);
-        NOT_LP64(__ xorr(dst_hi, l_hi, r_hi);)
-        break;
-      default: ShouldNotReachHere();
+        case lir_logic_and:
+          __ andr(dst_lo, l_lo, r_lo);
+          NOT_LP64(__ andr(dst_hi, l_hi, r_hi);)
+          break;
+        case lir_logic_or:
+          __ orr(dst_lo, l_lo, r_lo);
+          NOT_LP64(__ orr(dst_hi, l_hi, r_hi);)
+          break;
+        case lir_logic_xor:
+          __ xorr(dst_lo, l_lo, r_lo);
+          NOT_LP64(__ xorr(dst_hi, l_hi, r_hi);)
+          break;
+        default: ShouldNotReachHere();
      }
    }
  }
@@ -4111,28 +4255,6 @@
 
 
 void LIR_Assembler::align_call(LIR_Code code) {
-//FIXME. aoqi, this right?
-//  do nothing since all instructions are word aligned on sparc
-/*
-  if (os::is_MP()) {
-    // make sure that the displacement word of the call ends up word aligned
-    int offset = __ offset();
-    switch (code) {
-      case lir_static_call:
-      case lir_optvirtual_call:
-        offset += NativeCall::displacement_offset;
-        break;
-      case lir_icvirtual_call:
-        offset += NativeCall::displacement_offset + NativeMovConstReg::instruction_size;
-        break;
-      case lir_virtual_call:  // currently, sparc-specific for niagara
-      default: ShouldNotReachHere();
-    }
-    while (offset++ % BytesPerWord != 0) {
-      __ nop();
-    }
-  }
-*/
 }
 
 
@@ -4145,26 +4267,8 @@
 
 
 void LIR_Assembler::ic_call(LIR_OpJavaCall* op) {
-  RelocationHolder rh = virtual_call_Relocation::spec(pc());
-//  int oop_index = __ oop_recorder()->allocate_oop_index((jobject)Universe::non_oop_word());
-//  RelocationHolder rspec = oop_Relocation::spec(oop_index);
-///  __ relocate(rspec);
-#ifndef _LP64
-//by_css
-  __ lui(IC_Klass, Assembler::split_high((int)Universe::non_oop_word()));
-  __ addiu(IC_Klass, IC_Klass, Assembler::split_low((int)Universe::non_oop_word()));
-#else
-  __ li48(IC_Klass, (long)Universe::non_oop_word());
-#endif
-  __ call(op->addr(), rh);
-  __ delayed()->nop();
-//  add_call_info(code_offset(), info);
-
-  add_call_info(code_offset(), op->info());
-  assert(!os::is_MP() ||
-         (__ offset() - NativeCall::instruction_size + NativeCall::displacement_offset) % BytesPerWord == 0,
-         "must be aligned");
-
+  __ ic_call(op->addr());
+  add_call_info(code_offset(), op->info());
 }
 
 
@@ -4182,36 +4286,17 @@
    bailout("static call stub overflow");
    return;
  }
-
  int start = __ offset();
-/*
-  if (os::is_MP()) {
-    // make sure that the displacement word of the call ends up word aligned
-    int offset = __ offset() + NativeMovConstReg::instruction_size + NativeCall::displacement_offset;
-    while (offset++ % BytesPerWord != 0) {
-      __ nop();
-    }
-  }
-*/
  __ relocate(static_stub_Relocation::spec(call_pc));
-  jobject o = NULL;
-  int oop_index = __ oop_recorder()->allocate_oop_index((jobject)o);
-  RelocationHolder rspec = oop_Relocation::spec(oop_index);
+
+  Metadata *o = NULL;
+  int index = __ oop_recorder()->allocate_metadata_index(o);
+  RelocationHolder rspec = metadata_Relocation::spec(index);
  __ relocate(rspec);
 //see set_to_interpreted
-#ifndef _LP64
-  __ lui(T7, Assembler::split_high((int)o));
-  __ addiu(T7, T7, Assembler::split_low((int)o));
-#else
-  __ li48(Rmethod, (long)o);
-#endif
-#ifndef _LP64
-  __ lui(AT, Assembler::split_high((int)-1));
-  __ addiu(AT, AT, Assembler::split_low((int)-1));
-#else
-  __ li48(AT, (long)-1);
-#endif
-  //assert(!os::is_MP() || ((__ offset() + 1) % BytesPerWord) == 0, "must be aligned on MP");
+  __ patchable_set48(Rmethod, (long)o);
+
+  __ patchable_set48(AT, (long)-1);
  __ jr(AT);
  __ delayed()->nop();
  assert(__ offset() - start <= call_stub_size, "stub too big");
@@ -4227,52 +4312,29 @@
  // (LinearScan assumes that no oops are in fixed registers)
 
  info->add_register_oop(exceptionOop);
-  //if (!unwind) {
-    // get current pc information
-    // pc is only needed if the method has an exception handler, the unwind code does not need it.
-#ifndef _LP64
-//by_css
-    int pc_for_athrow = (int)__ pc();
-    int pc_for_athrow_offset = __ offset();
-    Register epc = exceptionPC->as_register();
-    //__ nop();
-    // pc_for_athrow can not point to itself (relocInfo restriction), no need now
-    __ relocate(relocInfo::internal_pc_type);
-    __ lui(epc, Assembler::split_high(pc_for_athrow));
-    __ addiu(epc, epc, Assembler::split_low(pc_for_athrow));
-#else
-    long pc_for_athrow = (long)__ pc();
-    int pc_for_athrow_offset = __ offset();
-    Register epc = exceptionPC->as_register();
-    //__ nop();
-    // pc_for_athrow can not point to itself (relocInfo restriction), no need now
-    __ relocate(relocInfo::internal_pc_type);
-    __ li48(epc, pc_for_athrow);
-#endif
-    add_call_info(pc_for_athrow_offset, info); // for exception handler
-    __ verify_not_null_oop(V0);
-    // search an exception handler (eax: exception oop, edx: throwing pc)
-    if (compilation()->has_fpu_code()) {
-      __ call(Runtime1::entry_for(Runtime1::handle_exception_id),
-              relocInfo::runtime_call_type);
-    } else {
-      __ call(Runtime1::entry_for(Runtime1::handle_exception_nofpu_id),
-              relocInfo::runtime_call_type);
-    }
-//  } else {
-//    __ call(Runtime1::entry_for(Runtime1::unwind_exception_id),
-//            relocInfo::runtime_call_type);
-//  }
-
-  // enough room for two byte trap
+  long pc_for_athrow = (long)__ pc();
+  int pc_for_athrow_offset = __ offset();
+  Register epc = exceptionPC->as_register();
+  __ relocate(relocInfo::internal_pc_type);
+  __ li48(epc, pc_for_athrow);
+  add_call_info(pc_for_athrow_offset, info); // for exception handler
+  __ verify_not_null_oop(V0);
+  // search an exception handler (eax: exception oop, edx: throwing pc)
+  if (compilation()->has_fpu_code()) {
+    __ call(Runtime1::entry_for(Runtime1::handle_exception_id),
+            relocInfo::runtime_call_type);
+  } else {
+    __ call(Runtime1::entry_for(Runtime1::handle_exception_nofpu_id),
+            relocInfo::runtime_call_type);
+  }
  __ delayed()->nop();
 }
 
-void LIR_Assembler::unwind_op(LIR_Opr exceptionOop){
-  assert(exceptionOop->as_register()== FSR, "must match");
-  __ b(_unwind_handler_entry);
-  __ delayed()->nop();
- }
+void LIR_Assembler::unwind_op(LIR_Opr exceptionOop) {
+  assert(exceptionOop->as_register()== FSR, "must match");
+  __ b(_unwind_handler_entry);
+  __ delayed()->nop();
+}
 
 void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, LIR_Opr count, LIR_Opr dest, LIR_Opr tmp) {
  // optimized version for linear scan:
@@ -4296,12 +4358,11 @@
    assert_different_registers(count_reg, value_reg);
    switch (code) {
      case lir_shl:
-        if (dest->type() == T_INT)
-          __ sllv(dest_reg, value_reg, count_reg);
-        else
-          __ dsllv(dest_reg, value_reg, count_reg);
-        break;
-//__ dsllv(dest_reg, value_reg, count_reg); break;
+        if (dest->type() == T_INT)
+          __ sllv(dest_reg, value_reg, count_reg);
+        else
+          __ dsllv(dest_reg, value_reg, count_reg);
+        break;
      case lir_shr:  __ dsrav(dest_reg, value_reg, count_reg); break;
      case lir_ushr:
 #if 1
@@ -4325,15 +4386,14 @@
 //
 //  108 ushift_right [a6|I] [a4|I] [a4|I]
 //   0x00000055646d2f70: dsll32 a4, a6, 0	\
-//   0x00000055646d2f74: dsrl32 a4, a4, 0	|- error!
-//   0x00000055646d2f78: dsrl a4, a4, a4	/
-      if (left->type() == T_INT && dest->type() == T_INT)
-      {
-        __ dsll32(AT, value_reg, 0);   // Omit the high 32 bits
-        __ dsrl32(AT, AT, 0);
-        __ dsrlv(dest_reg, AT, count_reg); // Unsigned right shift
-        break;
-      }
+//   0x00000055646d2f74: dsrl32 a4, a4, 0	|- error!
+//   0x00000055646d2f78: dsrl a4, a4, a4	/
+      if (left->type() == T_INT && dest->type() == T_INT) {
+        __ dsll32(AT, value_reg, 0);   // Omit the high 32 bits
+        __ dsrl32(AT, AT, 0);
+        __ dsrlv(dest_reg, AT, count_reg); // Unsigned right shift
+        break;
+      }
 #endif
      __ dsrlv(dest_reg, value_reg, count_reg); break;
      default: ShouldNotReachHere();
@@ -4494,8 +4554,7 @@
      case lir_shr:  __ dsra(dest_reg, value_reg, count); break;
      case lir_ushr:
 #if 1
-      if (left->type() == T_INT && dest->type() == T_INT)
-      {
+      if (left->type() == T_INT && dest->type() == T_INT) {
       /* Jin: in java, ushift_right requires 32-bit UNSIGNED operation!
           However, dsrl will shift in company with the highest 32 bits.
           Thus, if the source register contains a negative value,
@@ -4790,13 +4849,12 @@
  }
 
  if (flags & LIR_OpArrayCopy::type_check) {
-    if (UseCompressedOops) {
-      __ lw(AT, src_klass_addr);
-      __ lw(tmp, dst_klass_addr);
-    } else {
-      __ ld(AT, src_klass_addr);
-      __ ld(tmp, dst_klass_addr);
-    }
+    if (UseCompressedClassPointers) {
+      __ lw(AT, src_klass_addr);
+      __ lw(tmp, dst_klass_addr);
+    } else {
+      __ ld(AT, src_klass_addr); __ ld(tmp, dst_klass_addr);
+    }
    __ bne_far(AT, tmp, *stub->entry());
    __ delayed()->nop();
  }
@@ -4810,36 +4868,37 @@
    // a type check i needed then at this point the classes are known to be
    // the same but again which don't know which type so we can't check them.
    Label known_ok, halt;
-//FIXME:wuhui. not finished.    __ mov_metadata(tmp, default_type->constant_encoding());
+    __ mov_metadata(tmp, default_type->constant_encoding());
 #ifdef _LP64
-    if (UseCompressedOops) {
-      __ encode_heap_oop(AT);
-      __ lw(tmp, dst_klass_addr);
-    } else
+    if (UseCompressedClassPointers) {
+      __ encode_klass_not_null(tmp);
+    }
 #endif
-    {
-      __ ld(tmp, dst_klass_addr);
-    }
    if (basic_type != T_OBJECT) {
+      if (UseCompressedClassPointers) {
+        __ lw(AT, dst_klass_addr);
+      } else {
+        __ ld(AT, dst_klass_addr);
+      }
      __ bne(AT, tmp, halt);
      __ delayed()->nop();
-      if (UseCompressedOops) {
-        __ lw(tmp, src_klass_addr);
+      if (UseCompressedClassPointers) {
+        __ lw(AT, src_klass_addr);
      } else {
-        __ ld(tmp, src_klass_addr);
+        __ ld(AT, src_klass_addr);
      }
      __ beq(AT, tmp, known_ok);
      __ delayed()->nop();
    } else {
-      if (UseCompressedOops) {
-        __ lw(tmp, dst_klass_addr);
-      } else {
-        __ ld(tmp, dst_klass_addr);
-      }
-      __ beq(AT, tmp, known_ok);
-      __ delayed()->nop();
-      __ beq(src, dst, known_ok);
-      __ delayed()->nop();
+      if (UseCompressedClassPointers) {
+        __ lw(AT, dst_klass_addr);
+      } else {
+        __ ld(AT, dst_klass_addr);
+      }
+      __ beq(AT, tmp, known_ok);
+      __ delayed()->nop();
+      __ beq(src, dst, known_ok);
+      __ delayed()->nop();
    }
    __ bind(halt);
    __ stop("incorrect type information in arraycopy");
@@ -4916,6 +4975,7 @@
  Register lock = op->lock_opr()->is_single_cpu() ? op->lock_opr()->as_register(): op->lock_opr()->as_register_lo();
  if (!UseFastLocking) {
    __ b_far(*op->stub()->entry());
+    __ delayed()->nop();
  } else if (op->code() == lir_lock) {
    Register scratch = noreg;
    if (UseBiasedLocking) {
@@ -4957,21 +5017,12 @@
  assert(op->mdo()->is_single_cpu(),  "mdo must be allocated");
  Register mdo = op->mdo()->as_register();
 
-  int oop_index = __ oop_recorder()->find_index(md->constant_encoding());
-  RelocationHolder rspec = oop_Relocation::spec(oop_index);
-  __ relocate(rspec);
-#ifndef _LP64
-  //by_css
-  __ lui(mdo, Assembler::split_high((int)md->constant_encoding()));
-  __ addiu(mdo, mdo, Assembler::split_low((int)md->constant_encoding()));
-#else
-  __ li48(mdo, (long)md->constant_encoding());
-#endif
+  __ mov_metadata(mdo, md->constant_encoding());
 
  Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));
-  __ lw(AT, counter_addr);
-  __ addi(AT,AT, DataLayout::counter_increment);
-  __ sw(AT,counter_addr);
+  __ ld_ptr(AT, counter_addr);
+  __ addi(AT, AT, DataLayout::counter_increment);
+  __ st_ptr(AT, counter_addr);
 
  Bytecodes::Code bc = method->java_code_at_bci(bci);
  const bool callee_is_static = callee->is_loaded() && callee->is_static();
@@ -4980,11 +5031,11 @@
  if ((bc == Bytecodes::_invokevirtual || bc == Bytecodes::_invokeinterface) &&
      !callee_is_static &&  //required for optimized MH invokes
      C1ProfileVirtualCalls) {
-    assert(op->recv()->is_single_cpu(), "recv must be allocated");
-    Register recv = op->recv()->as_register();
-    assert_different_registers(mdo, recv);
-    assert(data->is_VirtualCallData(), "need VirtualCallData for virtual calls");
-    ciKlass* known_klass = op->known_holder();
+    assert(op->recv()->is_single_cpu(), "recv must be allocated");
+    Register recv = op->recv()->as_register();
+    assert_different_registers(mdo, recv);
+    assert(data->is_VirtualCallData(), "need VirtualCallData for virtual calls");
+    ciKlass* known_klass = op->known_holder();
    if (C1OptimizeVirtualCallProfiling && known_klass != NULL) {
      // We know the type that will be seen at this call site; we can
      // statically update the methodDataOop rather than needing to do
@@ -4995,14 +5046,14 @@
      ciVirtualCallData* vc_data = (ciVirtualCallData*) data;
      uint i;
      for (i = 0; i < VirtualCallData::row_limit(); i++) {
-        ciKlass* receiver = vc_data->receiver(i);
-        if (known_klass->equals(receiver)) {
-          Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)));
-          __ lw(AT, data_addr);
-          __ addi(AT, AT, DataLayout::counter_increment);
-          __ sw(AT, data_addr);
-          return;
-        }
+        ciKlass* receiver = vc_data->receiver(i);
+        if (known_klass->equals(receiver)) {
+          Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)));
+          __ ld_ptr(AT, data_addr);
+          __ addi(AT, AT, DataLayout::counter_increment);
+          __ st_ptr(AT, data_addr);
+          return;
+        }
      }
 
      // Receiver type not found in profile data; select an empty slot
@@ -5011,65 +5062,55 @@
      // always does a write to the receiver part of the
      // VirtualCallData rather than just the first time
      for (i = 0; i < VirtualCallData::row_limit(); i++) {
-        ciKlass* receiver = vc_data->receiver(i);
-        if (receiver == NULL) {
-          Address recv_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i)));
-          int oop_index = __ oop_recorder()->find_index(known_klass->constant_encoding());
-          RelocationHolder rspec = oop_Relocation::spec(oop_index);
-          __ relocate(rspec);
-#ifndef _LP64
-          //by_css
-          __ lui(AT, Assembler::split_high((int)known_klass->constant_encoding()));
-          __ addiu(AT, AT, Assembler::split_low((int)known_klass->constant_encoding()));
-#else
-          __ li48(AT, (long)known_klass->constant_encoding());
-#endif
-          __ st_ptr(AT,recv_addr);
-          Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)));
-          __ lw(AT, data_addr);
-          __ addi(AT, AT, DataLayout::counter_increment);
-          __ sw(AT, data_addr);
-          return;
-        }
+        ciKlass* receiver = vc_data->receiver(i);
+        if (receiver == NULL) {
+          Address recv_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i)));
+          __ mov_metadata(AT, known_klass->constant_encoding());
+          __ st_ptr(AT, recv_addr);
+          Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)));
+          __ ld_ptr(AT, data_addr);
+          __ addi(AT, AT, DataLayout::counter_increment);
+          __ st_ptr(AT, data_addr);
+          return;
+        }
      }
    } else {
      //__ ld_ptr(recv, Address(recv, oopDesc::klass_offset_in_bytes()));
-      __ load_klass(recv, recv);
-      Label update_done;
-      uint i;
-      for (i = 0; i < VirtualCallData::row_limit(); i++) {
-        Label next_test;
+      __ load_klass(recv, recv);
+      Label update_done;
+      uint i;
+      for (i = 0; i < VirtualCallData::row_limit(); i++) {
+        Label next_test;
        // See if the receiver is receiver[n].
-        __ ld_ptr(AT, Address(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i))));
-        __ bne(recv,AT,next_test);
-        __ delayed()->nop();
-        Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)));
-        __ lw(AT, data_addr);
-        __ addi(AT, AT, DataLayout::counter_increment);
-        __ sw(AT, data_addr);
-        __ b(update_done);
-        __ delayed()->nop();
-        __ bind(next_test);
-      }
+        __ ld_ptr(AT, Address(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i))));
+        __ bne(recv, AT, next_test);
+        __ delayed()->nop();
+        Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)));
+        __ ld_ptr(AT, data_addr);
+        __ addi(AT, AT, DataLayout::counter_increment);
+        __ st_ptr(AT, data_addr);
+        __ b(update_done);
+        __ delayed()->nop();
+        __ bind(next_test);
+      }
 
      // Didn't find receiver; find next empty slot and fill it in
-      for (i = 0; i < VirtualCallData::row_limit(); i++) {
-        Label next_test;
-        Address recv_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i)));
-        __ ld_ptr(AT, recv_addr);
-        __ bne(AT, R0, next_test);
-        __ delayed()->nop();
-        __ st_ptr(recv, recv_addr);
-        __ move(AT,DataLayout::counter_increment);
-        __ sw(AT,Address(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i))));
-        if (i < (VirtualCallData::row_limit() - 1)) {
-          __ b(update_done);
-          __ delayed()->nop();
-        }
-        __ bind(next_test);
-      }
-
-      __ bind(update_done);
+      for (i = 0; i < VirtualCallData::row_limit(); i++) {
+        Label next_test;
+        Address recv_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i)));
+        __ ld_ptr(AT, recv_addr);
+        __ bne(AT, R0, next_test);
+        __ delayed()->nop();
+        __ st_ptr(recv, recv_addr);
+        __ move(AT, DataLayout::counter_increment);
+        __ st_ptr(AT, Address(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i))));
+        if (i < (VirtualCallData::row_limit() - 1)) {
+          __ b(update_done);
+          __ delayed()->nop();
+        }
+        __ bind(next_test);
+      }
+      __ bind(update_done);
    }
  }
 }
@@ -5092,6 +5133,7 @@
 }
 
 void LIR_Assembler::align_backward_branch_target() {
+  __ align(BytesPerWord);
 }
 
 
@@ -5124,11 +5166,11 @@
  }
 }
 
-
 void LIR_Assembler::leal(LIR_Opr addr, LIR_Opr dest) {
  assert(addr->is_address() && dest->is_register(), "check");
-  Register reg = dest->as_register();
-  __ lea(dest->as_register(), as_Address(addr->as_address_ptr()));
+  Register reg;
+  reg = dest->as_pointer_register();
+  __ lea(reg, as_Address(addr->as_address_ptr()));
 }
 
 
@@ -5142,8 +5184,8 @@
    __ lui(reg, Assembler::split_high((int)o));
    __ addiu(reg, reg, Assembler::split_low((int)o));
 #else
-    //__ li48(reg, (long)o);
-    __ li(reg, (long)o);
+    __ li48(reg, (long)o);
+    //__ patchable_set48(reg, (long)o);
 #endif
  } else {
    int oop_index = __ oop_recorder()->find_index(o);
@@ -5154,8 +5196,8 @@
    __ lui(reg, Assembler::split_high((int)o));
    __ addiu(reg, reg, Assembler::split_low((int)o));
 #else
-    //__ li48(reg, (long)o);
-    __ li(reg, (long)o);
+    __ li48(reg, (long)o);
+    //__ patchable_set48(reg, (long)o);
 #endif
  }
 }
@@ -5321,6 +5363,7 @@
  } else {
    ShouldNotReachHere();
  }*/
+  ShouldNotReachHere();
 }
 
 #undef __
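
Note (reviewer sketch, not part of the patch): several hunks above replace plain li with li48 or patchable_set48 for jobject/Metadata constants, citing NativeMovConstReg::verify. The point is that a patchable constant-load site must always have the same instruction shape so the patching stub can rewrite the immediates in place; a shortest-encoding li cannot guarantee that. The host-runnable C++ below models the idea, assuming (without checking this port's assembler) that li48 emits a fixed lui/ori/dsll(16)/ori sequence.

#include <cassert>
#include <cstdint>
#include <cstdio>

// Hypothetical model of a fixed-shape 48-bit constant load (li48-style).
// A patchable site must always occupy the same instruction count so that
// NativeMovConstReg can locate and rewrite the immediates after the fact.
static uint64_t emulate_li48(uint64_t imm48) {
  uint16_t hi  = (uint16_t)((imm48 >> 32) & 0xFFFF);  // lui  reg, hi
  uint16_t mid = (uint16_t)((imm48 >> 16) & 0xFFFF);  // ori  reg, reg, mid
  uint16_t lo  = (uint16_t)( imm48        & 0xFFFF);  // dsll reg, 16; ori reg, lo
  uint64_t reg = ((uint64_t)hi << 16) | mid;
  return (reg << 16) | lo;
}

int main() {
  uint64_t p = 0x000055646d2f70ULL;  // any 48-bit value, e.g. a Metadata*
  assert(emulate_li48(p) == p);
  printf("li48 round-trip ok: 0x%llx\n", (unsigned long long)emulate_li48(p));
  return 0;
}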
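
Note (sketch): the lir_ushr hunks keep the dsll32/dsrl32 workaround and even record a past miscompile in the comment. A short host-side model of why Java's 32-bit ">>>" needs the zero-extension on a 64-bit register:

#include <cassert>
#include <cstdint>
#include <cstdio>

// Model of Java's int ">>>" on a 64-bit MIPS GPR. An int sits sign-extended
// in the register, so a bare 64-bit logical shift (dsrlv) drags copies of the
// sign bit down into the 32-bit result; dsll32 + dsrl32 zero-extends first.
static int32_t java_iushr(int32_t value, int shift) {
  uint64_t reg  = (uint64_t)(int64_t)value;    // sign-extended, as in a GPR
  // Wrong: reg >> shift would leave sign bits in the upper part of the
  // low word whenever value < 0 and shift > 0.
  uint64_t zext = (reg << 32) >> 32;           // dsll32 + dsrl32: keep low 32 bits
  return (int32_t)(zext >> (shift & 31));      // dsrlv on the zero-extended value
}

int main() {
  assert(java_iushr(-8, 1) == 0x7FFFFFFC);     // -8 >>> 1 in Java
  assert(java_iushr(-1, 28) == 0xF);           // -1 >>> 28 in Java
  printf("-8 >>> 1 = 0x%X\n", (unsigned)java_iushr(-8, 1));
  return 0;
}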
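
Note (sketch): the new type_profile_helper walks the ReceiverTypeData rows twice, first bumping the counter of a matching receiver, then claiming the first empty row. A plain C++ model of that MDO update follows; the row count is an assumption (HotSpot's ReceiverTypeData::row_limit() is small, typically 2, but treat kRowLimit as a stand-in).

#include <cstdio>

// Model of the two-pass row update that type_profile_helper emits inline.
struct ReceiverRow { const void* klass; long count; };
static const int kRowLimit = 2;  // stand-in for ReceiverTypeData::row_limit()

static void profile_receiver(ReceiverRow rows[], const void* recv_klass) {
  // Pass 1: receiver already has a row -> bump its counter
  // (the emitted ld_ptr / addi / st_ptr sequence).
  for (int i = 0; i < kRowLimit; i++) {
    if (rows[i].klass == recv_klass) { rows[i].count++; return; }
  }
  // Pass 2: claim the first empty row and start its counter.
  for (int i = 0; i < kRowLimit; i++) {
    if (rows[i].klass == 0) { rows[i].klass = recv_klass; rows[i].count = 1; return; }
  }
  // All rows taken by other types: fall through, as the generated code does.
}

int main() {
  int a, b;  // two distinct dummy "klass" identities
  ReceiverRow rows[kRowLimit] = { { 0, 0 }, { 0, 0 } };
  profile_receiver(rows, &a);
  profile_receiver(rows, &a);
  profile_receiver(rows, &b);
  printf("row0=%ld row1=%ld\n", rows[0].count, rows[1].count);  // 2 and 1
  return 0;
}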
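
Note (sketch): the T_ADDRESS hunks switch the klass-field load to lwu plus decode_klass_not_null when UseCompressedClassPointers is set, and the arraycopy hunks move from UseCompressedOops/encode_heap_oop to UseCompressedClassPointers/encode_klass_not_null. The model below shows the decode direction; base and shift are stand-ins for Universe::narrow_klass_base()/narrow_klass_shift(), not the real values.

#include <cassert>
#include <cstdint>

// Why lwu (zero-extending 32-bit load) matters: with compressed class
// pointers the klass field holds a 32-bit narrow value that must be widened
// by a shift-and-add, which is what decode_klass_not_null performs.
static const uintptr_t kNarrowKlassBase  = 0x800000000ULL;  // assumption
static const unsigned  kNarrowKlassShift = 3;               // assumption

static uintptr_t decode_klass_not_null(uint32_t narrow) {
  return kNarrowKlassBase + ((uintptr_t)narrow << kNarrowKlassShift);
}

int main() {
  uint32_t narrow = 0x00123456;
  assert(decode_klass_not_null(narrow) ==
         kNarrowKlassBase + ((uintptr_t)narrow << kNarrowKlassShift));
  return 0;
}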