src/cpu/mips/vm/c1_LIRAssembler_mips.cpp

changeset 8865:ffcdff41a92f
parent    6880:52ea28d233d2
child     9126:bc5b8e3dcb6b
314 314
315 } 315 }
316 316
317 void LIR_Assembler::jobject2reg_with_patching(Register reg, CodeEmitInfo* info) { 317 void LIR_Assembler::jobject2reg_with_patching(Register reg, CodeEmitInfo* info) {
318 jobject o = NULL; 318 jobject o = NULL;
319 PatchingStub* patch = new PatchingStub(_masm, PatchingStub::load_klass_id);
320 int oop_index = __ oop_recorder()->allocate_oop_index(o); 319 int oop_index = __ oop_recorder()->allocate_oop_index(o);
320 PatchingStub* patch = new PatchingStub(_masm, patching_id(info), oop_index);
321 RelocationHolder rspec = oop_Relocation::spec(oop_index); 321 RelocationHolder rspec = oop_Relocation::spec(oop_index);
322 __ relocate(rspec); 322 __ relocate(rspec);
323 #ifndef _LP64 323 #ifndef _LP64
324 //by_css 324 //by_css
325 __ lui(reg, Assembler::split_high((int)o)); 325 __ lui(reg, Assembler::split_high((int)o));
326 __ addiu(reg, reg, Assembler::split_low((int)o)); 326 __ addiu(reg, reg, Assembler::split_low((int)o));
327 #else 327 #else
328 //li may not pass NativeMovConstReg::verify. see nativeMovConstReg_at(pc_start()); in PatchingStub::install. by aoqi 328 //li may not pass NativeMovConstReg::verify. see nativeMovConstReg_at(pc_start()); in PatchingStub::install. by aoqi
329 // __ li48(reg, (long)o);
329 __ li48(reg, (long)o); 330 __ li48(reg, (long)o);
330 #endif 331 #endif
331 // patching_epilog(patch, LIR_Op1::patch_normal, noreg, info); 332 // patching_epilog(patch, LIR_Op1::patch_normal, noreg, info);
332 patching_epilog(patch, lir_patch_normal, reg, info); 333 patching_epilog(patch, lir_patch_normal, reg, info);
333 } 334 }
334 335
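The rewritten jobject2reg_with_patching above allocates the oop index first and hands it to the PatchingStub; the li48 that follows always emits the same fixed-length lui/ori sequence, which is why the deleted comment warns that a bare li may not pass NativeMovConstReg::verify — the patcher must find a constant of known width at pc_start(). A minimal standalone sketch of that idea, assuming a lui/ori-style three-chunk encoding (illustrative C++, not the HotSpot CodeBuffer API):

    #include <cstdint>
    #include <cstdio>

    // Three fixed-width 16-bit immediate slots, mimicking the lui/ori
    // sequence li48 emits so the runtime patcher can rewrite it in place.
    struct PatchableImm48 {
      uint16_t chunk[3];                         // high, mid, low 16 bits
      void patch(uint64_t v) {                   // what PatchingStub::install does
        chunk[0] = (v >> 32) & 0xffff;
        chunk[1] = (v >> 16) & 0xffff;
        chunk[2] =  v        & 0xffff;
      }
      uint64_t value() const {                   // what executing the code yields
        return ((uint64_t)chunk[0] << 32) | ((uint64_t)chunk[1] << 16) | chunk[2];
      }
    };

    int main() {
      PatchableImm48 site;
      site.patch(0);                   // emitted with jobject o = NULL as a placeholder
      site.patch(0x7f1234abcd00ULL);   // later patched with the resolved constant
      printf("%#llx\n", (unsigned long long)site.value());
      return 0;
    }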
335 336 void LIR_Assembler::klass2reg_with_patching(Register reg, CodeEmitInfo* info) {
336 void LIR_Assembler::monitorexit(LIR_Opr obj_opr, LIR_Opr lock_opr, Register unused, int monitor_no, Register exception) { 337 Metadata *o = NULL;
337 338 int index = __ oop_recorder()->allocate_metadata_index(o);
338 if (exception->is_valid()) { 339 PatchingStub* patch = new PatchingStub(_masm, PatchingStub::load_klass_id, index);
339 // preserve exception 340 RelocationHolder rspec = metadata_Relocation::spec(index);
340 // note: the monitor_exit runtime call is a leaf routine 341 __ relocate(rspec);
341 // and cannot block => no GC can happen 342 __ li48(reg, (long)o);
342 // The slow case (MonitorAccessStub) uses the first two stack slots 343 patching_epilog(patch, lir_patch_normal, reg, info);
343 // ([SP+0] and [SP+4]), therefore we store the exception at [esp+8]
344 __ st_ptr(exception, SP, 2 * wordSize);
345 }
346
347 Register obj_reg = obj_opr->as_register();
348 Register lock_reg = lock_opr->as_register();
349
350 // compute pointer to BasicLock
351 //Address lock_addr = frame_map()->address_for_monitor_lock_index(monitor_no);
352 Address lock_addr = frame_map()->address_for_monitor_lock(monitor_no);
353 __ lea(lock_reg, lock_addr);
354 // unlock object
355 MonitorAccessStub* slow_case = new MonitorExitStub(lock_opr, true, monitor_no);
356 // temporary fix: must be created after exceptionhandler, therefore as call stub
357 _slow_case_stubs->append(slow_case);
358 if (UseFastLocking) {
359 // try inlined fast unlocking first, revert to slow locking if it fails
360 // note: lock_reg points to the displaced header since the displaced header offset is 0!
361 assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
362 __ unlock_object(NOREG, obj_reg, lock_reg, *slow_case->entry());
363 } else {
364 // always do slow unlocking
365 // note: the slow unlocking code could be inlined here, however if we use
366 // slow unlocking, speed doesn't matter anyway and this solution is
367 // simpler and requires less duplicated code - additionally, the
368 // slow unlocking code is the same in either case which simplifies
369 // debugging
370 __ b_far(*slow_case->entry());
371 __ delayed()->nop();
372 }
373 // done
374 __ bind(*slow_case->continuation());
375
376 if (exception->is_valid()) {
377 // restore exception
378 __ ld_ptr(exception, SP, 2 * wordSize);
379 }
380 } 344 }
381 345
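The deleted monitorexit above leans on the BasicLock layout: the displaced header lives at offset 0, so lock_reg itself points at the saved mark word and unlock_object can try to swing the object header back before branching to the MonitorExitStub slow path. A rough sketch of that fast path under those assumptions (std::atomic standing in for the object's mark word; field names are illustrative):

    #include <atomic>
    #include <cstdint>

    struct ObjHeader { std::atomic<uintptr_t> mark; };
    struct BasicLock { uintptr_t displaced_header; };  // at offset 0, as the assert requires

    // Returns true on fast unlock; false means "branch to the slow-path stub".
    bool fast_unlock(ObjHeader* obj, BasicLock* lock) {
      uintptr_t expected = reinterpret_cast<uintptr_t>(lock);  // header points at our lock
      return obj->mark.compare_exchange_strong(expected, lock->displaced_header);
    }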
382 // This specifies the esp decrement needed to build the frame 346 // This specifies the esp decrement needed to build the frame
383 int LIR_Assembler::initial_frame_size_in_bytes() const { 347 int LIR_Assembler::initial_frame_size_in_bytes() const {
384 // if rounding, must let FrameMap know! 348 // if rounding, must let FrameMap know!
403 } 367 }
404 368
405 // generate code for exception handler 369 // generate code for exception handler
406 address handler_base = __ start_a_stub(exception_handler_size); 370 address handler_base = __ start_a_stub(exception_handler_size);
407 if (handler_base == NULL) { 371 if (handler_base == NULL) {
408 //no enough space 372 // not enough space
409 bailout("exception handler overflow"); 373 bailout("exception handler overflow");
410 return -1; 374 return -1;
411 } 375 }
412 376
413 377 int offset = code_offset();
414 378
415 //compilation()->offsets()->set_value(CodeOffsets::Exceptions, code_offset()); 379 // the exception oop and pc are in V0 and V1
416 // if the method does not have an exception handler, then there is
417 // no reason to search for one
418 //if (compilation()->has_exception_handlers() || JvmtiExport::can_post_exceptions()) {
419 // the exception oop and pc are in V0 and V1
420 // no other registers need to be preserved, so invalidate them
421 // check that there is really an exception
422 // __ verify_not_null_oop(V0);
423
424 // search an exception handler (V0: exception oop, V1: throwing pc)
425 // __ call(Runtime1::entry_for(Runtime1::handle_exception_nofpu_id),
426 // relocInfo::runtime_call_type);
427 // __ delayed()->nop();
428 // if the call returns here, then the exception handler for particular
429 // exception doesn't exist -> unwind activation and forward exception to caller
430 // }
431 int offset = code_offset();
432
433 // the exception oop is in V0
434 // no other registers need to be preserved, so invalidate them 380 // no other registers need to be preserved, so invalidate them
381 //__ invalidate_registers(false, true, true, false, true, true);
382
435 // check that there is really an exception 383 // check that there is really an exception
436 __ verify_not_null_oop(V0); 384 __ verify_not_null_oop(V0);
437 //FIXME:wuhui?? 385
438 //__ call(RuntimeAddress(Runtime1::entry_for(Runtime1::handle_exception_from_callee_id))); 386 // search an exception handler (V0: exception oop, V1: throwing pc)
439 //__ delayed()->nop(); 387 __ call(Runtime1::entry_for(Runtime1::handle_exception_from_callee_id));
440 __ should_not_reach_here(); 388 __ delayed()->nop();
441 guarantee(code_offset() - offset <= exception_handler_size, "overflow"); 389 __ should_not_reach_here();
442 __ end_a_stub(); 390 guarantee(code_offset() - offset <= exception_handler_size, "overflow");
443 return offset; 391 __ end_a_stub();
444 392
445 // unlock the receiver/klass if necessary 393 return offset;
446 // V0: exception
447 // ciMethod* method = compilation()->method();
448 // if (method->is_synchronized() && GenerateSynchronizationCode) {
449 //#ifndef _LP64
450 //by_css
451 // monitorexit(FrameMap::_t0_oop_opr, FrameMap::_t6_opr, NOREG, 0, V0);
452 //#else
453 // monitorexit(FrameMap::_t0_oop_opr, FrameMap::_a6_opr, NOREG, 0, V0);
454 //#endif
455 // }
456
457 // unwind activation and forward exception to caller
458 // V0: exception
459 // __ jmp(Runtime1::entry_for(Runtime1::unwind_exception_id),
460 // relocInfo::runtime_call_type);
461 // __ delayed()->nop();
462 // __ end_a_stub();
463 } 394 }
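Stub emission here follows a fixed-budget pattern: start_a_stub reserves exception_handler_size bytes, the handler body is emitted, and the guarantee checks the budget was respected (running out of stub space at start_a_stub time becomes a bailout instead). A tiny sketch of the same discipline, assuming a hypothetical bounded buffer rather than the real CodeBuffer:

    #include <cstddef>

    struct StubBuffer {                    // hypothetical stand-in for a CodeBuffer section
      unsigned char buf[128];
      size_t pos = 0;
    };

    const size_t exception_handler_size = 32;  // assumed budget, as in the real code

    // Returns the stub's start offset, or -1 on overflow (the bailout path).
    int emit_handler(StubBuffer& cb) {
      size_t start = cb.pos;                              // int offset = code_offset();
      for (int i = 0; i < 6; i++) cb.buf[cb.pos++] = 0;   // emit the handler body
      if (cb.pos - start > exception_handler_size) return -1;  // guarantee(...) analogue
      return (int)start;
    }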
464 395
465 // Emit the code to remove the frame from the stack in the exception 396 // Emit the code to remove the frame from the stack in the exception
466 // unwind path. 397 // unwind path.
467 int LIR_Assembler::emit_unwind_handler() { 398 int LIR_Assembler::emit_unwind_handler() {
470 _masm->block_comment("Unwind handler"); 401 _masm->block_comment("Unwind handler");
471 } 402 }
472 #endif 403 #endif
473 404
474 int offset = code_offset(); 405 int offset = code_offset();
475 /* // Fetch the exception from TLS and clear out exception related thread state 406 // Fetch the exception from TLS and clear out exception related thread state
476 __ get_thread(rsi); 407 Register thread = TREG;
477 __ movptr(rax, Address(rsi, JavaThread::exception_oop_offset())); 408 #ifndef OPT_THREAD
478 __ movptr(Address(rsi, JavaThread::exception_oop_offset()), (intptr_t)NULL_WORD); 409 __ get_thread(thread);
479 __ movptr(Address(rsi, JavaThread::exception_pc_offset()), (intptr_t)NULL_WORD); 410 #endif
411 __ ld_ptr(V0, Address(thread, JavaThread::exception_oop_offset()));
412 __ st_ptr(R0, Address(thread, JavaThread::exception_oop_offset()));
413 __ st_ptr(R0, Address(thread, JavaThread::exception_pc_offset()));
480 414
481 __ bind(_unwind_handler_entry); 415 __ bind(_unwind_handler_entry);
482 __ verify_not_null_oop(rax); 416 __ verify_not_null_oop(V0);
483 if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) { 417 if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
484 __ mov(rsi, rax); // Preserve the exception 418 __ move(S0, V0); // Preserve the exception (S0 is callee-saved)
485 } 419 }
486 // Preform needed unlocking 420
487 MonitorExitStub* stub = NULL; 421 // Perform needed unlocking
422 MonitorExitStub* stub = NULL;
488 if (method()->is_synchronized()) { 423 if (method()->is_synchronized()) {
489 monitor_address(0, FrameMap::rax_opr); 424 monitor_address(0, FrameMap::_v0_opr);
490 stub = new MonitorExitStub(FrameMap::rax_opr, true, 0); 425 stub = new MonitorExitStub(FrameMap::_v0_opr, true, 0);
491 __ unlock_object(rdi, rbx, rax, *stub->entry()); 426 __ unlock_object(A0, A1, V0, *stub->entry());
492 __ bind(*stub->continuation()); 427 __ bind(*stub->continuation());
493 } 428 }
494 429
495 if (compilation()->env()->dtrace_method_probes()) { 430 if (compilation()->env()->dtrace_method_probes()) {
496 __ get_thread(rax); 431 __ move(A0, thread);
497 __ movptr(Address(rsp, 0), rax); 432 __ mov_metadata(A1, method()->constant_encoding());
498 __ mov_metadata(Address(rsp, sizeof(void*)), method()->constant_encoding()); 433 __ patchable_call(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit));
499 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit)));
500 } 434 }
501 435
502 if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) { 436 if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
503 __ mov(rax, rsi); // Restore the exception 437 __ move(V0, S0); // Restore the exception
504 } 438 }
505 439
506 // remove the activation and dispatch to the unwind handler 440 // remove the activation and dispatch to the unwind handler
507 __ remove_frame(initial_frame_size_in_bytes()); 441 // leave activation of nmethod
508 __ jump(RuntimeAddress(Runtime1::entry_for(Runtime1::unwind_exception_id))); 442 __ remove_frame(initial_frame_size_in_bytes());
443
444 __ jmp(Runtime1::entry_for(Runtime1::unwind_exception_id));
445 __ delayed()->nop();
509 446
510 // Emit the slow path assembly 447 // Emit the slow path assembly
511 if (stub != NULL) { 448 if (stub != NULL) {
512 stub->emit_code(this); 449 stub->emit_code(this);
513 } 450 }
514 */ 451
515 return offset; 452 return offset;
516 } 453 }
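The rewritten unwind handler loads the pending exception oop out of the thread, clears both JavaThread slots with stores of R0, parks the oop in callee-saved S0 across the unlock and dtrace calls, and finally jumps to unwind_exception. The TLS handoff, sketched in plain C++ (thread_local standing in for the JavaThread fields):

    struct PendingException { void* oop; void* pc; };
    thread_local PendingException tls_exc;   // stands in for JavaThread's exception slots

    void* fetch_and_clear_exception() {
      void* exc = tls_exc.oop;   // __ ld_ptr(V0, thread, exception_oop_offset)
      tls_exc.oop = nullptr;     // __ st_ptr(R0, thread, exception_oop_offset)
      tls_exc.pc  = nullptr;     // __ st_ptr(R0, thread, exception_pc_offset)
      return exc;                // travels in V0 to the unwind stub
    }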
517 454
518 455
519 int LIR_Assembler::emit_deopt_handler() { 456 int LIR_Assembler::emit_deopt_handler() {
651 588
652 589
653 void LIR_Assembler::return_op(LIR_Opr result) { 590 void LIR_Assembler::return_op(LIR_Opr result) {
654 assert(result->is_illegal() || !result->is_single_cpu() || result->as_register() == V0, "word returns are in V0"); 591 assert(result->is_illegal() || !result->is_single_cpu() || result->as_register() == V0, "word returns are in V0");
655 // Pop the stack before the safepoint code 592 // Pop the stack before the safepoint code
656 __ leave(); 593 __ remove_frame(initial_frame_size_in_bytes());
657 #ifndef _LP64 594 #ifndef _LP64
658 //by aoqi 595 //by aoqi
659 __ lui(AT, Assembler::split_high((intptr_t)os::get_polling_page() 596 __ lui(AT, Assembler::split_high((intptr_t)os::get_polling_page()
660 + (SafepointPollOffset % os::vm_page_size()))); 597 + (SafepointPollOffset % os::vm_page_size())));
661 __ relocate(relocInfo::poll_return_type); 598 __ relocate(relocInfo::poll_return_type);
672 __ relocate(relocInfo::poll_return_type); 609 __ relocate(relocInfo::poll_return_type);
673 __ lw(AT, AT, Assembler::split_low((intptr_t)os::get_polling_page() + (SafepointPollOffset % os::vm_page_size()))); 610 __ lw(AT, AT, Assembler::split_low((intptr_t)os::get_polling_page() + (SafepointPollOffset % os::vm_page_size())));
674 #endif 611 #endif
675 #endif 612 #endif
676 613
614 __ pop(RA);
677 __ jr(RA); 615 __ jr(RA);
678 __ delayed()->nop(); 616 __ delayed()->nop();
679 } 617 }
680 618
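return_op ends with a relocated load from the polling page: at a safepoint the VM read-protects that page, the lw traps, and the signal handler recognizes the faulting pc as a return poll. A sketch of arming and disarming such a page with POSIX mprotect (illustrative only, not the VM's os:: layer):

    #include <sys/mman.h>
    #include <cassert>

    static void*  polling_page;
    static size_t page_size;

    void init_polling_page(size_t psz) {
      page_size    = psz;
      polling_page = mmap(nullptr, psz, PROT_READ,
                          MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      assert(polling_page != MAP_FAILED);
    }
    // Arm: every compiled poll load now faults into the SEGV handler.
    void arm_safepoint()    { mprotect(polling_page, page_size, PROT_NONE); }
    void disarm_safepoint() { mprotect(polling_page, page_size, PROT_READ); }

The comment below records why the poll loads into AT rather than R0 on this core.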
681 // Only on Godson-2E does a read of protected memory into R0 raise no exception, so AT is used instead of R0. @jerome, 11/25/2006 619 // Only on Godson-2E does a read of protected memory into R0 raise no exception, so AT is used instead of R0. @jerome, 11/25/2006
722 void LIR_Assembler::const2reg(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) { 660 void LIR_Assembler::const2reg(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) {
723 assert(src->is_constant(), "should not call otherwise"); 661 assert(src->is_constant(), "should not call otherwise");
724 assert(dest->is_register(), "should not call otherwise"); 662 assert(dest->is_register(), "should not call otherwise");
725 LIR_Const* c = src->as_constant_ptr(); 663 LIR_Const* c = src->as_constant_ptr();
726 switch (c->type()) { 664 switch (c->type()) {
665 case T_ADDRESS: {
666 assert(patch_code == lir_patch_none, "no patching handled here");
667 __ move(dest->as_register(), c->as_jint()); // FIXME
668 break;
669 }
670
727 case T_INT: { 671 case T_INT: {
728 jint con = c->as_jint(); 672 assert(patch_code == lir_patch_none, "no patching handled here");
729 if (dest->is_single_cpu()) { 673 __ move(dest->as_register(), c->as_jint());
730 assert(patch_code == lir_patch_none, "no patching handled here");
731 __ move(dest->as_register(), con);
732 } else {
733 assert(dest->is_single_fpu(), "wrong register kind");
734 __ move(AT, con);
735 __ mtc1(AT, dest->as_float_reg());
736 }
737 break; 674 break;
738 } 675 }
739 676
740 case T_LONG: { 677 case T_LONG: {
741 #ifndef _LP64 678 #ifndef _LP64
770 jobject2reg_with_patching(dest->as_register(), info); 707 jobject2reg_with_patching(dest->as_register(), info);
771 } 708 }
772 break; 709 break;
773 } 710 }
774 711
712 case T_METADATA: {
713 if (patch_code != lir_patch_none) {
714 klass2reg_with_patching(dest->as_register(), info);
715 } else {
716 __ mov_metadata(dest->as_register(), c->as_metadata());
717 }
718 break;
719 }
720
775 case T_FLOAT: { 721 case T_FLOAT: {
776 address const_addr = float_constant(c->as_jfloat()); 722 address const_addr = float_constant(c->as_jfloat());
777 assert (const_addr != NULL, "must create float constant in the constant table"); 723 assert (const_addr != NULL, "must create float constant in the constant table");
778 724
779 if (dest->is_single_fpu()) { 725 if (dest->is_single_fpu()) {
851 switch (c->type()) { 797 switch (c->type()) {
852 case T_INT: // fall through 798 case T_INT: // fall through
853 case T_FLOAT: 799 case T_FLOAT:
854 __ move(AT, c->as_jint_bits()); 800 __ move(AT, c->as_jint_bits());
855 __ sw(AT, frame_map()->address_for_slot(dest->single_stack_ix())); 801 __ sw(AT, frame_map()->address_for_slot(dest->single_stack_ix()));
802 break;
803
804 case T_ADDRESS:
805 __ move(AT, c->as_jint_bits());
806 __ st_ptr(AT, frame_map()->address_for_slot(dest->single_stack_ix()));
856 break; 807 break;
857 808
858 case T_OBJECT: 809 case T_OBJECT:
859 if (c->as_jobject() == NULL) { 810 if (c->as_jobject() == NULL) {
860 __ st_ptr(R0, frame_map()->address_for_slot(dest->single_stack_ix())); 811 __ st_ptr(R0, frame_map()->address_for_slot(dest->single_stack_ix()));
954 if(c->as_jint_bits() != 0) { 905 if(c->as_jint_bits() != 0) {
955 __ move(AT, c->as_jint_bits()); 906 __ move(AT, c->as_jint_bits());
956 __ sw(AT, as_Address(addr)); 907 __ sw(AT, as_Address(addr));
957 } else 908 } else
958 __ sw(R0, as_Address(addr)); 909 __ sw(R0, as_Address(addr));
910 break;
911 case T_ADDRESS:
912 __ move(AT, c->as_jint_bits());
913 __ st_ptr(AT, as_Address(addr));
959 break; 914 break;
960 case T_BOOLEAN: // fall through 915 case T_BOOLEAN: // fall through
961 case T_BYTE: 916 case T_BYTE:
962 if(c->as_jint() != 0) { 917 if(c->as_jint() != 0) {
963 __ move(AT, c->as_jint()); 918 __ move(AT, c->as_jint());
1215 __ swc1(src->as_float_reg(), AT, Assembler::split_low(disp_value)); 1170 __ swc1(src->as_float_reg(), AT, Assembler::split_low(disp_value));
1216 } 1171 }
1217 break; 1172 break;
1218 1173
1219 case T_LONG: { 1174 case T_LONG: {
1220 Register from_lo = src->as_register_lo(); 1175 Register from_lo = src->as_register_lo();
1221 Register from_hi = src->as_register_hi(); 1176 Register from_hi = src->as_register_hi();
1222 #ifdef _LP64 1177 #ifdef _LP64
1223 if (needs_patching) { 1178 if (needs_patching) {
1224 __ add(AT, dest_reg, disp_reg); 1179 __ add(AT, dest_reg, disp_reg);
1225 __ st_ptr(from_lo, AT, 0); 1180 __ st_ptr(from_lo, AT, 0);
1533 1488
1534 case T_OBJECT: 1489 case T_OBJECT:
1535 case T_ARRAY: 1490 case T_ARRAY:
1536 if (UseCompressedOops && !wide) { 1491 if (UseCompressedOops && !wide) {
1537 if (disp_reg == noreg) { 1492 if (disp_reg == noreg) {
1538 __ lw(dest->as_register(), src_reg, disp_value); 1493 __ lwu(dest->as_register(), src_reg, disp_value);
1539 } else if (needs_patching) { 1494 } else if (needs_patching) {
1540 __ dadd(AT, src_reg, disp_reg); 1495 __ dadd(AT, src_reg, disp_reg);
1541 offset = code_offset(); 1496 offset = code_offset();
1542 __ lw(dest->as_register(), AT, 0); 1497 __ lwu(dest->as_register(), AT, 0);
1543 } else { 1498 } else {
1544 __ dadd(AT, src_reg, disp_reg); 1499 __ dadd(AT, src_reg, disp_reg);
1545 offset = code_offset(); 1500 offset = code_offset();
1546 __ lw(dest->as_register(), AT, Assembler::split_low(disp_value)); 1501 __ lwu(dest->as_register(), AT, Assembler::split_low(disp_value));
1547 } 1502 }
1548
1549 } else { 1503 } else {
1550 if (disp_reg == noreg) { 1504 if (disp_reg == noreg) {
1551 __ ld_ptr(dest->as_register(), src_reg, disp_value); 1505 __ ld_ptr(dest->as_register(), src_reg, disp_value);
1552 } else if (needs_patching) { 1506 } else if (needs_patching) {
1553 __ dadd(AT, src_reg, disp_reg); 1507 __ dadd(AT, src_reg, disp_reg);
1554 offset = code_offset(); 1508 offset = code_offset();
1555 __ ld_ptr(dest->as_register(), AT, 0); 1509 __ ld_ptr(dest->as_register(), AT, 0);
1556 } else { 1510 } else {
1557 __ dadd(AT, src_reg, disp_reg); 1511 __ dadd(AT, src_reg, disp_reg);
1558 offset = code_offset(); 1512 offset = code_offset();
1559 __ ld_ptr(dest->as_register(), AT, Assembler::split_low(disp_value)); 1513 __ ld_ptr(dest->as_register(), AT, Assembler::split_low(disp_value));
1560 } 1514 }
1561 } 1515 }
1562 break; 1516 break;
1563 case T_ADDRESS: 1517 case T_ADDRESS:
1564 if (disp_reg == noreg) { 1518 if (UseCompressedClassPointers && addr->disp() == oopDesc::klass_offset_in_bytes()) {
1519 if (disp_reg == noreg) {
1520 __ lwu(dest->as_register(), src_reg, disp_value);
1521 } else if (needs_patching) {
1522 __ dadd(AT, src_reg, disp_reg);
1523 offset = code_offset();
1524 __ lwu(dest->as_register(), AT, 0);
1525 } else {
1526 __ dadd(AT, src_reg, disp_reg);
1527 offset = code_offset();
1528 __ lwu(dest->as_register(), AT, Assembler::split_low(disp_value));
1529 }
1530 } else {
1531 if (disp_reg == noreg) {
1565 __ ld_ptr(dest->as_register(), src_reg, disp_value); 1532 __ ld_ptr(dest->as_register(), src_reg, disp_value);
1566 } else if (needs_patching) { 1533 } else if (needs_patching) {
1567 __ dadd(AT, src_reg, disp_reg); 1534 __ dadd(AT, src_reg, disp_reg);
1568 offset = code_offset(); 1535 offset = code_offset();
1569 __ ld_ptr(dest->as_register(), AT, 0); 1536 __ ld_ptr(dest->as_register(), AT, 0);
1570 } else { 1537 } else {
1571 __ dadd(AT, src_reg, disp_reg); 1538 __ dadd(AT, src_reg, disp_reg);
1572 offset = code_offset(); 1539 offset = code_offset();
1573 __ ld_ptr(dest->as_register(), AT, Assembler::split_low(disp_value)); 1540 __ ld_ptr(dest->as_register(), AT, Assembler::split_low(disp_value));
1541 }
1574 } 1542 }
1575 break; 1543 break;
1576 case T_INT: { 1544 case T_INT: {
1577 //assert(to_reg.is_word(), "just check"); 1545 //assert(to_reg.is_word(), "just check");
1578 if (disp_reg == noreg) { 1546 if (disp_reg == noreg) {
1702 if (UseCompressedOops && !wide) { 1670 if (UseCompressedOops && !wide) {
1703 __ decode_heap_oop(dest->as_register()); 1671 __ decode_heap_oop(dest->as_register());
1704 } 1672 }
1705 #endif 1673 #endif
1706 __ verify_oop(dest->as_register()); 1674 __ verify_oop(dest->as_register());
1675 } else if (type == T_ADDRESS && addr->disp() == oopDesc::klass_offset_in_bytes()) {
1676 if (UseCompressedClassPointers) {
1677 __ decode_klass_not_null(dest->as_register());
1678 }
1707 } 1679 }
1708 if (info != NULL) add_debug_info_for_null_check(offset, info); 1680 if (info != NULL) add_debug_info_for_null_check(offset, info);
1709 } 1681 }
1710 1682
1711 1683
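The loads above switch to lwu (a zero-extending 32-bit load) for narrow oops and narrow klass words, after which decode_heap_oop and decode_klass_not_null widen the value as base + (narrow << shift). The arithmetic, sketched with assumed base and shift values:

    #include <cstdint>

    const uint64_t heap_base = 0x0000000800000000ULL;  // assumed CompressedOops base
    const unsigned oop_shift = 3;                      // assumed 8-byte object alignment

    uint64_t decode_heap_oop(uint32_t narrow) {        // lwu + decode_heap_oop
      return narrow == 0 ? 0 : heap_base + ((uint64_t)narrow << oop_shift);
    }
    uint32_t encode_heap_oop(uint64_t oop) {
      return oop == 0 ? 0 : (uint32_t)((oop - heap_base) >> oop_shift);
    }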
3086 *op->stub()->entry()); 3058 *op->stub()->entry());
3087 } 3059 }
3088 __ bind(*op->stub()->continuation()); 3060 __ bind(*op->stub()->continuation());
3089 } 3061 }
3090 3062
3063 void LIR_Assembler::type_profile_helper(Register mdo,
3064 ciMethodData *md, ciProfileData *data,
3065 Register recv, Label* update_done) {
3066 for (uint i = 0; i < ReceiverTypeData::row_limit(); i++) {
3067 Label next_test;
3068 // See if the receiver is receiver[n].
3069 __ ld_ptr(AT, Address(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i))));
3070 __ bne(AT, recv, next_test);
3071 __ delayed()->nop();
3072 Address data_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i)));
3073 __ ld_ptr(AT, data_addr);
3074 __ addi(AT, AT, DataLayout::counter_increment);
3075 __ st_ptr(AT, data_addr);
3076 __ b(*update_done);
3077 __ delayed()->nop();
3078 __ bind(next_test);
3079 }
3080
3081 // Didn't find receiver; find next empty slot and fill it in
3082 for (uint i = 0; i < ReceiverTypeData::row_limit(); i++) {
3083 Label next_test;
3084 Address recv_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i)));
3085 __ ld_ptr(AT, recv_addr);
3086 __ bne(AT, R0, next_test);
3087 __ delayed()->nop();
3088 __ st_ptr(recv, recv_addr);
3089 __ move(AT, DataLayout::counter_increment);
3090 __ st_ptr(AT, Address(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i))));
3091 __ b(*update_done);
3092 __ delayed()->nop();
3093 __ bind(next_test);
3094 }
3095 }
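type_profile_helper makes two passes over the ReceiverTypeData rows: the first bumps the counter of a row that already holds this receiver klass, the second claims the first empty row; if both miss, execution falls through and the receiver goes unrecorded. The same logic in plain C++ (row count and increment are illustrative):

    #include <cstdint>

    const int     row_limit         = 2;   // stands in for ReceiverTypeData::row_limit()
    const int64_t counter_increment = 1;   // stands in for DataLayout::counter_increment

    struct ReceiverRow { const void* receiver; int64_t count; };

    void profile_receiver(ReceiverRow rows[], const void* recv) {
      for (int i = 0; i < row_limit; i++)        // pass 1: known receiver?
        if (rows[i].receiver == recv) { rows[i].count += counter_increment; return; }
      for (int i = 0; i < row_limit; i++)        // pass 2: claim an empty slot
        if (rows[i].receiver == nullptr) {
          rows[i].receiver = recv;
          rows[i].count    = counter_increment;
          return;
        }
      // table full: fall through, as the generated code does
    }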
3096
3097 void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, Label* failure, Label* obj_is_null) {
3098 // we always need a stub for the failure case.
3099 CodeStub* stub = op->stub();
3100 Register obj = op->object()->as_register();
3101 Register k_RInfo = op->tmp1()->as_register();
3102 Register klass_RInfo = op->tmp2()->as_register();
3103 Register dst = op->result_opr()->as_register();
3104 ciKlass* k = op->klass();
3105 Register Rtmp1 = noreg;
3106
3107 // check if it needs to be profiled
3108 ciMethodData* md;
3109 ciProfileData* data;
3110
3111 if (op->should_profile()) {
3112 ciMethod* method = op->profiled_method();
3113 assert(method != NULL, "Should have method");
3114 int bci = op->profiled_bci();
3115 md = method->method_data_or_null();
3116 assert(md != NULL, "Sanity");
3117 data = md->bci_to_data(bci);
3118 assert(data != NULL, "need data for type check");
3119 assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
3120 }
3121 Label profile_cast_success, profile_cast_failure;
3122 Label *success_target = op->should_profile() ? &profile_cast_success : success;
3123 Label *failure_target = op->should_profile() ? &profile_cast_failure : failure;
3124
3125 if (obj == k_RInfo) {
3126 k_RInfo = dst;
3127 } else if (obj == klass_RInfo) {
3128 klass_RInfo = dst;
3129 }
3130 if (k->is_loaded() && !UseCompressedClassPointers) {
3131 select_different_registers(obj, dst, k_RInfo, klass_RInfo);
3132 } else {
3133 Rtmp1 = op->tmp3()->as_register();
3134 select_different_registers(obj, dst, k_RInfo, klass_RInfo, Rtmp1);
3135 }
3136
3137 assert_different_registers(obj, k_RInfo, klass_RInfo);
3138
3139 if (op->should_profile()) {
3140 Label not_null;
3141 __ bne(obj, R0, not_null);
3142 __ delayed()->nop();
3143 // Object is null; update MDO and exit
3144 Register mdo = klass_RInfo;
3145 __ mov_metadata(mdo, md->constant_encoding());
3146 Address data_addr(mdo, md->byte_offset_of_slot(data, DataLayout::header_offset()));
3147 int header_bits = DataLayout::flag_mask_to_header_mask(BitData::null_seen_byte_constant());
3148 __ lw(AT, data_addr);
3149 __ ori(AT, AT, header_bits);
3150 __ sw(AT,data_addr);
3151 __ b(*obj_is_null);
3152 __ delayed()->nop();
3153 __ bind(not_null);
3154 } else {
3155 __ beq(obj, R0, *obj_is_null);
3156 __ delayed()->nop();
3157 }
3158
3159 if (!k->is_loaded()) {
3160 klass2reg_with_patching(k_RInfo, op->info_for_patch());
3161 } else {
3162 #ifdef _LP64
3163 __ mov_metadata(k_RInfo, k->constant_encoding());
3164 #endif // _LP64
3165 }
3166 __ verify_oop(obj);
3167
3168 if (op->fast_check()) {
3169 // get object class
3170 // not a safepoint as obj null check happens earlier
3171 if (UseCompressedClassPointers) {
3172 __ load_klass(Rtmp1, obj);
3173 __ bne(k_RInfo, Rtmp1, *failure_target);
3174 __ delayed()->nop();
3175 } else {
3176 __ ld(AT, Address(obj, oopDesc::klass_offset_in_bytes()));
3177 __ bne(k_RInfo, AT, *failure_target);
3178 __ delayed()->nop();
3179 }
3180 // successful cast, fall through to profile or jump
3181 } else {
3182 // get object class
3183 // not a safepoint as obj null check happens earlier
3184 __ load_klass(klass_RInfo, obj);
3185 if (k->is_loaded()) {
3186 // See if we get an immediate positive hit
3187 __ ld(AT, Address(klass_RInfo, k->super_check_offset()));
3188 if ((juint)in_bytes(Klass::secondary_super_cache_offset()) != k->super_check_offset()) {
3189 __ bne(k_RInfo, AT, *failure_target);
3190 __ delayed()->nop();
3191 // successful cast, fall through to profile or jump
3192 } else {
3193 // See if we get an immediate positive hit
3194 __ beq(k_RInfo, AT, *success_target);
3195 __ delayed()->nop();
3196 // check for self
3197 __ beq(k_RInfo, klass_RInfo, *success_target);
3198 __ delayed()->nop();
3199
3200 __ push(klass_RInfo);
3201 __ push(k_RInfo);
3202 __ call(Runtime1::entry_for(Runtime1::slow_subtype_check_id), relocInfo::runtime_call_type);
3203 __ pop(klass_RInfo);
3204 __ pop(klass_RInfo);
3205 // result is a boolean
3206 __ beq(klass_RInfo, R0, *failure_target);
3207 __ delayed()->nop();
3208 // successful cast, fall through to profile or jump
3209 }
3210 } else {
3211 // perform the fast part of the checking logic
3212 __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, NULL);
3213 // call out-of-line instance of __ check_klass_subtype_slow_path(...):
3214 __ push(klass_RInfo);
3215 __ push(k_RInfo);
3216 __ call(Runtime1::entry_for(Runtime1::slow_subtype_check_id), relocInfo::runtime_call_type);
3217 __ pop(klass_RInfo);
3218 __ pop(k_RInfo);
3219 // result is a boolean
3220 __ beq(k_RInfo, R0, *failure_target);
3221 __ delayed()->nop();
3222 // successful cast, fall through to profile or jump
3223 }
3224 }
3225 if (op->should_profile()) {
3226 Register mdo = klass_RInfo, recv = k_RInfo;
3227 __ bind(profile_cast_success);
3228 __ mov_metadata(mdo, md->constant_encoding());
3229 __ load_klass(recv, obj);
3230 Label update_done;
3231 type_profile_helper(mdo, md, data, recv, success);
3232 __ b(*success);
3233 __ delayed()->nop();
3234
3235 __ bind(profile_cast_failure);
3236 __ mov_metadata(mdo, md->constant_encoding());
3237 Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));
3238 __ ld_ptr(AT, counter_addr);
3239 __ addi(AT, AT, -DataLayout::counter_increment);
3240 __ st_ptr(AT, counter_addr);
3241
3242 __ b(*failure);
3243 __ delayed()->nop();
3244 }
3245 __ b(*success);
3246 __ delayed()->nop();
3247 }
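emit_typecheck_helper follows HotSpot's two-level subtype check: one load at the super klass's super_check_offset catches both the primary-supers display and the secondary cache, and only on a cache-offset miss does it call the slow_subtype_check stub, which walks the secondary supers. In outline, under a deliberately simplified Klass model (not the real layout):

    #include <cstddef>

    struct Klass {
      static const int primary_limit = 8;
      Klass*  primary_supers[primary_limit];  // fixed-depth display
      Klass*  secondary_super_cache;
      Klass** secondary_supers;
      int     n_secondary;
      size_t  super_check_offset;  // where instances of *this* appear in a subclass
    };

    bool is_subtype_of(Klass* sub, Klass* super) {
      // fast path: a single load + compare, like the generated ld/bne above
      Klass** slot = (Klass**)((char*)sub + super->super_check_offset);
      if (*slot == super) return true;
      if (super->super_check_offset != offsetof(Klass, secondary_super_cache))
        return false;  // a display miss is definitive
      for (int i = 0; i < sub->n_secondary; i++) {  // the slow-path stub's walk
        if (sub->secondary_supers[i] == super) {
          sub->secondary_super_cache = super;       // cache for next time
          return true;
        }
      }
      return false;
    }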
3248
3091 3249
3092 3250
3093 void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) { 3251 void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
3094 LIR_Code code = op->code(); 3252 LIR_Code code = op->code();
3095 // if (code == lir_store_check) { 3253 if (code == lir_store_check) {
3096 Register value = op->object()->as_register(); 3254 Register value = op->object()->as_register();
3097 Register array = op->array()->as_register(); 3255 Register array = op->array()->as_register();
3098 Register k_RInfo = op->tmp1()->as_register(); 3256 Register k_RInfo = op->tmp1()->as_register();
3099 Register klass_RInfo = op->tmp2()->as_register(); 3257 Register klass_RInfo = op->tmp2()->as_register();
3100 Register tmp = op->tmp3()->as_register(); 3258 Register tmp = op->tmp3()->as_register();
3101 3259
3102 CodeStub* stub = op->stub(); 3260 CodeStub* stub = op->stub();
3261
3103 //check if it needs to be profiled 3262 //check if it needs to be profiled
3104 ciMethodData* md; 3263 ciMethodData* md;
3105 ciProfileData* data; 3264 ciProfileData* data;
3265
3106 if (op->should_profile()) { 3266 if (op->should_profile()) {
3107 ciMethod* method = op->profiled_method(); 3267 ciMethod* method = op->profiled_method();
3108 assert(method != NULL, "Should have method"); 3268 assert(method != NULL, "Should have method");
3109 int bci = op->profiled_bci(); 3269 int bci = op->profiled_bci();
3110 md = method->method_data_or_null(); 3270 md = method->method_data_or_null();
3111 assert(md != NULL, "Sanity"); 3271 assert(md != NULL, "Sanity");
3112 data = md->bci_to_data(bci); 3272 data = md->bci_to_data(bci);
3113 assert(data != NULL, "need data for type check"); 3273 assert(data != NULL, "need data for type check");
3114 assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check"); 3274 assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
3115 } 3275 }
3116 Label profile_cast_success, profile_cast_failure, done; 3276 Label profile_cast_success, profile_cast_failure, done;
3117 Label *success_target = op->should_profile() ? &profile_cast_success : &done; 3277 Label *success_target = op->should_profile() ? &profile_cast_success : &done;
3118 Label *failure_target = op->should_profile() ? &profile_cast_failure : &done; 3278 Label *failure_target = op->should_profile() ? &profile_cast_failure : stub->entry();
3119 //__ cmpptr(value, (int32_t)NULL_WORD); 3279
3120 if(op->should_profile()) { 3280 if(op->should_profile()) {
3121 Label not_null; 3281 Label not_null;
3122 __ bne(value, R0, not_null); 3282 __ bne(value, R0, not_null);
3123 __ delayed()->nop(); 3283 __ delayed()->nop();
3124 3284
3125 // __ jcc(Assembler::notEqual, profile_done); 3285 Register mdo = klass_RInfo;
3126 // __ bne(obj, R0, profile_done); 3286 __ mov_metadata(mdo, md->constant_encoding());
3127 //__ delayed()->nop(); 3287 Address data_addr(mdo, md->byte_offset_of_slot(data, DataLayout::header_offset()));
3128 3288 int header_bits = DataLayout::flag_mask_to_header_mask(BitData::null_seen_byte_constant());
3129 // Object is null; update methodDataOop 3289 __ lw(AT, data_addr);
3130 //ciMethodData* md = method->method_data(); 3290 __ ori(AT, AT, header_bits);
3131 //if (md == NULL) { 3291 __ sw(AT,data_addr);
3132 // bailout("out of memory building methodDataOop"); 3292 __ b(done);
3133 // return; 3293 __ delayed()->nop();
3134 // } 3294 __ bind(not_null);
3135 // ciProfileData* data = md->bci_to_data(bci); 3295 } else {
3136 //assert(data != NULL, "need data for checkcast"); 3296 __ beq(value, R0, done);
3137 // assert(data->is_BitData(), "need BitData for checkcast"); 3297 __ delayed()->nop();
3138 Register mdo = klass_RInfo; 3298 }
3139 int oop_index = __ oop_recorder()->find_index(md->constant_encoding()); 3299
3140 RelocationHolder rspec = oop_Relocation::spec(oop_index);
3141 __ relocate(rspec);
3142 #ifndef _LP64
3143 //by_css
3144 __ lui(mdo, Assembler::split_high((int)md->constant_encoding()));
3145 __ addiu(mdo, mdo, Assembler::split_low((int)md->consant_encoding()));
3146 #else
3147 __ li48(mdo, (long)md->constant_encoding());
3148 #endif
3149
3150 Address data_addr(mdo, md->byte_offset_of_slot(data, DataLayout::header_offset()));
3151 //FIXME, it very ineffictive to replace orl with 3 mips instruction @jerome, 12/27,06
3152 //__ orl(data_addr, BitData::null_flag_constant());
3153 int header_bits = DataLayout::flag_mask_to_header_mask(BitData::null_seen_byte_constant());
3154 __ lw(AT, data_addr);
3155 __ ori(AT, AT, header_bits);
3156 __ sw(AT,data_addr);
3157 __ b(done);
3158 __ delayed()->nop();
3159 __ bind(not_null);
3160 } else {
3161 __ beq(value, R0, done);
3162 __ delayed()->nop();
3163 }
3164 //__ verify_oop(obj);
3165 add_debug_info_for_null_check_here(op->info_for_exception()); 3300 add_debug_info_for_null_check_here(op->info_for_exception());
3166 __ load_klass(k_RInfo, array); 3301 __ load_klass(k_RInfo, array);
3167 __ load_klass(klass_RInfo, value); 3302 __ load_klass(klass_RInfo, value);
3168 // get instance klass (it's already uncompressed) 3303 // get instance klass (it's already uncompressed)
3169 //__ movptr(k_RInfo, Address(k_RInfo, ObjArrayKlass::element_klass_offset())); 3304 __ ld_ptr(k_RInfo, Address(k_RInfo, ObjArrayKlass::element_klass_offset()));
3170 __ daddi (k_RInfo, k_RInfo, in_bytes(ObjArrayKlass::element_klass_offset())); 3305 // perform the fast part of the checking logic
3171 // perform the fast part of the checking logic 3306 __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, tmp, success_target, failure_target, NULL);
3172 //__ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, NULL); 3307 __ push(klass_RInfo);
3173 // call out-of-line instance of __ check_klass_subtype_slow_path(...): 3308 __ push(k_RInfo);
3174 //1899 __ push(klass_RInfo); 3309 __ call(Runtime1::entry_for(Runtime1::slow_subtype_check_id), relocInfo::runtime_call_type);
3175 //1900 __ push(k_RInfo); 3310 __ pop(klass_RInfo);
3176 //1901 __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id))); 3311 __ pop(k_RInfo);
3177 //1902 __ pop(klass_RInfo); 3312 // result is a boolean
3178 //1903 __ pop(k_RInfo); 3313 __ beq(k_RInfo, R0, *failure_target);
3179 //1904 // result is a boolean 3314 __ delayed()->nop();
3180 ///1905 __ cmpl(k_RInfo, 0); 3315 // fall through to the success case
3181 //1906 __ jcc(Assembler::equal, *failure_target); 3316
3182 //1907 // fall through to the success case 3317 if (op->should_profile()) {
3183 //1908 3318 Register mdo = klass_RInfo, recv = k_RInfo;
3184 //1909 if (op->should_profile()) { 3319 __ bind(profile_cast_success);
3185 //1910 Register mdo = klass_RInfo, recv = k_RInfo; 3320 __ mov_metadata(mdo, md->constant_encoding());
3186 //1911 __ bind(profile_cast_success); 3321 __ load_klass(recv, value);
3187 //1912 __ mov_metadata(mdo, md->constant_encoding()); 3322 Label update_done;
3188 //1913 __ load_klass(recv, value); 3323 type_profile_helper(mdo, md, data, recv, &done);
3189 //1914 Label update_done; 3324 __ b(done);
3190 //1915 type_profile_helper(mdo, md, data, recv, &done); 3325 __ delayed()->nop();
3191 //1916 __ jmpb(done); 3326
3192 //1917 3327 __ bind(profile_cast_failure);
3193 //1918 __ bind(profile_cast_failure); 3328 __ mov_metadata(mdo, md->constant_encoding());
3194 //1919 __ mov_metadata(mdo, md->constant_encoding()); 3329 Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));
3195 //1920 Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset())); 3330 __ ld_ptr(AT, counter_addr);
3196 //1921 __ subptr(counter_addr, DataLayout::counter_increment); 3331 __ addi(AT, AT, -DataLayout::counter_increment);
3197 //1922 __ jmp(*stub->entry()); 3332 __ st_ptr(AT, counter_addr);
3198 //1923 } 3333 __ b(*stub->entry());
3199 //1925 __ bind(done); 3334 __ delayed()->nop();
3200 //1926 } else 3335 }
3201 //1927 if (code == lir_checkcast) { 3336
3202 //1928 Register obj = op->object()->as_register(); 3337 __ bind(done);
3203 //1929 Register dst = op->result_opr()->as_register(); 3338 } else if (code == lir_checkcast) {
3204 //1930 Label success; 3339 Register obj = op->object()->as_register();
3205 //1931 emit_typecheck_helper(op, &success, op->stub()->entry(), &success); 3340 Register dst = op->result_opr()->as_register();
3206 //1932 __ bind(success); 3341 Label success;
3207 //1933 if (dst != obj) { 3342 emit_typecheck_helper(op, &success, op->stub()->entry(), &success);
3208 //1934 __ mov(dst, obj); 3343 __ bind(success);
3209 //1935 } 3344 if (dst != obj) {
3210 //1936 } else 3345 __ move(dst, obj);
3211 //1937 if (code == lir_instanceof) { 3346 }
3212 //1938 Register obj = op->object()->as_register(); 3347 } else if (code == lir_instanceof) {
3213 ///1939 Register dst = op->result_opr()->as_register(); 3348 Register obj = op->object()->as_register();
3214 //1940 Label success, failure, done; 3349 Register dst = op->result_opr()->as_register();
3215 //1941 emit_typecheck_helper(op, &success, &failure, &failure); 3350 Label success, failure, done;
3216 ///1942 __ bind(failure); 3351 emit_typecheck_helper(op, &success, &failure, &failure);
3217 //1943 __ xorptr(dst, dst); 3352 __ bind(failure);
3218 //1944 __ jmpb(done); 3353 __ move(dst, R0);
3219 //1945 __ bind(success); 3354 __ b(done);
3220 //1946 __ movptr(dst, 1); 3355 __ delayed()->nop();
3221 //1947 __ bind(done); 3356 __ bind(success);
3222 //1948 } else { 3357 __ addi(dst, R0, 1);
3223 //1949 ShouldNotReachHere(); 3358 __ bind(done);
3224 //1950 } 3359 } else {
3225 //FIXME:wuhui. 3360 ShouldNotReachHere();
3226 3361 }
3227 } 3362 }
3228
3229 3363
3230 void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) { 3364 void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) {
3231 if (op->code() == lir_cas_long) { 3365 if (op->code() == lir_cas_long) {
3232 #ifdef _LP64 3366 #ifdef _LP64
3233 Register addr = (op->addr()->is_single_cpu() ? op->addr()->as_register() : op->addr()->as_register_lo()); 3367 Register addr = (op->addr()->is_single_cpu() ? op->addr()->as_register() : op->addr()->as_register_lo());
3368 } else if (right->is_stack()) { 3502 } else if (right->is_stack()) {
3369 // cpu register - stack 3503 // cpu register - stack
3370 Unimplemented(); 3504 Unimplemented();
3371 } else if (right->is_constant()) { 3505 } else if (right->is_constant()) {
3372 // cpu register - constant 3506 // cpu register - constant
3373 Register res = dest->as_register(); 3507 Register res;
3374 jint c = right->as_constant_ptr()->as_jint(); 3508 if (dest->is_double_cpu()) {
3509 res = dest->as_register_lo();
3510 } else {
3511 res = dest->as_register();
3512 }
3513 jint c;
3514 if (right->type() == T_INT) {
3515 c = right->as_constant_ptr()->as_jint();
3516 } else {
3517 c = right->as_constant_ptr()->as_jlong();
3518 }
3375 3519
3376 switch (code) { 3520 switch (code) {
3377 case lir_mul_strictfp: 3521 case lir_mul_strictfp:
3378 case lir_mul: 3522 case lir_mul:
3379 __ move(AT, c); 3523 __ move(AT, c);
3761 Register reg = left->as_register(); 3905 Register reg = left->as_register();
3762 if (right->is_constant()) { 3906 if (right->is_constant()) {
3763 int val = right->as_constant_ptr()->as_jint(); 3907 int val = right->as_constant_ptr()->as_jint();
3764 __ move(AT, val); 3908 __ move(AT, val);
3765 switch (code) { 3909 switch (code) {
3766 case lir_logic_and: 3910 case lir_logic_and:
3767 __ andr (dstreg, reg, AT); 3911 __ andr (dstreg, reg, AT);
3768 break; 3912 break;
3769 case lir_logic_or: 3913 case lir_logic_or:
3770 __ orr(dstreg, reg, AT); 3914 __ orr(dstreg, reg, AT);
3771 break; 3915 break;
3772 case lir_logic_xor: 3916 case lir_logic_xor:
3773 __ xorr(dstreg, reg, AT); 3917 __ xorr(dstreg, reg, AT);
3774 break; 3918 break;
3775 default: ShouldNotReachHere(); 3919 default: ShouldNotReachHere();
3776 } 3920 }
3777 } else if (right->is_stack()) { 3921 } else if (right->is_stack()) {
3778 // added support for stack operands 3922 // added support for stack operands
3779 Address raddr = frame_map()->address_for_slot(right->single_stack_ix()); 3923 Address raddr = frame_map()->address_for_slot(right->single_stack_ix());
3780 switch (code) { 3924 switch (code) {
3781 case lir_logic_and: 3925 case lir_logic_and:
3782 //FIXME. lw or ld_ptr? 3926 //FIXME. lw or ld_ptr?
3783 __ lw(AT, raddr); 3927 __ lw(AT, raddr);
3784 __ andr(reg, reg,AT); 3928 __ andr(reg, reg,AT);
3785 break; 3929 break;
3786 case lir_logic_or: 3930 case lir_logic_or:
3787 __ lw(AT, raddr); 3931 __ lw(AT, raddr);
3788 __ orr(reg, reg, AT); 3932 __ orr(reg, reg, AT);
3789 break; 3933 break;
3790 case lir_logic_xor: 3934 case lir_logic_xor:
3791 __ lw(AT, raddr); 3935 __ lw(AT, raddr);
3792 __ xorr(reg, reg, AT); 3936 __ xorr(reg, reg, AT);
3793 break; 3937 break;
3794 default: ShouldNotReachHere(); 3938 default: ShouldNotReachHere();
3795 } 3939 }
3796 } else { 3940 } else {
3797 Register rright = right->as_register(); 3941 Register rright = right->as_register();
3798 switch (code) { 3942 switch (code) {
3799 case lir_logic_and: __ andr (dstreg, reg, rright); break; 3943 case lir_logic_and: __ andr (dstreg, reg, rright); break;
3800 case lir_logic_or : __ orr (dstreg, reg, rright); break; 3944 case lir_logic_or : __ orr (dstreg, reg, rright); break;
3801 case lir_logic_xor: __ xorr (dstreg, reg, rright); break; 3945 case lir_logic_xor: __ xorr (dstreg, reg, rright); break;
3802 default: ShouldNotReachHere(); 3946 default: ShouldNotReachHere();
3803 } 3947 }
3804 } 3948 }
3805 } else { 3949 } else {
3806 Register l_lo = left->as_register_lo(); 3950 Register l_lo = left->as_register_lo();
3807 Register dst_lo = dst->as_register_lo(); 3951 Register dst_lo = dst->as_register_lo();
3815 3959
3816 int r_lo = right->as_constant_ptr()->as_jint_lo(); 3960 int r_lo = right->as_constant_ptr()->as_jint_lo();
3817 int r_hi = right->as_constant_ptr()->as_jint_hi(); 3961 int r_hi = right->as_constant_ptr()->as_jint_hi();
3818 3962
3819 switch (code) { 3963 switch (code) {
3820 case lir_logic_and: 3964 case lir_logic_and:
3821 __ move(AT, r_lo); 3965 __ move(AT, r_lo);
3822 __ andr(dst_lo, l_lo, AT); 3966 __ andr(dst_lo, l_lo, AT);
3823 __ move(AT, r_hi); 3967 __ move(AT, r_hi);
3824 __ andr(dst_hi, l_hi, AT); 3968 __ andr(dst_hi, l_hi, AT);
3825 break; 3969 break;
3826 3970
3827 case lir_logic_or: 3971 case lir_logic_or:
3828 __ move(AT, r_lo); 3972 __ move(AT, r_lo);
3829 __ orr(dst_lo, l_lo, AT); 3973 __ orr(dst_lo, l_lo, AT);
3830 __ move(AT, r_hi); 3974 __ move(AT, r_hi);
3831 __ orr(dst_hi, l_hi, AT); 3975 __ orr(dst_hi, l_hi, AT);
3832 break; 3976 break;
3833 3977
3834 case lir_logic_xor: 3978 case lir_logic_xor:
3835 __ move(AT, r_lo); 3979 __ move(AT, r_lo);
3836 __ xorr(dst_lo, l_lo, AT); 3980 __ xorr(dst_lo, l_lo, AT);
3837 __ move(AT, r_hi); 3981 __ move(AT, r_hi);
3838 __ xorr(dst_hi, l_hi, AT); 3982 __ xorr(dst_hi, l_hi, AT);
3839 break; 3983 break;
3840 3984
3841 default: ShouldNotReachHere(); 3985 default: ShouldNotReachHere();
3842 } 3986 }
3843 #else 3987 #else
3844 __ li(AT, right->as_constant_ptr()->as_jlong()); 3988 __ li(AT, right->as_constant_ptr()->as_jlong());
3845 3989
3846 switch (code) { 3990 switch (code) {
3847 case lir_logic_and: 3991 case lir_logic_and:
3848 __ andr(dst_lo, l_lo, AT); 3992 __ andr(dst_lo, l_lo, AT);
3849 break; 3993 break;
3850 3994
3851 case lir_logic_or: 3995 case lir_logic_or:
3852 __ orr(dst_lo, l_lo, AT); 3996 __ orr(dst_lo, l_lo, AT);
3853 break; 3997 break;
3854 3998
3855 case lir_logic_xor: 3999 case lir_logic_xor:
3856 __ xorr(dst_lo, l_lo, AT); 4000 __ xorr(dst_lo, l_lo, AT);
3857 break; 4001 break;
3858 4002
3859 default: ShouldNotReachHere(); 4003 default: ShouldNotReachHere();
3860 } 4004 }
3861 #endif 4005 #endif
3862 4006
3863 } else { 4007 } else {
3864 Register r_lo = right->as_register_lo(); 4008 Register r_lo = right->as_register_lo();
3865 Register r_hi = right->as_register_hi(); 4009 Register r_hi = right->as_register_hi();
3866 4010
3867 switch (code) { 4011 switch (code) {
3868 case lir_logic_and: 4012 case lir_logic_and:
3869 __ andr(dst_lo, l_lo, r_lo); 4013 __ andr(dst_lo, l_lo, r_lo);
3870 NOT_LP64(__ andr(dst_hi, l_hi, r_hi);) 4014 NOT_LP64(__ andr(dst_hi, l_hi, r_hi);)
3871 break; 4015 break;
3872 case lir_logic_or: 4016 case lir_logic_or:
3873 __ orr(dst_lo, l_lo, r_lo); 4017 __ orr(dst_lo, l_lo, r_lo);
3874 NOT_LP64(__ orr(dst_hi, l_hi, r_hi);) 4018 NOT_LP64(__ orr(dst_hi, l_hi, r_hi);)
3875 break; 4019 break;
3876 case lir_logic_xor: 4020 case lir_logic_xor:
3877 __ xorr(dst_lo, l_lo, r_lo); 4021 __ xorr(dst_lo, l_lo, r_lo);
3878 NOT_LP64(__ xorr(dst_hi, l_hi, r_hi);) 4022 NOT_LP64(__ xorr(dst_hi, l_hi, r_hi);)
3879 break; 4023 break;
3880 default: ShouldNotReachHere(); 4024 default: ShouldNotReachHere();
3881 } 4025 }
3882 } 4026 }
3883 } 4027 }
3884 } 4028 }
3885 4029
4109 } 4253 }
4110 } 4254 }
4111 4255
4112 4256
4113 void LIR_Assembler::align_call(LIR_Code code) { 4257 void LIR_Assembler::align_call(LIR_Code code) {
4114 //FIXME. aoqi, this right?
4115 // do nothing since all instructions are word aligned on sparc
4116 /*
4117 if (os::is_MP()) {
4118 // make sure that the displacement word of the call ends up word aligned
4119 int offset = __ offset();
4120 switch (code) {
4121 case lir_static_call:
4122 case lir_optvirtual_call:
4123 offset += NativeCall::displacement_offset;
4124 break;
4125 case lir_icvirtual_call:
4126 offset += NativeCall::displacement_offset + NativeMovConstReg::instruction_size;
4127 break;
4128 case lir_virtual_call: // currently, sparc-specific for niagara
4129 default: ShouldNotReachHere();
4130 }
4131 while (offset++ % BytesPerWord != 0) {
4132 __ nop();
4133 }
4134 }
4135 */
4136 } 4258 }
4137 4259
4138 4260
4139 void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) { 4261 void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) {
4140 //assert(!os::is_MP() || (__ offset() + NativeCall::displacement_offset) % BytesPerWord == 0, "must be aligned"); 4262 //assert(!os::is_MP() || (__ offset() + NativeCall::displacement_offset) % BytesPerWord == 0, "must be aligned");
4143 add_call_info(code_offset(), op->info()); 4265 add_call_info(code_offset(), op->info());
4144 } 4266 }
4145 4267
4146 4268
4147 void LIR_Assembler::ic_call(LIR_OpJavaCall* op) { 4269 void LIR_Assembler::ic_call(LIR_OpJavaCall* op) {
4148 RelocationHolder rh = virtual_call_Relocation::spec(pc()); 4270 __ ic_call(op->addr());
4149 // int oop_index = __ oop_recorder()->allocate_oop_index((jobject)Universe::non_oop_word()); 4271 add_call_info(code_offset(), op->info());
4150 // RelocationHolder rspec = oop_Relocation::spec(oop_index);
4151 /// __ relocate(rspec);
4152 #ifndef _LP64
4153 //by_css
4154 __ lui(IC_Klass, Assembler::split_high((int)Universe::non_oop_word()));
4155 __ addiu(IC_Klass, IC_Klass, Assembler::split_low((int)Universe::non_oop_word()));
4156 #else
4157 __ li48(IC_Klass, (long)Universe::non_oop_word());
4158 #endif
4159 __ call(op->addr(), rh);
4160 __ delayed()->nop();
4161 // add_call_info(code_offset(), info);
4162
4163 add_call_info(code_offset(), op->info());
4164 assert(!os::is_MP() ||
4165 (__ offset() - NativeCall::instruction_size + NativeCall::displacement_offset) % BytesPerWord == 0,
4166 "must be aligned");
4167
4168 } 4272 }
4169 4273
4170 4274
4171 /* Currently, vtable-dispatch is only enabled for sparc platforms */ 4275 /* Currently, vtable-dispatch is only enabled for sparc platforms */
4172 void LIR_Assembler::vtable_call(LIR_OpJavaCall* op) { 4276 void LIR_Assembler::vtable_call(LIR_OpJavaCall* op) {
4180 address stub = __ start_a_stub(call_stub_size); 4284 address stub = __ start_a_stub(call_stub_size);
4181 if (stub == NULL) { 4285 if (stub == NULL) {
4182 bailout("static call stub overflow"); 4286 bailout("static call stub overflow");
4183 return; 4287 return;
4184 } 4288 }
4185
4186 int start = __ offset(); 4289 int start = __ offset();
4187 /*
4188 if (os::is_MP()) {
4189 // make sure that the displacement word of the call ends up word aligned
4190 int offset = __ offset() + NativeMovConstReg::instruction_size + NativeCall::displacement_offset;
4191 while (offset++ % BytesPerWord != 0) {
4192 __ nop();
4193 }
4194 }
4195 */
4196 __ relocate(static_stub_Relocation::spec(call_pc)); 4290 __ relocate(static_stub_Relocation::spec(call_pc));
4197 jobject o=NULL; 4291
4198 int oop_index = __ oop_recorder()->allocate_oop_index((jobject)o); 4292 Metadata *o = NULL;
4199 RelocationHolder rspec = oop_Relocation::spec(oop_index); 4293 int index = __ oop_recorder()->allocate_metadata_index(o);
4294 RelocationHolder rspec = metadata_Relocation::spec(index);
4200 __ relocate(rspec); 4295 __ relocate(rspec);
4201 //see set_to_interpreted 4296 //see set_to_interpreted
4202 #ifndef _LP64 4297 __ patchable_set48(Rmethod, (long)o);
4203 __ lui(T7, Assembler::split_high((int)o)); 4298
4204 __ addiu(T7, T7, Assembler::split_low((int)o)); 4299 __ patchable_set48(AT, (long)-1);
4205 #else
4206 __ li48(Rmethod, (long)o);
4207 #endif
4208 #ifndef _LP64
4209 __ lui(AT, Assembler::split_high((int)-1));
4210 __ addiu(AT, AT, Assembler::split_low((int)-1));
4211 #else
4212 __ li48(AT, (long)-1);
4213 #endif
4214 //assert(!os::is_MP() || ((__ offset() + 1) % BytesPerWord) == 0, "must be aligned on MP");
4215 __ jr(AT); 4300 __ jr(AT);
4216 __ delayed()->nop(); 4301 __ delayed()->nop();
4217 assert(__ offset() - start <= call_stub_size, "stub too big"); 4302 assert(__ offset() - start <= call_stub_size, "stub too big");
4218 __ end_a_stub(); 4303 __ end_a_stub();
4219 } 4304 }
4225 4310
4226 // exception object is not added to oop map by LinearScan 4311 // exception object is not added to oop map by LinearScan
4227 // (LinearScan assumes that no oops are in fixed registers) 4312 // (LinearScan assumes that no oops are in fixed registers)
4228 4313
4229 info->add_register_oop(exceptionOop); 4314 info->add_register_oop(exceptionOop);
4230 //if (!unwind) { 4315 long pc_for_athrow = (long)__ pc();
4231 // get current pc information 4316 int pc_for_athrow_offset = __ offset();
4232 // pc is only needed if the method has an exception handler, the unwind code does not need it. 4317 Register epc = exceptionPC->as_register();
4233 #ifndef _LP64 4318 __ relocate(relocInfo::internal_pc_type);
4234 //by_css 4319 __ li48(epc, pc_for_athrow);
4235 int pc_for_athrow = (int)__ pc(); 4320 add_call_info(pc_for_athrow_offset, info); // for exception handler
4236 int pc_for_athrow_offset = __ offset(); 4321 __ verify_not_null_oop(V0);
4237 Register epc = exceptionPC->as_register(); 4322 // search an exception handler (eax: exception oop, edx: throwing pc)
4238 //__ nop(); 4323 if (compilation()->has_fpu_code()) {
4239 // pc_for_athrow can not point to itself (relocInfo restriction), no need now 4324 __ call(Runtime1::entry_for(Runtime1::handle_exception_id),
4240 __ relocate(relocInfo::internal_pc_type); 4325 relocInfo::runtime_call_type);
4241 __ lui(epc, Assembler::split_high(pc_for_athrow)); 4326 } else {
4242 __ addiu(epc, epc, Assembler::split_low(pc_for_athrow)); 4327 __ call(Runtime1::entry_for(Runtime1::handle_exception_nofpu_id),
4243 #else 4328 relocInfo::runtime_call_type);
4244 long pc_for_athrow = (long)__ pc(); 4329 }
4245 int pc_for_athrow_offset = __ offset();
4246 Register epc = exceptionPC->as_register();
4247 //__ nop();
4248 // pc_for_athrow can not point to itself (relocInfo restriction), no need now
4249 __ relocate(relocInfo::internal_pc_type);
4250 __ li48(epc, pc_for_athrow);
4251 #endif
4252 add_call_info(pc_for_athrow_offset, info); // for exception handler
4253 __ verify_not_null_oop(V0);
4254 // search an exception handler (eax: exception oop, edx: throwing pc)
4255 if (compilation()->has_fpu_code()) {
4256 __ call(Runtime1::entry_for(Runtime1::handle_exception_id),
4257 relocInfo::runtime_call_type);
4258 } else {
4259 __ call(Runtime1::entry_for(Runtime1::handle_exception_nofpu_id),
4260 relocInfo::runtime_call_type);
4261 }
4262 // } else {
4263 // __ call(Runtime1::entry_for(Runtime1::unwind_exception_id),
4264 // relocInfo::runtime_call_type);
4265 // }
4266
4267 // enough room for two byte trap
4268 __ delayed()->nop(); 4330 __ delayed()->nop();
4269 } 4331 }
4270 4332
4271 void LIR_Assembler::unwind_op(LIR_Opr exceptionOop){ 4333 void LIR_Assembler::unwind_op(LIR_Opr exceptionOop) {
4272 assert(exceptionOop->as_register()== FSR, "must match"); 4334 assert(exceptionOop->as_register()== FSR, "must match");
4273 __ b(_unwind_handler_entry); 4335 __ b(_unwind_handler_entry);
4274 __ delayed()->nop(); 4336 __ delayed()->nop();
4275 } 4337 }
4276 4338
4277 void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, LIR_Opr count, LIR_Opr dest, LIR_Opr tmp) { 4339 void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, LIR_Opr count, LIR_Opr dest, LIR_Opr tmp) {
4278 // optimized version for linear scan: 4340 // optimized version for linear scan:
4279 // * tmp must be unused 4341 // * tmp must be unused
4280 assert(tmp->is_illegal(), "wasting a register if tmp is allocated"); 4342 assert(tmp->is_illegal(), "wasting a register if tmp is allocated");
4294 ShouldNotReachHere(); 4356 ShouldNotReachHere();
4295 } 4357 }
4296 assert_different_registers(count_reg, value_reg); 4358 assert_different_registers(count_reg, value_reg);
4297 switch (code) { 4359 switch (code) {
4298 case lir_shl: 4360 case lir_shl:
4299 if (dest->type() == T_INT) 4361 if (dest->type() == T_INT)
4300 __ sllv(dest_reg, value_reg, count_reg); 4362 __ sllv(dest_reg, value_reg, count_reg);
4301 else 4363 else
4302 __ dsllv(dest_reg, value_reg, count_reg); 4364 __ dsllv(dest_reg, value_reg, count_reg);
4303 break; 4365 break;
4304 //__ dsllv(dest_reg, value_reg, count_reg); break;
4305 case lir_shr: __ dsrav(dest_reg, value_reg, count_reg); break; 4366 case lir_shr: __ dsrav(dest_reg, value_reg, count_reg); break;
4306 case lir_ushr: 4367 case lir_ushr:
4307 #if 1 4368 #if 1
4308 /* 4369 /*
4309 Jin: in java, ushift_right requires 32-bit UNSIGNED operation! 4370 Jin: in java, ushift_right requires 32-bit UNSIGNED operation!
4323 4384
4324 // java.math.MutableBigInteger::primitiveRightShift 4385 // java.math.MutableBigInteger::primitiveRightShift
4325 // 4386 //
4326 // 108 ushift_right [a6|I] [a4|I] [a4|I] 4387 // 108 ushift_right [a6|I] [a4|I] [a4|I]
4327 // 0x00000055646d2f70: dsll32 a4, a6, 0 \ 4388 // 0x00000055646d2f70: dsll32 a4, a6, 0 \
4328 // 0x00000055646d2f74: dsrl32 a4, a4, 0 |- error! 4389 // 0x00000055646d2f74: dsrl32 a4, a4, 0 |- error!
4329 // 0x00000055646d2f78: dsrl a4, a4, a4 / 4390 // 0x00000055646d2f78: dsrl a4, a4, a4 /
4330 if (left->type() == T_INT && dest->type() == T_INT) 4391 if (left->type() == T_INT && dest->type() == T_INT) {
4331 { 4392 __ dsll32(AT, value_reg, 0); // Omit the high 32 bits
4332 __ dsll32(AT, value_reg, 0); // Omit the high 32 bits 4393 __ dsrl32(AT, AT, 0);
4333 __ dsrl32(AT, AT, 0); 4394 __ dsrlv(dest_reg, AT, count_reg); // Unsigned right shift
4334 __ dsrlv(dest_reg, AT, count_reg); // Unsigned right shift 4395 break;
4335 break; 4396 }
4336 }
4337 #endif 4397 #endif
4338 __ dsrlv(dest_reg, value_reg, count_reg); break; 4398 __ dsrlv(dest_reg, value_reg, count_reg); break;
4339 default: ShouldNotReachHere(); 4399 default: ShouldNotReachHere();
4340 } 4400 }
4341 #else 4401 #else
4492 __ dsll(dest_reg, value_reg, count); 4552 __ dsll(dest_reg, value_reg, count);
4493 break; 4553 break;
4494 case lir_shr: __ dsra(dest_reg, value_reg, count); break; 4554 case lir_shr: __ dsra(dest_reg, value_reg, count); break;
4495 case lir_ushr: 4555 case lir_ushr:
4496 #if 1 4556 #if 1
4497 if (left->type() == T_INT && dest->type() == T_INT) 4557 if (left->type() == T_INT && dest->type() == T_INT) {
4498 {
4499 /* Jin: in java, ushift_right requires 32-bit UNSIGNED operation! 4558 /* Jin: in java, ushift_right requires 32-bit UNSIGNED operation!
4500 However, dsrl shifts the highest 32 bits in as well. 4559 However, dsrl shifts the highest 32 bits in as well.
4501 Thus, if the source register contains a negative value, 4560 Thus, if the source register contains a negative value,
4502 the result is incorrect. 4561 the result is incorrect.
4503 4562
    __ bne_far(AT, R0, *stub->entry());
    __ delayed()->nop();
  }

  if (flags & LIR_OpArrayCopy::type_check) {
    if (UseCompressedClassPointers) {
      __ lw(AT, src_klass_addr);
      __ lw(tmp, dst_klass_addr);
    } else {
      __ ld(AT, src_klass_addr);
      __ ld(tmp, dst_klass_addr);
    }
    __ bne_far(AT, tmp, *stub->entry());
    __ delayed()->nop();
  }

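The guard compares the raw klass words of source and destination: a 32-bit lw when class pointers are compressed, a 64-bit ld otherwise, falling into the slow-path stub on mismatch. A hedged sketch of that fast-path test (the struct layout and names here are illustrative only, not HotSpot's object header):

#include <cstdint>

struct ObjHeaderSketch {
  uint64_t mark;
  uint64_t klass_word;  // holds a narrow (32-bit) or full (64-bit) klass
};

// true  -> same exact type, the inline copy loop may run
// false -> branch to the slow-path stub for a full store check
static bool same_exact_klass(const ObjHeaderSketch* src, const ObjHeaderSketch* dst,
                             bool compressed_class_pointers) {
  if (compressed_class_pointers)  // lw: compare 32-bit narrow klass values
    return static_cast<uint32_t>(src->klass_word) == static_cast<uint32_t>(dst->klass_word);
  return src->klass_word == dst->klass_word;  // ld: compare full Klass*
}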
#ifdef ASSERT
    // case: if no type check is needed then the dst type must match the
    // expected type and the src type is a subtype, which we can't check. If
    // a type check is needed then at this point the classes are known to be
    // the same, but again we don't know which type, so we can't check them.
    Label known_ok, halt;
    __ mov_metadata(tmp, default_type->constant_encoding());
#ifdef _LP64
    if (UseCompressedClassPointers) {
      __ encode_klass_not_null(tmp);
    }
#endif
    if (basic_type != T_OBJECT) {
      if (UseCompressedClassPointers) {
        __ lw(AT, dst_klass_addr);
      } else {
        __ ld(AT, dst_klass_addr);
      }
      __ bne(AT, tmp, halt);
      __ delayed()->nop();
      if (UseCompressedClassPointers) {
        __ lw(AT, src_klass_addr);
      } else {
        __ ld(AT, src_klass_addr);
      }
      __ beq(AT, tmp, known_ok);
      __ delayed()->nop();
    } else {
      if (UseCompressedClassPointers) {
        __ lw(AT, dst_klass_addr);
      } else {
        __ ld(AT, dst_klass_addr);
      }
      __ beq(AT, tmp, known_ok);
      __ delayed()->nop();
      __ beq(src, dst, known_ok);
      __ delayed()->nop();
    }
    __ bind(halt);
    __ stop("incorrect type information in arraycopy");
    __ bind(known_ok);
  }
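In this assertion path the expected Klass* is materialized with mov_metadata and, under compressed class pointers, narrowed with encode_klass_not_null so it can be compared against the 32-bit word that lw loads from the object. A minimal sketch, assuming HotSpot's usual base-plus-shift narrowing (the base and shift are runtime-chosen; everything here is illustrative):

#include <cstdint>

// narrow_klass = (Klass* - compressed_class_base) >> compressed_class_shift
static uint32_t encode_klass(uintptr_t klass, uintptr_t base, unsigned shift) {
  return static_cast<uint32_t>((klass - base) >> shift);
}

static uintptr_t decode_klass(uint32_t narrow, uintptr_t base, unsigned shift) {
  return base + (static_cast<uintptr_t>(narrow) << shift);
}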
  Register obj = op->obj_opr()->as_register();  // may not be an oop
  Register hdr = op->hdr_opr()->as_register();
  Register lock = op->lock_opr()->is_single_cpu() ? op->lock_opr()->as_register()
                                                  : op->lock_opr()->as_register_lo();
  if (!UseFastLocking) {
    __ b_far(*op->stub()->entry());
    __ delayed()->nop();
  } else if (op->code() == lir_lock) {
    Register scratch = noreg;
    if (UseBiasedLocking) {
      scratch = op->scratch_opr()->as_register();
    }
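The nop after b_far fills the MIPS branch delay slot, which executes whether or not the branch is taken; leaving it unfilled would execute whatever instruction happened to come next. A sketch of the pairing this file maintains everywhere (branch_with_delay is a hypothetical helper, not a HotSpot API):

// Every emitted branch is immediately followed by an instruction for the
// delay slot; when nothing useful can be hoisted into it, a nop goes there.
template <typename Asm, typename Target>
static void branch_with_delay(Asm* masm, Target& target) {
  masm->b_far(target);      // control transfer
  masm->delayed()->nop();   // delay slot: executes before the branch lands
}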
  ciProfileData* data = md->bci_to_data(bci);
  assert(data->is_CounterData(), "need CounterData for calls");
  assert(op->mdo()->is_single_cpu(), "mdo must be allocated");
  Register mdo = op->mdo()->as_register();

  __ mov_metadata(mdo, md->constant_encoding());

  Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));
  __ ld_ptr(AT, counter_addr);
  __ addi(AT, AT, DataLayout::counter_increment);
  __ st_ptr(AT, counter_addr);

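The MethodData* is now loaded with a single mov_metadata instead of the hand-rolled relocate/lui/li48 sequence, and the counter cell is accessed with pointer-sized ld_ptr/st_ptr rather than lw/sw, since profile counters occupy a full cell in the MDO layout. The three instructions amount to this sketch (counter_increment stands in for DataLayout::counter_increment):

#include <cstdint>

static const intptr_t counter_increment = 1;  // stands in for DataLayout::counter_increment

static void bump_counter(intptr_t* counter_cell) {
  intptr_t v = *counter_cell;   // ld_ptr(AT, counter_addr)
  v += counter_increment;       // addi(AT, AT, DataLayout::counter_increment)
  *counter_cell = v;            // st_ptr(AT, counter_addr)
}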
  Bytecodes::Code bc = method->java_code_at_bci(bci);
  const bool callee_is_static = callee->is_loaded() && callee->is_static();
  // Perform additional virtual call profiling for invokevirtual and
  // invokeinterface bytecodes
  if ((bc == Bytecodes::_invokevirtual || bc == Bytecodes::_invokeinterface) &&
      !callee_is_static &&  // required for optimized MH invokes
      C1ProfileVirtualCalls) {
    assert(op->recv()->is_single_cpu(), "recv must be allocated");
    Register recv = op->recv()->as_register();
    assert_different_registers(mdo, recv);
    assert(data->is_VirtualCallData(), "need VirtualCallData for virtual calls");
    ciKlass* known_klass = op->known_holder();
    if (C1OptimizeVirtualCallProfiling && known_klass != NULL) {
      // We know the type that will be seen at this call site; we can
      // statically update the MethodData rather than needing to do
      // dynamic tests on the receiver type

      // NOTE: we should probably put a lock around this search to
      // avoid collisions by concurrent compilations
      ciVirtualCallData* vc_data = (ciVirtualCallData*) data;
      uint i;
      for (i = 0; i < VirtualCallData::row_limit(); i++) {
        ciKlass* receiver = vc_data->receiver(i);
        if (known_klass->equals(receiver)) {
          Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)));
          __ ld_ptr(AT, data_addr);
          __ addi(AT, AT, DataLayout::counter_increment);
          __ st_ptr(AT, data_addr);
          return;
        }
      }

      // Receiver type not found in profile data; select an empty slot

      // Note that this is less efficient than it should be because it
      // always does a write to the receiver part of the
      // VirtualCallData rather than just the first time
      for (i = 0; i < VirtualCallData::row_limit(); i++) {
        ciKlass* receiver = vc_data->receiver(i);
        if (receiver == NULL) {
          Address recv_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i)));
          __ mov_metadata(AT, known_klass->constant_encoding());
          __ st_ptr(AT, recv_addr);
          Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)));
          __ ld_ptr(AT, data_addr);
          __ addi(AT, AT, DataLayout::counter_increment);
          __ st_ptr(AT, data_addr);
          return;
        }
      }
    } else {
      //__ ld_ptr(recv, Address(recv, oopDesc::klass_offset_in_bytes()));
      __ load_klass(recv, recv);
      Label update_done;
      uint i;
      for (i = 0; i < VirtualCallData::row_limit(); i++) {
        Label next_test;
        // See if the receiver is receiver[n].
        __ ld_ptr(AT, Address(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i))));
        __ bne(recv, AT, next_test);
        __ delayed()->nop();
        Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)));
        __ ld_ptr(AT, data_addr);
        __ addi(AT, AT, DataLayout::counter_increment);
        __ st_ptr(AT, data_addr);
        __ b(update_done);
        __ delayed()->nop();
        __ bind(next_test);
      }

      // Didn't find receiver; find next empty slot and fill it in
      for (i = 0; i < VirtualCallData::row_limit(); i++) {
        Label next_test;
        Address recv_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i)));
        __ ld_ptr(AT, recv_addr);
        __ bne(AT, R0, next_test);
        __ delayed()->nop();
        __ st_ptr(recv, recv_addr);
        __ move(AT, DataLayout::counter_increment);
        __ st_ptr(AT, Address(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i))));
        if (i < (VirtualCallData::row_limit() - 1)) {
          __ b(update_done);
          __ delayed()->nop();
        }
        __ bind(next_test);
      }
      __ bind(update_done);
    }
  }
}
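Both profiling paths implement the same two-pass protocol over the VirtualCallData rows: first look for a row already holding this receiver klass and bump its count, otherwise claim the first empty row; if every row is taken the site is megamorphic and no row is updated. A hedged C++ sketch of that protocol (types and the row count are illustrative, not HotSpot's):

#include <cstddef>

struct ReceiverRow {
  const void* receiver;  // receiver klass, NULL while the row is free
  long        count;
};

static const size_t kRowLimit = 2;  // stands in for VirtualCallData::row_limit()

static void profile_receiver(ReceiverRow rows[], const void* recv_klass) {
  for (size_t i = 0; i < kRowLimit; i++) {        // pass 1: known receiver?
    if (rows[i].receiver == recv_klass) { rows[i].count++; return; }
  }
  for (size_t i = 0; i < kRowLimit; i++) {        // pass 2: claim an empty row
    if (rows[i].receiver == NULL) {
      rows[i].receiver = recv_klass;
      rows[i].count    = 1;                       // DataLayout::counter_increment
      return;
    }
  }
  // All rows occupied by other receivers: the site is megamorphic and the
  // emitted code simply falls through without touching a row.
}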

void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) {
  else if (dst->is_double_cpu())
    __ lea(dst->as_register_lo(), frame_map()->address_for_monitor_lock(monitor_no));
}

void LIR_Assembler::align_backward_branch_target() {
  __ align(BytesPerWord);
}
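align(BytesPerWord) pads the code buffer so the next emitted instruction, a backward-branch target such as a loop header, starts on a word boundary. The padding amount follows the usual round-up-to-a-power-of-two computation (a sketch, not the assembler's actual code):

// Round offset up to the next multiple of a power-of-two alignment.
static int align_up(int offset, int alignment) {
  return (offset + alignment - 1) & ~(alignment - 1);
}
// e.g. with an 8-byte word, an emitter sitting at offset 13 inserts
// align_up(13, 8) - 13 == 3 bytes of filler before binding the target.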


void LIR_Assembler::negate(LIR_Opr left, LIR_Opr dest) {
  if (left->is_single_cpu()) {
  } else {
    ShouldNotReachHere();
  }
}

void LIR_Assembler::leal(LIR_Opr addr, LIR_Opr dest) {
  assert(addr->is_address() && dest->is_register(), "check");
  Register reg = dest->as_pointer_register();
  __ lea(reg, as_Address(addr->as_address_ptr()));
}
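The switch from as_register() to as_pointer_register() matters on LP64: a pointer-sized LIR operand may be modeled as a double-cpu (long) operand, for which as_register() is the wrong accessor. A sketch of the selection logic, assuming the usual C1 operand model (illustrative only, not the real accessor):

// On a 64-bit VM a pointer-sized value can be modeled as a double-cpu
// operand; pick the low register in that case, the plain register otherwise.
static Register as_pointer_register_sketch(LIR_Opr opr) {
  if (opr->is_double_cpu()) return opr->as_register_lo();
  return opr->as_register();
}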


void LIR_Assembler::jobject2reg(jobject o, Register reg) {
  if (o == NULL) {
#ifndef _LP64
    //by_css
    __ lui(reg, Assembler::split_high((int)o));
    __ addiu(reg, reg, Assembler::split_low((int)o));
#else
    __ li48(reg, (long)o);
    //__ patchable_set48(reg, (long)o);
#endif
  } else {
    int oop_index = __ oop_recorder()->find_index(o);
    RelocationHolder rspec = oop_Relocation::spec(oop_index);
    __ relocate(rspec);
#ifndef _LP64
    //by_css
    __ lui(reg, Assembler::split_high((int)o));
    __ addiu(reg, reg, Assembler::split_low((int)o));
#else
    __ li48(reg, (long)o);
    //__ patchable_set48(reg, (long)o);
#endif
  }
}
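li48 is used here, with the variable-length alternative left commented out, because the relocation machinery must later find a fixed-shape constant-load sequence it can patch in place; see the NativeMovConstReg note earlier in this file. A sketch of the 48-bit split such a fixed sequence materializes (the exact lui/ori/dsll instruction pattern is an assumption here, not taken from this file):

#include <cassert>
#include <cstdint>

// Rebuild a 48-bit constant from three 16-bit pieces, mirroring a
// lui / ori / dsll 16 / ori style sequence whose length never varies
// with the value, so the patcher can rewrite the immediates in place.
static uint64_t li48_split(uint64_t v) {
  uint64_t hi  = (v >> 32) & 0xffff;  // bits 47..32  (lui)
  uint64_t mid = (v >> 16) & 0xffff;  // bits 31..16  (ori)
  uint64_t lo  =  v        & 0xffff;  // bits 15..0   (ori after dsll 16)
  return (((hi << 16) | mid) << 16) | lo;
}

int main() {
  assert(li48_split(0x0000123456789abcULL) == 0x0000123456789abcULL);
  return 0;
}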

void LIR_Assembler::rt_call(LIR_Opr result, address dest, const LIR_OprList* args, LIR_Opr tmp, CodeEmitInfo* info) {
    ShouldNotReachHere();
#endif
  } else {
    ShouldNotReachHere();
  }*/
  ShouldNotReachHere();
}

#undef __
