src/cpu/mips/vm/c1_Runtime1_mips.cpp

changeset 8865:ffcdff41a92f
parent    6880:52ea28d233d2
child     9132:0f025dcc49cc
comparison: 8864:e4aeef458496 -> 8865:ffcdff41a92f
@@ -58 +58 @@
 
 set_num_rt_args(1 + args_size);
 
 
 // push java thread (becomes first argument of C function)
-#ifndef OPT_THREAD
 get_thread(thread);
-#endif
 move(A0, thread);
 
 set_last_Java_frame(thread, NOREG, FP, NULL);
 NOT_LP64(addi(SP, SP, - wordSize * (1+args_size)));
 move(AT, -(StackAlignmentInBytes));
@@ -137 +135 @@
 jmp(StubRoutines::forward_exception_entry(), relocInfo::runtime_call_type);
 delayed()->nop();
 } else if (_stub_id == Runtime1::forward_exception_id) {
 should_not_reach_here();
 } else {
-jmp(Runtime1::entry_for(Runtime1::forward_exception_id),
-relocInfo::runtime_call_type);
+jmp(Runtime1::entry_for(Runtime1::forward_exception_id), relocInfo::runtime_call_type);
 delayed()->nop();
 }
 bind(L);
 }
 // get oop results if there are any and reset the values in the thread
@@ -325 +322 @@
 // to simply save their current value.
 //FIXME, I have no idea which register should be saved . @jerome
 static OopMap* generate_oop_map(StubAssembler* sasm, int num_rt_args,
 bool save_fpu_registers = true, bool describe_fpu_registers = false) {
 
-/* Jin: num_rt_args is caculated by 8 bytes. */
-int frame_size_in_slots = reg_save_frame_size + num_rt_args * wordSize / SLOT_PER_WORD; // args + thread
-sasm->set_frame_size(frame_size_in_slots / SLOT_PER_WORD);
+LP64_ONLY(num_rt_args = 0);
+LP64_ONLY(assert((reg_save_frame_size * VMRegImpl::stack_slot_size) % 16 == 0, "must be 16 byte aligned");)
+int frame_size_in_slots = reg_save_frame_size + num_rt_args * wordSize / VMRegImpl::slots_per_word; // args + thread
+sasm->set_frame_size(frame_size_in_slots / VMRegImpl::slots_per_word);
 
 // record saved value locations in an OopMap
 // locations are offsets from sp after runtime call; num_rt_args is number of arguments
 // in call, including thread
 OopMap* map = new OopMap(reg_save_frame_size, 0);
@@ -405 +403 @@
 #endif
 return map;
 }
 
 //FIXME, Is it enough to save this registers by yyq
-static OopMap* save_live_registers(StubAssembler* sasm,
-int num_rt_args,
-bool save_fpu_registers = true,
+static OopMap* save_live_registers(StubAssembler* sasm, int num_rt_args,
+bool save_fpu_registers = true,
 bool describe_fpu_registers = false) {
 //const int reg_save_frame_size = return_off + 1 + num_rt_args;
 __ block_comment("save_live_registers");
 
 // save all register state - int, fpu
@@ -605 +602 @@
 return oop_maps;
 }
 
 //FIXME I do not know which reigster to use.should use T3 as real_return_addr @jerome
 OopMapSet* Runtime1::generate_handle_exception(StubID id, StubAssembler *sasm) {
 __ block_comment("generate_handle_exception");
+
 // incoming parameters
 const Register exception_oop = V0;
 const Register exception_pc = V1;
 // other registers used in this stub
 // const Register real_return_addr = T3;
-const Register thread = T8;
+const Register thread = TREG;
+#ifndef OPT_THREAD
+__ get_thread(thread);
+#endif
 // Save registers, if required.
 OopMapSet* oop_maps = new OopMapSet();
 OopMap* oop_map = NULL;
 switch (id) {
 case forward_exception_id:
 // We're handling an exception in the context of a compiled frame.
 // The registers have been saved in the standard places. Perform
 // an exception lookup in the caller and dispatch to the handler
 // if found. Otherwise unwind and dispatch to the callers
 // exception handler.
 oop_map = generate_oop_map(sasm, 1 /*thread*/);
 
 // load and clear pending exception oop into RAX
-__ ld(exception_oop, Address(thread, Thread::pending_exception_offset()));
-__ sw(R0,Address(thread, Thread::pending_exception_offset()));
+__ ld_ptr(exception_oop, Address(thread, Thread::pending_exception_offset()));
+__ st_ptr(R0, Address(thread, Thread::pending_exception_offset()));
 
 // load issuing PC (the return address for this stub) into rdx
-__ ld(exception_pc, Address(FP, 1*BytesPerWord));
+__ ld_ptr(exception_pc, Address(FP, 1*BytesPerWord));
 
 // make sure that the vm_results are cleared (may be unnecessary)
-__ sw(R0,Address(thread, JavaThread::vm_result_offset()));
-__ sw(R0,Address(thread, JavaThread::vm_result_2_offset()));
+__ st_ptr(R0, Address(thread, JavaThread::vm_result_offset()));
+__ st_ptr(R0, Address(thread, JavaThread::vm_result_2_offset()));
 break;
 case handle_exception_nofpu_id:
 case handle_exception_id:
 // At this point all registers MAY be live.
-oop_map = save_live_registers(sasm, 1 /*thread*/, id == handle_exception_nofpu_id);
+oop_map = save_live_registers(sasm, 1 /*thread*/, id != handle_exception_nofpu_id);
 break;
 case handle_exception_from_callee_id: {
 // At this point all registers except exception oop (RAX) and
 // exception pc (RDX) are dead.
-const int frame_size = 2 /*BP, return address*/ NOT_LP64(+ 1 /*thread*/) WIN64_ONLY(+ frame::arg_reg_save_area_bytes / BytesPerWord);
+const int frame_size = 2 /*BP, return address*/ NOT_LP64(+ 1 /*thread*/);
 oop_map = new OopMap(frame_size * VMRegImpl::slots_per_word, 0);
 sasm->set_frame_size(frame_size);
-WIN64_ONLY(__ subq(rsp, frame::arg_reg_save_area_bytes));
 break;
 }
 default: ShouldNotReachHere();
 }
 
 #ifdef TIERED
 // C2 can leave the fpu stack dirty
 __ empty_FPU_stack();
-//}
 #endif // TIERED
 
 // verify that only V0 and V1 is valid at this time
 // verify that V0 contains a valid exception
 __ verify_not_null_oop(exception_oop);
@@ -687 +686 @@
 // save exception oop and issuing pc into JavaThread
 // (exception handler will load it from here)
 __ st_ptr(exception_oop, Address(thread, in_bytes(JavaThread::exception_oop_offset())));
 __ st_ptr(exception_pc, Address(thread, in_bytes(JavaThread::exception_pc_offset())));
 
-// save real return address (pc that called this stub)
-// __ ld_ptr(real_return_addr, FP, 1*BytesPerWord);
-// __ st_ptr(real_return_addr, SP, temp_1_off * BytesPerWord / SLOT_PER_WORD);
-
 // patch throwing pc into return address (has bci & oop map)
-__ st_ptr(exception_pc, FP, 1*BytesPerWord);
+__ st_ptr(exception_pc, Address(FP, 1*BytesPerWord));
+
 // compute the exception handler.
 // the exception oop and the throwing pc are read from the fields in JavaThread
-int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address,
-exception_handler_for_pc));
+int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, exception_handler_for_pc));
 oop_maps->add_gc_map(call_offset, oop_map);
 // V0: handler address or NULL if no handler exists
 // will be the deopt blob if nmethod was deoptimized while we looked up
 // handler regardless of whether handler existed in the nmethod.
 
 // only V0 is valid at this time, all other registers have been destroyed by the
 // runtime call
 
-// Do we have an exception handler in the nmethod?
-/*Label no_handler;
-Label done;
-__ beq(V0, R0, no_handler);
-__ delayed()->nop(); */
-// exception handler found
 // patch the return address -> the stub will directly return to the exception handler
-__ st_ptr(V0, FP, 1 * BytesPerWord);
+__ st_ptr(V0, Address(FP, 1 * BytesPerWord));
 
-// restore registers
-// restore_live_registers(sasm, save_fpu_registers);
-
-// return to exception handler
-// __ leave();
-// __ jr(RA);
-// __ delayed()->nop();
-// __ bind(no_handler);
-// no exception handler found in this method, so the exception is
-// forwarded to the caller (using the unwind code of the nmethod)
-// there is no need to restore the registers
-
-// restore the real return address that was saved before the RT-call
-// __ ld_ptr(real_return_addr, SP, temp_1_off * BytesPerWord / SLOT_PER_WORD);
-// __ st_ptr(real_return_addr, FP, 1 * BytesPerWord);
-// load address of JavaThread object for thread-local data
-// __ get_thread(thread);
-// restore exception oop into eax (convention for unwind code)
-// __ ld_ptr(exception_oop, thread, in_bytes(JavaThread::exception_oop_offset()));
-
-// clear exception fields in JavaThread because they are no longer needed
-// (fields must be cleared because they are processed by GC otherwise)
-// __ st_ptr(R0, thread, in_bytes(JavaThread::exception_oop_offset()));
-// __ st_ptr(R0,thread, in_bytes(JavaThread::exception_pc_offset()));
-// pop the stub frame off
-// __ leave();
-// generate_unwind_exception(sasm);
-// __ stop("should not reach here");
-//}
-switch (id) {
-case forward_exception_id:
-case handle_exception_nofpu_id:
-case handle_exception_id:
-// Restore the registers that were saved at the beginning.
-restore_live_registers(sasm, id == handle_exception_nofpu_id);
-break;
-case handle_exception_from_callee_id:
-// WIN64_ONLY: No need to add frame::arg_reg_save_area_bytes to SP
-// since we do a leave anyway.
-
-// Pop the return address since we are possibly changing SP (restoring from BP).
-__ leave();
-// Restore SP from BP if the exception PC is a method handle call site.
-NOT_LP64(__ get_thread(thread);)
-/*__ ld(AT, Address(thread, JavaThread::is_method_handle_return_offset()));
-__ beq(AT, R0, done);
-__ move(SP, rbp_mh_SP_save);
-__ bind(done);
-__ jr(RA); // jump to exception handler
-__ delayed()->nop();*/
-// 759 __ cmpl(Address(thread, JavaThread::is_method_handle_return_offset()), 0);
-// 760 __ cmovptr(Assembler::notEqual, rsp, rbp_mh_SP_save);
-// 761 __ jmp(rcx); // jump to exception handler
-
-break;
+switch (id) {
+case forward_exception_id:
+case handle_exception_nofpu_id:
+case handle_exception_id:
+// Restore the registers that were saved at the beginning.
+restore_live_registers(sasm, id != handle_exception_nofpu_id);
+break;
+case handle_exception_from_callee_id:
+// WIN64_ONLY: No need to add frame::arg_reg_save_area_bytes to SP
+// since we do a leave anyway.
+
+// Pop the return address since we are possibly changing SP (restoring from BP).
+__ leave();
+// Restore SP from BP if the exception PC is a method handle call site.
+{
+Label done;
+__ ld(AT, Address(thread, JavaThread::is_method_handle_return_offset()));
+__ beq(AT, R0, done);
+__ delayed()->nop();
+__ bind(done);
+}
+__ jr(RA); // jump to exception handler
+__ delayed()->nop();
+break;
 default: ShouldNotReachHere();
 }
 
 return oop_maps;
 }
 
 
 
 
 
 void Runtime1::generate_unwind_exception(StubAssembler *sasm) {
 // incoming parameters
 const Register exception_oop = V0;
+// callee-saved copy of exception_oop during runtime call
+const Register exception_oop_callee_saved = S0;
 // other registers used in this stub
 const Register exception_pc = V1;
 const Register handler_addr = T3;
-const Register thread = T8;
+const Register thread = TREG;
 
 // verify that only eax is valid at this time
 // __ invalidate_registers(false, true, true, true, true, true);
 
 #ifdef ASSERT
@@ -802 +762 @@
 __ stop("exception oop must be empty");
 __ bind(oop_empty);
 
 Label pc_empty;
 __ ld_ptr(AT, thread, in_bytes(JavaThread::exception_pc_offset()));
-__ beq(AT,R0, pc_empty);
+__ beq(AT, R0, pc_empty);
 __ delayed()->nop();
 __ stop("exception pc must be empty");
 __ bind(pc_empty);
 #endif
 // clear the FPU stack in case any FPU results are left behind
 __ empty_FPU_stack();
 
-// leave activation of nmethod
-__ addi(SP, FP, wordSize);
-__ ld_ptr(FP, SP, - wordSize);
-// store return address (is on top of stack after leave)
+// save exception_oop in callee-saved register to preserve it during runtime calls
+__ verify_not_null_oop(exception_oop);
+__ move(exception_oop_callee_saved, exception_oop);
+
+#ifndef OPT_THREAD
+__ get_thread(thread);
+#endif
+// Get return address (is on top of stack after leave).
+
+
 __ ld_ptr(exception_pc, SP, 0);
-__ verify_oop(exception_oop);
-
-// save exception oop from eax to stack before call
-__ push(exception_oop);
+
 // search the exception handler address of the caller (using the return address)
-__ call_VM_leaf(CAST_FROM_FN_PTR(address,
-SharedRuntime::exception_handler_for_return_address), exception_pc);
-// eax: exception handler address of the caller
+__ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), thread, exception_pc);
+// V0: exception handler address of the caller
 
 // only eax is valid at this time, all other registers have been destroyed by the call
 
 // move result of call into correct register
 __ move(handler_addr, V0);
-// restore exception oop in eax (required convention of exception handler)
-__ super_pop(exception_oop);
-
+
+// Restore exception oop to V0 (required convention of exception handler).
+__ move(exception_oop, exception_oop_callee_saved);
+
+// verify that there is really a valid exception in V0
 __ verify_oop(exception_oop);
 
 // get throwing pc (= return address).
-// edx has been destroyed by the call, so it must be set again
+// V1 has been destroyed by the call, so it must be set again
 // the pop is also necessary to simulate the effect of a ret(0)
 __ super_pop(exception_pc);
-// verify that that there is really a valid exception in eax
-__ verify_not_null_oop(exception_oop);
 
 // continue at exception handler (return address removed)
 // note: do *not* remove arguments when unwinding the
 // activation since the caller assumes having
 // all arguments on the stack when entering the
 // runtime to determine the exception handler
 // (GC happens at call site with arguments!)
-// eax: exception oop
-// edx: throwing pc
-// ebx: exception handler
+// V0: exception oop
+// V1: throwing pc
+// T3: exception handler
 __ jr(handler_addr);
 __ delayed()->nop();
 }
 
 
@@ -865 +827 @@
 // use the maximum number of runtime-arguments here because it is difficult to
 // distinguish each RT-Call.
 // Note: This number affects also the RT-Call in generate_handle_exception because
 // the oop-map is shared for all calls.
 
-
-
-
 DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
 assert(deopt_blob != NULL, "deoptimization blob must have been created");
 // assert(deopt_with_exception_entry_for_patch != NULL,
 // "deoptimization blob must have been created");
 
 //OopMap* oop_map = save_live_registers(sasm, num_rt_args);
 OopMap* oop_map = save_live_registers(sasm, 0);
-#ifndef OPT_THREAD
 const Register thread = T8;
 // push java thread (becomes first argument of C function)
 __ get_thread(thread);
-#else
-const Register thread = TREG;
-#endif
 __ move(A0, thread);
 
 
 /*
 * NOTE: this frame should be compiled frame, but at this point, the pc in frame-anchor
@@ -920 +875 @@
 __ jalr(T9);
 __ delayed()->nop();
 OopMapSet* oop_maps = new OopMapSet();
 oop_maps->add_gc_map(__ offset(), oop_map);
 
-#ifndef OPT_THREAD
 __ get_thread(thread);
-#endif
 
 __ ld_ptr (SP, thread, in_bytes(JavaThread::last_Java_sp_offset()));
 __ reset_last_Java_frame(thread, true,true);
 // discard thread arg
 // check for pending exceptions
@@ -936 +889 @@
 __ ld_ptr(AT, thread, in_bytes(Thread::pending_exception_offset()));
 __ beq(AT, R0, L);
 __ delayed()->nop();
 // exception pending => remove activation and forward to exception handler
 
-__ bne(V0,R0, skip);
+__ bne(V0, R0, skip);
 __ delayed()->nop();
-// relocInfo::runtime_call_type);
 __ jmp(Runtime1::entry_for(Runtime1::forward_exception_id),
 relocInfo::runtime_call_type);
 __ delayed()->nop();
 __ bind(skip);
 
@@ -1037 +989 @@
 OopMapSet* oop_maps = NULL;
 
 switch (id) {
 case forward_exception_id:
 {
-// we're handling an exception in the context of a compiled
-// frame. The registers have been saved in the standard
-// places. Perform an exception lookup in the caller and
-// dispatch to the handler if found. Otherwise unwind and
-// dispatch to the callers exception handler.
-
-const Register exception_oop = V0;
-const Register exception_pc = V1;
-#ifndef OPT_THREAD
-const Register thread = T8;
-__ get_thread(thread);
-#else
-const Register thread = TREG;
-#endif
-// load pending exception oop into eax
-__ ld_ptr(exception_oop, thread, in_bytes(Thread::pending_exception_offset()));
-// clear pending exception
-__ st_ptr(R0, thread, in_bytes(Thread::pending_exception_offset()));
-
-// load issuing PC (the return address for this stub) into V1
-__ ld_ptr(exception_pc, FP, 1*BytesPerWord);
-
-// make sure that the vm_results are cleared (may be unnecessary)
-__ st_ptr(R0, Address(thread, in_bytes(JavaThread::vm_result_offset())));
-__ st_ptr(R0, Address(thread, in_bytes(JavaThread::vm_result_2_offset())));
-
-// verify that that there is really a valid exception in eax
-__ verify_not_null_oop(exception_oop);
-
-
-oop_maps = new OopMapSet();
-OopMap* oop_map = generate_oop_map(sasm, 0);
-generate_handle_exception(id, sasm);
-__ stop("should not reach here");
+oop_maps = generate_handle_exception(id, sasm);
+__ leave();
+__ jr(RA);
+__ delayed()->nop();
 }
 break;
 
 case new_instance_id:
 case fast_new_instance_id:
 case fast_new_instance_init_check_id:
 {
-// i use T4 as klass register, V0 as result register. MUST accord with NewInstanceStub::emit_code
-#ifndef _LP64
-Register klass = T4; // Incoming
-#else
 Register klass = A4; // Incoming
-#endif
 Register obj = V0; // Result
 
 if (id == new_instance_id) {
 __ set_info("new_instance", dont_gc_arguments);
 } else if (id == fast_new_instance_id) {
@@ -1104 +1021 @@
 Register t1 = T2;
 Register t2 = T3;
 assert_different_registers(klass, obj, obj_size, t1, t2);
 if (id == fast_new_instance_init_check_id) {
 // make sure the klass is initialized
-__ lw(AT, klass, in_bytes(InstanceKlass::init_state_offset()));
+__ ld_ptr(AT, Address(klass, in_bytes(InstanceKlass::init_state_offset())));
 __ move(t1, InstanceKlass::fully_initialized);
 __ bne(AT, t1, slow_path);
 __ delayed()->nop();
 }
 #ifdef ASSERT
@@ -1178 +1095 @@
 #ifndef _LP64
 Register bci = T5;
 #else
 Register bci = A5;
 #endif
+Register method = AT;
 __ enter();
 OopMap* map = save_live_registers(sasm, 0);
 // Retrieve bci
 __ lw(bci, Address(FP, 2*BytesPerWord));// FIXME:wuhui.ebp==??
-int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, counter_overflow), bci);
+__ ld(method, Address(FP, 3*BytesPerWord));
+int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, counter_overflow), bci, method);
 oop_maps = new OopMapSet();
 oop_maps->add_gc_map(call_offset, map);
 restore_live_registers(sasm);
 __ leave();
 __ jr(RA);
@@ -1377 +1296 @@
 }
 break;
 
 // case range_check_failed_id:
 case throw_range_check_failed_id:
-{ StubFrame f(sasm, "range_check_failed", dont_gc_arguments);
+{
+StubFrame f(sasm, "range_check_failed", dont_gc_arguments);
 oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address,
 throw_range_check_exception),true);
 }
 break;
 
@@ -1400 +1320 @@
 throw_div0_exception), false);
 }
 break;
 
 case throw_null_pointer_exception_id:
-{ StubFrame f(sasm, "throw_null_pointer_exception", dont_gc_arguments);
+{
+StubFrame f(sasm, "throw_null_pointer_exception", dont_gc_arguments);
 oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address,
 throw_null_pointer_exception),false);
 }
 break;
 
 case handle_exception_nofpu_id:
 save_fpu_registers = false;
 // fall through
 case handle_exception_id:
 {
-
-
 StubFrame f(sasm, "handle_exception", dont_gc_arguments);
-
 //OopMap* oop_map = save_live_registers(sasm, 1, save_fpu_registers);
 oop_maps = generate_handle_exception(id, sasm);
 }
 break;
 case handle_exception_from_callee_id:
 {
 StubFrame f(sasm, "handle_exception_from_callee", dont_gc_arguments);
 oop_maps = generate_handle_exception(id, sasm);
 }
 break;
 case unwind_exception_id:
 {
 __ set_info("unwind_exception", dont_gc_arguments);
-
 generate_unwind_exception(sasm);
 }
 break;
 
 
 case throw_array_store_exception_id:
-{ StubFrame f(sasm, "throw_array_store_exception", dont_gc_arguments);
+{
+StubFrame f(sasm, "throw_array_store_exception", dont_gc_arguments);
 // tos + 0: link
 // + 1: return address
 oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address,
 throw_array_store_exception), false);
 }
 break;
 
 case throw_class_cast_exception_id:
-{ StubFrame f(sasm, "throw_class_cast_exception", dont_gc_arguments);
+{
+StubFrame f(sasm, "throw_class_cast_exception", dont_gc_arguments);
 oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address,
-throw_class_cast_exception), V0);
+throw_class_cast_exception), true);
 }
 break;
 
 case throw_incompatible_class_change_error_id:
 {
 StubFrame f(sasm, "throw_incompatible_class_cast_exception", dont_gc_arguments);
 oop_maps = generate_exception_throw(sasm,
 CAST_FROM_FN_PTR(address, throw_incompatible_class_change_error), false);
 }
 break;
 
 case slow_subtype_check_id:
 {
@@ -1598 +1517 @@
 Runtime1::post_jvmti_exception_throw), V0);
 oop_maps = new OopMapSet();
 oop_maps->add_gc_map(call_offset, map);
 restore_live_registers(sasm);
 }*/
 case load_mirror_patching_id:
 {
 StubFrame f(sasm, "load_mirror_patching" , dont_gc_arguments);
 oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_mirror_patching));
 }
 break;
+
+case load_appendix_patching_id:
+{
+StubFrame f(sasm, "load_appendix_patching", dont_gc_arguments);
+// we should set up register map
+oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_appendix_patching));
+}
+break;
+
 case dtrace_object_alloc_id:
 {
 // V0:object
 StubFrame f(sasm, "dtrace_object_alloc", dont_gc_arguments);
 // we can't gc here so skip the oopmap but make sure that all
 // the live registers get saved.
 save_live_registers(sasm, 0);
 
 __ push_reg(V0);
+__ move(A0, V0);
 __ call(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc),
 relocInfo::runtime_call_type);
 __ super_pop(V0);
 
 restore_live_registers(sasm);
 }
 break;
+
 case fpu2long_stub_id:
 {
 //FIXME, I hava no idea how to port this
+//tty->print_cr("fpu2long_stub_id unimplemented yet!");
 }
+break;
+
+case deoptimize_id:
+{
+StubFrame f(sasm, "deoptimize", dont_gc_arguments);
+const int num_rt_args = 1; // thread
+OopMap* oop_map = save_live_registers(sasm, num_rt_args);
+int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, deoptimize));
+oop_maps = new OopMapSet();
+oop_maps->add_gc_map(call_offset, oop_map);
+restore_live_registers(sasm);
+DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
+assert(deopt_blob != NULL, "deoptimization blob must have been created");
+__ leave();
+__ jmp(deopt_blob->unpack_with_reexecution(), relocInfo::runtime_call_type);
+}
+break;
+
+case predicate_failed_trap_id:
+{
+StubFrame f(sasm, "predicate_failed_trap", dont_gc_arguments);
+
+OopMap* map = save_live_registers(sasm, 1);
+
+int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, predicate_failed_trap));
+oop_maps = new OopMapSet();
+oop_maps->add_gc_map(call_offset, map);
+restore_live_registers(sasm);
+__ leave();
+DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
+assert(deopt_blob != NULL, "deoptimization blob must have been created");
+
+__ jmp(deopt_blob->unpack_with_reexecution(), relocInfo::runtime_call_type);
+}
+break;
+
 default:
-{ StubFrame f(sasm, "unimplemented entry", dont_gc_arguments);
+{
+StubFrame f(sasm, "unimplemented entry", dont_gc_arguments);
 __ move(A1, (int)id);
 __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), A1);
 __ should_not_reach_here();
 }
 break;
