src/cpu/mips/vm/sharedRuntime_mips_64.cpp

changeset 8009:0477693968a6
parent    7997:6cbff0651f1a
child     9043:7519f4bf92b5
--- src/cpu/mips/vm/sharedRuntime_mips_64.cpp  8008:2c1fab4a6f4e
+++ src/cpu/mips/vm/sharedRuntime_mips_64.cpp  8009:0477693968a6
@@ -806,11 +806,10 @@
   __ move(AT, -(StackAlignmentInBytes));
   __ andr(SP, SP, AT);
   // push the return address on the stack (note that pushing, rather
   // than storing it, yields the correct frame alignment for the callee)
   // Put saved SP in another register
-  // const Register saved_sp = eax;
   const Register saved_sp = V0;
   __ move(saved_sp, T9);


   // Will jump to the compiled code just as if compiled code was doing it.
@@ -1152,19 +1151,19 @@
     case T_DOUBLE:
       __ sdc1(FSF, FP, -wordSize );
       break;
     case T_VOID: break;
     case T_LONG:
       __ sd(V0, FP, -wordSize);
       break;
     case T_OBJECT:
     case T_ARRAY:
       __ sd(V0, FP, -wordSize);
       break;
     default: {
       __ sw(V0, FP, -wordSize);
       }
   }
 }

 void SharedRuntime::restore_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
   // We always ignore the frame_slots arg and just use the space just below frame pointer
@@ -1183,12 +1182,12 @@
     case T_OBJECT:
     case T_ARRAY:
       __ ld(V0, FP, -wordSize);
       break;
     default: {
       __ lw(V0, FP, -wordSize);
       }
   }
 }

 static void save_args(MacroAssembler *masm, int arg_count, int first_arg, VMRegPair *args) {
   for ( int i = first_arg ; i < arg_count ; i++ ) {
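
The two routines above work as a pair: whatever width and slot save_native_result picks for a given return type, restore_native_result must read back the same way from the word just below FP. A minimal standalone C++ sketch of that invariant (illustrative types and names, not HotSpot's):

#include <cassert>
#include <cstdint>
#include <cstring>

enum BasicType { T_INT, T_LONG, T_OBJECT, T_VOID };

struct Frame { uint8_t slot[8]; };  // stands in for the word at FP - wordSize

// Mirrors save_native_result: 64-bit store (sd) for longs/oops, 32-bit (sw) otherwise.
static void save_result(Frame& f, BasicType t, uint64_t v0) {
  switch (t) {
    case T_LONG:
    case T_OBJECT: std::memcpy(f.slot, &v0, 8); break;                  // sd V0, FP, -wordSize
    case T_VOID:   break;
    default: { uint32_t w = (uint32_t)v0; std::memcpy(f.slot, &w, 4); } // sw V0, FP, -wordSize
  }
}

// Mirrors restore_native_result: ld for longs/oops, sign-extending lw otherwise.
static uint64_t restore_result(const Frame& f, BasicType t) {
  uint64_t v0 = 0;
  switch (t) {
    case T_LONG:
    case T_OBJECT: std::memcpy(&v0, f.slot, 8); break;                  // ld V0, FP, -wordSize
    case T_VOID:   break;
    default: { int32_t w; std::memcpy(&w, f.slot, 4); v0 = (uint64_t)(int64_t)w; } // lw V0
  }
  return v0;
}

int main() {
  Frame f;
  save_result(f, T_LONG, 0x1122334455667788ULL);
  assert(restore_result(f, T_LONG) == 0x1122334455667788ULL);
  save_result(f, T_INT, (uint64_t)(int64_t)-5);   // lw sign-extends on MIPS64
  assert(restore_result(f, T_INT) == (uint64_t)(int64_t)-5);
  return 0;
}
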
@@ -1575,11 +1574,10 @@
   // Now figure out where the args must be stored and how much stack space
   // they require (neglecting out_preserve_stack_slots but space for storing
   // the 1st six register arguments). It's weird see int_stk_helper.
   //
   int out_arg_slots;
-  //out_arg_slots = c_calling_convention(out_sig_bt, out_regs, total_c_args);
   out_arg_slots = c_calling_convention(out_sig_bt, out_regs, NULL, total_c_args);

   // Compute framesize for the wrapper. We need to handlize all oops in
   // registers. We must create space for them here that is disjoint from
   // the windowed save area because we have no control over when we might
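
c_calling_convention here only has to report how much stack space the outgoing C arguments need once the first few have been given argument registers. A heavily simplified sketch of that kind of computation, assuming the MIPS N64 rule of eight integer argument registers (A0-A7); the types and the one-slot-per-argument accounting are illustrative, not the VM's:

#include <cstdio>
#include <vector>

enum ArgKind { IN_REGISTER, ON_STACK };
struct ArgLoc { ArgKind kind; int index; };   // register number or stack slot

// Walk the outgoing signature, hand out registers first, count stack slots.
static int assign_c_args(int total_args, std::vector<ArgLoc>& out) {
  const int kIntArgRegs = 8;                  // A0..A7 on MIPS N64
  int stack_slots = 0;
  for (int i = 0; i < total_args; i++) {
    if (i < kIntArgRegs) out.push_back({IN_REGISTER, i});
    else                 out.push_back({ON_STACK, stack_slots++});
  }
  return stack_slots;                         // roughly what out_arg_slots counts
}

int main() {
  std::vector<ArgLoc> locs;
  int slots = assign_c_args(10, locs);        // e.g. JNIEnv*, jclass, plus 8 more
  std::printf("stack slots needed: %d\n", slots);   // 2
  return 0;
}
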
@@ -1763,14 +1761,11 @@
   // instruction fits that requirement.

   // Generate stack overflow check

   if (UseStackBanging) {
-    //this function will modify the value in A0
-    __ push(A0);
     __ bang_stack_with_offset(StackShadowPages*os::vm_page_size());
-    __ pop(A0);
   } else {
     // need a 5 byte instruction to allow MT safe patching to non-entrant
     __ nop();
     __ nop();
     __ nop();
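
bang_stack_with_offset touches the stack shadow region ahead of time so that a thread about to overflow faults here, at a known point, instead of somewhere inside the native frame. A hedged, freestanding sketch of the idea; page and shadow sizes are illustrative, and the probe stays inside a local buffer so the example is safe to run:

#include <cstddef>

// Touch one byte per page below 'sp' across the shadow region. In the VM the
// write either succeeds or hits a guard page, whose fault the signal handler
// converts into a StackOverflowError at a well-defined place.
static void bang_stack(volatile char* sp, size_t shadow_bytes, size_t page_size) {
  for (size_t off = page_size; off <= shadow_bytes; off += page_size) {
    *(sp - off) = 0;
  }
}

int main() {
  static char fake_stack[8 * 4096];                          // stands in for the thread stack
  volatile char* sp = fake_stack + sizeof(fake_stack) - 16;  // pretend SP near the top
  bang_stack(sp, 4 * 4096, 4096);                            // probe four "shadow pages"
  return 0;
}
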
@@ -1782,15 +1777,15 @@
 #ifndef OPT_THREAD
   __ get_thread(TREG);
 #endif
   //FIXME here
   __ st_ptr(SP, TREG, in_bytes(JavaThread::last_Java_sp_offset()));
-  // -2 because return address is already present and so is saved ebp
   __ move(AT, -(StackAlignmentInBytes));
   __ andr(SP, SP, AT);

   __ enter();
+  // -2 because return address is already present and so is saved ebp
   __ addiu(SP, SP, -1 * (stack_size - 2*wordSize));

   // Frame is now completed as far a size and linkage.

   int frame_complete = ((intptr_t)__ pc()) - start;
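
The "-2" in the moved comment is plain frame arithmetic: stack_size already accounts for the return address and the saved frame pointer, and enter() has just pushed exactly those two words, so the explicit adjustment only reserves stack_size - 2*wordSize more. A tiny sketch with made-up numbers:

#include <cassert>

int main() {
  const int wordSize   = 8;               // 64-bit MIPS
  const int stack_size = 13 * wordSize;   // hypothetical total wrapper frame size

  int sp = 0x4000;                        // pretend SP before enter()
  sp -= 2 * wordSize;                     // enter(): pushes RA and the old FP
  sp -= stack_size - 2 * wordSize;        // the addiu above

  assert(0x4000 - sp == stack_size);      // the whole frame is exactly stack_size
  return 0;
}
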
@@ -2100,17 +2095,17 @@
   // expression: ((mark - esp) & (3 - os::vm_page_size())),
   // assuming both stack pointer and pagesize have their
   // least significant 2 bits clear.
   // NOTE: the oopMark is in swap_reg %eax as the result of cmpxchg

-  __ dsub(swap_reg, swap_reg,SP);
+  __ dsub(swap_reg, swap_reg, SP);
   __ move(AT, 3 - os::vm_page_size());
   __ andr(swap_reg , swap_reg, AT);
   // Save the test result, for recursive case, the result is zero
   __ sd(swap_reg, lock_reg, mark_word_offset);
   //FIXME here, Why notEqual?
-  __ bne(swap_reg,R0, slow_path_lock);
+  __ bne(swap_reg, R0, slow_path_lock);
   __ delayed()->nop();
   // Slow path will re-enter here
   __ bind(lock_done);

   if (UseBiasedLocking) {
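
The masked subtraction above is the stack-lock test: after the failed compare-and-swap, swap_reg holds the object's mark word, and if that value points into the current thread's stack (within one page of SP, with the low lock bits clear) the lock is already held by this thread, so the recursive case stores a zero displaced header. A standalone sketch of the arithmetic, with illustrative names and values rather than the VM's:

#include <cstdint>
#include <cstdio>

// Returns true when 'mark' looks like a pointer into our own stack:
// less than one page above 'sp' and with the low two (lock) bits clear.
// 3 - page_size has the same bit pattern as ~(page_size - 1) | 3, so the
// mask keeps exactly the "a page or more away" bits plus the lock bits.
static bool is_recursive_stack_lock(uintptr_t mark, uintptr_t sp, uintptr_t page_size) {
  return ((mark - sp) & (3 - page_size)) == 0;
}

int main() {
  const uintptr_t page = 4096;
  const uintptr_t sp   = 0x7ffff000;
  std::printf("%d\n", (int)is_recursive_stack_lock(sp + 0x40, sp, page));   // 1: our stack
  std::printf("%d\n", (int)is_recursive_stack_lock(0x10000000, sp, page));  // 0: elsewhere
  return 0;
}
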
@@ -2123,11 +2118,11 @@
   // Finally just about ready to make the JNI call


   // get JNIEnv* which is first argument to native
   if (!is_critical_native) {
     __ addi(A0, thread, in_bytes(JavaThread::jni_environment_offset()));
   }

   // Example: Java_java_lang_ref_Finalizer_invokeFinalizeMethod(JNIEnv *env, jclass clazz, jobject ob)
   /* Load the second arguments into A1 */
   //__ ld(A1, SP , wordSize ); // klass
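
For reference, the native symbol the wrapper eventually calls has the standard JNI shape named in the comment above: JNIEnv* first, then jclass for a static method (or jobject for the receiver), then the declared Java arguments. Declaration only, assuming jni.h is on the include path:

#include <jni.h>

// The example native entry mentioned above; the wrapper loads A0 with the
// JNIEnv* and A1 with the class/receiver handle before making the call.
extern "C" JNIEXPORT void JNICALL
Java_java_lang_ref_Finalizer_invokeFinalizeMethod(JNIEnv* env, jclass clazz, jobject ob);
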
@@ -2166,22 +2161,20 @@
   // state is not atomic w.r.t. GC, as this scenario demonstrates:
   // Java thread A, in _thread_in_native state, loads _not_synchronized and is preempted.
   // VM thread changes sync state to synchronizing and suspends threads for GC.
   // Thread A is resumed to finish this native method, but doesn't block here since it
   // didn't see any synchronization is progress, and escapes.
-  // __ movl(Address(thread, JavaThread::thread_state_offset()), _thread_in_native_trans);
-  //__ sw(_thread_in_native_trans, thread, JavaThread::thread_state_offset());
-  // __ move(AT, (int)_thread_in_native_trans);
   __ addi(AT, R0, _thread_in_native_trans);
   __ sw(AT, thread, in_bytes(JavaThread::thread_state_offset()));

+  //if(os::is_MP()) {}
+
   Label after_transition;

   // check for safepoint operation in progress and/or pending suspend requests
-  { Label Continue;
-    //FIXME here, which regiser should we use?
-    // SafepointSynchronize::_not_synchronized);
+  {
+    Label Continue;
     __ li(AT, SafepointSynchronize::address_of_state());
     __ lw(A0, AT, 0);
     __ addi(AT, A0, -SafepointSynchronize::_not_synchronized);
     Label L;
     __ bne(AT,R0, L);
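
What the stores and loads above implement, written out in portable C++ rather than generated MIPS code: publish _thread_in_native_trans before reading the safepoint flag, so the preemption scenario described in the comment cannot let the thread slip past an in-progress safepoint. All names here are stand-ins, not the VM's real types:

#include <atomic>

enum ThreadState    { _thread_in_native, _thread_in_native_trans, _thread_in_Java };
enum SafepointState { _not_synchronized, _synchronizing, _synchronized };

static std::atomic<int> thread_state(_thread_in_native);
static std::atomic<int> safepoint_state(_not_synchronized);

// Stand-in for the VM's slow path that parks the thread until the safepoint ends.
static void block_for_safepoint() { /* cooperate with the VM thread here */ }

static void transition_native_to_java() {
  // sw of _thread_in_native_trans into the thread-state field, made visible first
  thread_state.store(_thread_in_native_trans, std::memory_order_seq_cst);
  // lw of the SafepointSynchronize state; fall straight through only while it
  // still reads _not_synchronized
  if (safepoint_state.load(std::memory_order_seq_cst) != _not_synchronized) {
    block_for_safepoint();
  }
  thread_state.store(_thread_in_Java, std::memory_order_release);
}

int main() {
  transition_native_to_java();
  return 0;
}
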
@@ -2196,12 +2189,12 @@
     // Also can't use call_VM_leaf either as it will check to see if esi & edi are
     // preserved and correspond to the bcp/locals pointers. So we do a runtime call
     // by hand.
     //
     save_native_result(masm, ret_type, stack_slots);
-    __ move (A0, thread);
-    __ addi(SP,SP, -wordSize);
+    __ move(A0, thread);
+    __ addi(SP, SP, -wordSize);
     __ push(S2);
     __ move(AT, -(StackAlignmentInBytes));
     __ move(S2, SP);     // use S2 as a sender SP holder
     __ andr(SP, SP, AT); // align stack as required by ABI
     if (!is_critical_native) {
@@ -2309,27 +2302,26 @@
   __ reset_last_Java_frame(false, true);

   // Unpack oop result
   if (ret_type == T_OBJECT || ret_type == T_ARRAY) {
     Label L;
-    __ beq(V0, R0,L );
+    __ beq(V0, R0, L);
     __ delayed()->nop();
     __ ld(V0, V0, 0);
     __ bind(L);
     __ verify_oop(V0);
   }

   if (!is_critical_native) {
     // reset handle block
     __ ld(AT, thread, in_bytes(JavaThread::active_handles_offset()));
     __ sw(R0, AT, JNIHandleBlock::top_offset_in_bytes());
   }

   if (!is_critical_native) {
     // Any exception pending?
     __ ld(AT, thread, in_bytes(Thread::pending_exception_offset()));
-
     __ bne(AT, R0, exception_pending);
     __ delayed()->nop();
   }
   // no exception, we're almost done

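
The "Unpack oop result" block above follows the usual JNI rule: an object return value comes back as a handle, i.e. a pointer to a slot holding the real object pointer, with a null handle standing for null, so the wrapper only dereferences when the handle is non-zero. A small illustrative sketch (hypothetical types, not the VM's oop machinery):

#include <cassert>
#include <cstddef>

struct Oop { int dummy; };
typedef Oop** jhandle;   // hypothetical name for "pointer to an oop slot"

static Oop* unpack_result(jhandle h) {
  // corresponds to: beq V0, R0, L; ld V0, V0, 0; bind(L)
  return (h == NULL) ? NULL : *h;
}

int main() {
  Oop obj;
  Oop* slot = &obj;
  assert(unpack_result(&slot) == &obj);  // non-null handle -> real object pointer
  assert(unpack_result(NULL) == NULL);   // null handle stays null
  return 0;
}
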
@@ -2338,11 +2330,11 @@

   // Return
 #ifndef OPT_THREAD
   __ get_thread(TREG);
 #endif
-  __ ld_ptr(SP, TREG, in_bytes(JavaThread::last_Java_sp_offset()));
+  //__ ld_ptr(SP, TREG, in_bytes(JavaThread::last_Java_sp_offset()));
   __ leave();

   __ jr(RA);
   __ delayed()->nop();
   // Unexpected paths are out of line and go here
@@ -2469,12 +2461,12 @@

   // remove possible return value from FPU register stack
   __ empty_FPU_stack();

   // pop our frame
   //forward_exception_entry need return address on stack
   __ addiu(SP, FP, wordSize);
   __ ld(FP, SP, (-1) * wordSize);

   // and forward the exception
   __ jmp(StubRoutines::forward_exception_entry(), relocInfo::runtime_call_type);
   __ delayed()->nop();
@@ -3706,11 +3698,11 @@
   // allocate space for the code
   ResourceMark rm;

   //CodeBuffer buffer(name, 1000, 512);
   //FIXME. aoqi. code_size
-  CodeBuffer buffer(name, 20000, 2048);
+  CodeBuffer buffer(name, 2000, 2048);
   MacroAssembler* masm = new MacroAssembler(&buffer);

   int frame_size_words;
   //we put the thread in A0

