@@ -849,27 +849,29 @@
 #ifdef _LP64
       Register ld_off = Rdisp;
       __ set(reg2offset(r_1) + extraspace + bias, ld_off);
 #else
       int ld_off = reg2offset(r_1) + extraspace + bias;
+#endif // _LP64
 #ifdef ASSERT
       G1_forced = true;
 #endif // ASSERT
-#endif // _LP64
       r_1 = G1_scratch->as_VMReg();// as part of the load/store shuffle
       if (!r_2->is_valid()) __ ld (base, ld_off, G1_scratch);
       else                  __ ldx(base, ld_off, G1_scratch);
     }
 
     if (r_1->is_Register()) {
       Register r = r_1->as_Register()->after_restore();
       if (sig_bt[i] == T_OBJECT || sig_bt[i] == T_ARRAY) {
         store_c2i_object(r, base, st_off);
       } else if (sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) {
+#ifndef _LP64
         if (TieredCompilation) {
           assert(G1_forced || sig_bt[i] != T_LONG, "should not use register args for longs");
         }
+#endif // _LP64
         store_c2i_long(r, base, st_off, r_2->is_stack());
       } else {
         store_c2i_int(r, base, st_off);
       }
     } else {