src/cpu/mips/vm/mips_64.ad

changeset 390:d3aefa77da6c
parent    389:76857a2c3534
child     391:910b77f150c4
     int index = $mem$$index;
     int scale = $mem$$scale;
     int disp = $mem$$disp;
 
     if( index != 0 ) {
-      assert(UseLoongsonISA, "Only supported for Loongson CPUs");
-      __ gslbx(as_Register(dst), as_Register(base), as_Register(index), disp);
+      if( Assembler::is_simm16(disp) ) {
+        if( UseLoongsonISA ) {
+          if (scale == 0) {
+            __ gslbx(as_Register(dst), as_Register(base), as_Register(index), disp);
+          } else {
+            __ dsll(AT, as_Register(index), scale);
+            __ gslbx(as_Register(dst), as_Register(base), AT, disp);
+          }
+        } else {
+          if (scale == 0) {
+            __ addu(AT, as_Register(base), as_Register(index));
+          } else {
+            __ dsll(AT, as_Register(index), scale);
+            __ addu(AT, as_Register(base), AT);
+          }
+          __ lb(as_Register(dst), AT, disp);
+        }
+      } else {
+        if (scale == 0) {
+          __ addu(AT, as_Register(base), as_Register(index));
+        } else {
+          __ dsll(AT, as_Register(index), scale);
+          __ addu(AT, as_Register(base), AT);
+        }
+        __ move(T9, disp);
+        if( UseLoongsonISA ) {
+          __ gslbx(as_Register(dst), AT, T9, 0);
+        } else {
+          __ addu(AT, AT, T9);
+          __ lb(as_Register(dst), AT, 0);
+        }
+      }
     } else {
-      __ lb(as_Register(dst), as_Register(base), disp);
+      if( Assembler::is_simm16(disp) ) {
+        __ lb(as_Register(dst), as_Register(base), disp);
+      } else {
+        __ move(T9, disp);
+        if( UseLoongsonISA ) {
+          __ gslbx(as_Register(dst), as_Register(base), T9, 0);
+        } else {
+          __ addu(AT, as_Register(base), T9);
+          __ lb(as_Register(dst), AT, 0);
+        }
+      }
     }
   %}
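Everything below turns on one range test: the plain MIPS loads/stores (lb, sb, lw, sd, ...) take a signed 16-bit displacement, while most of the new paths gate the Loongson indexed forms (gslbx, gssbx, ...) behind an even tighter Assembler::is_simm(disp, 8) check. A minimal sketch of the range predicate, assuming it matches HotSpot's Assembler::is_simm / is_simm16 (the real declarations are not part of this hunk):

    #include <cassert>

    // Sketch, not the HotSpot source: value fits a signed `bits`-wide
    // immediate field iff -2^(bits-1) <= value < 2^(bits-1).
    static bool is_simm(long value, int bits) {
      const long limit = 1L << (bits - 1);
      return -limit <= value && value < limit;
    }
    static bool is_simm16(long value) { return is_simm(value, 16); }

    int main() {
      assert(is_simm16(32767) && !is_simm16(32768)); // lb/sb/lw/sd disp field
      assert(is_simm(127, 8) && !is_simm(128, 8));   // gs*x disp field
      return 0;
    }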
 
   //Load byte unsigned
-  enc_class load_UB_enc (mRegI dst, umemory mem) %{
+  enc_class load_UB_enc (mRegI dst, memory mem) %{
     MacroAssembler _masm(&cbuf);
     int dst = $dst$$reg;
     int base = $mem$$base;
     int index = $mem$$index;
     int scale = $mem$$scale;
     int disp = $mem$$disp;
 
-    assert(index == 0, "no index");
-    __ lbu(as_Register(dst), as_Register(base), disp);
+    if( index != 0 ) {
+      if (scale == 0) {
+        __ daddu(AT, as_Register(base), as_Register(index));
+      } else {
+        __ dsll(AT, as_Register(index), scale);
+        __ daddu(AT, as_Register(base), AT);
+      }
+      if( Assembler::is_simm16(disp) ) {
+        __ lbu(as_Register(dst), AT, disp);
+      } else {
+        __ move(T9, disp);
+        __ daddu(AT, AT, T9);
+        __ lbu(as_Register(dst), AT, 0);
+      }
+    } else {
+      if( Assembler::is_simm16(disp) ) {
+        __ lbu(as_Register(dst), as_Register(base), disp);
+      } else {
+        __ move(T9, disp);
+        __ daddu(AT, as_Register(base), T9);
+        __ lbu(as_Register(dst), AT, 0);
+      }
+    }
   %}
 
   enc_class store_B_reg_enc (memory mem, mRegI src) %{
     MacroAssembler _masm(&cbuf);
     int src = $src$$reg;
     int base = $mem$$base;
     int index = $mem$$index;
     int scale = $mem$$scale;
     int disp = $mem$$disp;
 
     if( index != 0 ) {
-      assert(UseLoongsonISA, "Only supported for Loongson CPUs");
-      __ gssbx(as_Register(src), as_Register(base), as_Register(index), disp);
+      if (scale == 0) {
+        if( Assembler::is_simm(disp, 8) ) {
+          if (UseLoongsonISA) {
+            __ gssbx(as_Register(src), as_Register(base), as_Register(index), disp);
+          } else {
+            __ addu(AT, as_Register(base), as_Register(index));
+            __ sb(as_Register(src), AT, disp);
+          }
+        } else if( Assembler::is_simm16(disp) ) {
+          __ addu(AT, as_Register(base), as_Register(index));
+          __ sb(as_Register(src), AT, disp);
+        } else {
+          __ addu(AT, as_Register(base), as_Register(index));
+          __ move(T9, disp);
+          if (UseLoongsonISA) {
+            __ gssbx(as_Register(src), AT, T9, 0);
+          } else {
+            __ addu(AT, AT, T9);
+            __ sb(as_Register(src), AT, 0);
+          }
+        }
+      } else {
+        __ dsll(AT, as_Register(index), scale);
+        if( Assembler::is_simm(disp, 8) ) {
+          if (UseLoongsonISA) {
+            __ gssbx(as_Register(src), AT, as_Register(base), disp);
+          } else {
+            __ addu(AT, as_Register(base), AT);
+            __ sb(as_Register(src), AT, disp);
+          }
+        } else if( Assembler::is_simm16(disp) ) {
+          __ addu(AT, as_Register(base), AT);
+          __ sb(as_Register(src), AT, disp);
+        } else {
+          __ addu(AT, as_Register(base), AT);
+          __ move(T9, disp);
+          if (UseLoongsonISA) {
+            __ gssbx(as_Register(src), AT, T9, 0);
+          } else {
+            __ addu(AT, AT, T9);
+            __ sb(as_Register(src), AT, 0);
+          }
+        }
+      }
     } else {
-      __ sb(as_Register(src), as_Register(base), disp);
+      if( Assembler::is_simm16(disp) ) {
+        __ sb(as_Register(src), as_Register(base), disp);
+      } else {
+        __ move(T9, disp);
+        if (UseLoongsonISA) {
+          __ gssbx(as_Register(src), as_Register(base), T9, 0);
+        } else {
+          __ addu(AT, as_Register(base), T9);
+          __ sb(as_Register(src), AT, 0);
+        }
+      }
     }
   %}
 
-  enc_class store_B0_enc (memory mem) %{
+  enc_class store_B_immI_enc (memory mem, immI8 src) %{
     MacroAssembler _masm(&cbuf);
     int base = $mem$$base;
     int index = $mem$$index;
     int scale = $mem$$scale;
     int disp = $mem$$disp;
+    int value = $src$$constant;
 
     if( index != 0 ) {
-      assert(UseLoongsonISA, "Only supported for Loongson CPUs");
-      __ gssbx(R0, as_Register(base), as_Register(index), disp);
+      if (!UseLoongsonISA) {
+        if (scale == 0) {
+          __ daddu(AT, as_Register(base), as_Register(index));
+        } else {
+          __ dsll(AT, as_Register(index), scale);
+          __ daddu(AT, as_Register(base), AT);
+        }
+        if( Assembler::is_simm16(disp) ) {
+          if (value == 0) {
+            __ sb(R0, AT, disp);
+          } else {
+            __ move(T9, value);
+            __ sb(T9, AT, disp);
+          }
+        } else {
+          if (value == 0) {
+            __ move(T9, disp);
+            __ daddu(AT, AT, T9);
+            __ sb(R0, AT, 0);
+          } else {
+            __ move(T9, disp);
+            __ daddu(AT, AT, T9);
+            __ move(T9, value);
+            __ sb(T9, AT, 0);
+          }
+        }
+      } else {
+        if (scale == 0) {
+          if( Assembler::is_simm(disp, 8) ) {
+            if (value == 0) {
+              __ gssbx(R0, as_Register(base), as_Register(index), disp);
+            } else {
+              __ move(T9, value);
+              __ gssbx(T9, as_Register(base), as_Register(index), disp);
+            }
+          } else if( Assembler::is_simm16(disp) ) {
+            __ daddu(AT, as_Register(base), as_Register(index));
+            if (value == 0) {
+              __ sb(R0, AT, disp);
+            } else {
+              __ move(T9, value);
+              __ sb(T9, AT, disp);
+            }
+          } else {
+            if (value == 0) {
+              __ daddu(AT, as_Register(base), as_Register(index));
+              __ move(T9, disp);
+              __ gssbx(R0, AT, T9, 0);
+            } else {
+              __ move(AT, disp);
+              __ move(T9, value);
+              __ daddu(AT, as_Register(base), AT);
+              __ gssbx(T9, AT, as_Register(index), 0);
+            }
+          }
+        } else {
+          if( Assembler::is_simm(disp, 8) ) {
+            __ dsll(AT, as_Register(index), scale);
+            if (value == 0) {
+              __ gssbx(R0, as_Register(base), AT, disp);
+            } else {
+              __ move(T9, value);
+              __ gssbx(T9, as_Register(base), AT, disp);
+            }
+          } else if( Assembler::is_simm16(disp) ) {
+            __ dsll(AT, as_Register(index), scale);
+            __ daddu(AT, as_Register(base), AT);
+            if (value == 0) {
+              __ sb(R0, AT, disp);
+            } else {
+              __ move(T9, value);
+              __ sb(T9, AT, disp);
+            }
+          } else {
+            __ dsll(AT, as_Register(index), scale);
+            if (value == 0) {
+              __ daddu(AT, as_Register(base), AT);
+              __ move(T9, disp);
+              __ gssbx(R0, AT, T9, 0);
+            } else {
+              __ move(T9, disp);
+              __ daddu(AT, AT, T9);
+              __ move(T9, value);
+              __ gssbx(T9, as_Register(base), AT, 0);
+            }
+          }
+        }
+      }
     } else {
-      __ sb(R0, as_Register(base), disp);
+      if( Assembler::is_simm16(disp) ) {
+        if (value == 0) {
+          __ sb(R0, as_Register(base), disp);
+        } else {
+          __ move(AT, value);
+          __ sb(AT, as_Register(base), disp);
+        }
+      } else {
+        if (value == 0) {
+          __ move(T9, disp);
+          if (UseLoongsonISA) {
+            __ gssbx(R0, as_Register(base), T9, 0);
+          } else {
+            __ daddu(AT, as_Register(base), T9);
+            __ sb(R0, AT, 0);
+          }
+        } else {
+          __ move(T9, disp);
+          if (UseLoongsonISA) {
+            __ move(AT, value);
+            __ gssbx(AT, as_Register(base), T9, 0);
+          } else {
+            __ daddu(AT, as_Register(base), T9);
+            __ move(T9, value);
+            __ sb(T9, AT, 0);
+          }
+        }
+      }
     }
   %}
 
-  enc_class store_B_reg_sync_enc (memory mem, mRegI src) %{
-    MacroAssembler _masm(&cbuf);
-    int src = $src$$reg;
-    int base = $mem$$base;
-    int index = $mem$$index;
-    int scale = $mem$$scale;
-    int disp = $mem$$disp;
-
-    if( index != 0 ) {
-      assert(UseLoongsonISA, "Only supported for Loongson CPUs");
-      __ gssbx(as_Register(src), as_Register(base), as_Register(index), disp);
-    } else {
-      __ sb(as_Register(src), as_Register(base), disp);
-    }
-    __ sync();
-  %}
-
-  enc_class store_B0_sync_enc (memory mem) %{
+  enc_class store_B_immI_enc_sync (memory mem, immI8 src) %{
     MacroAssembler _masm(&cbuf);
     int base = $mem$$base;
     int index = $mem$$index;
     int scale = $mem$$scale;
     int disp = $mem$$disp;
+    int value = $src$$constant;
 
     if( index != 0 ) {
-      assert(UseLoongsonISA, "Only supported for Loongson CPUs");
-      __ gssbx(R0, as_Register(base), as_Register(index), disp);
+      if ( UseLoongsonISA ) {
+        if ( Assembler::is_simm(disp,8) ) {
+          if ( scale == 0 ) {
+            if ( value == 0 ) {
+              __ gssbx(R0, as_Register(base), as_Register(index), disp);
+            } else {
+              __ move(AT, value);
+              __ gssbx(AT, as_Register(base), as_Register(index), disp);
+            }
+          } else {
+            __ dsll(AT, as_Register(index), scale);
+            if ( value == 0 ) {
+              __ gssbx(R0, as_Register(base), AT, disp);
+            } else {
+              __ move(T9, value);
+              __ gssbx(T9, as_Register(base), AT, disp);
+            }
+          }
+        } else if ( Assembler::is_simm16(disp) ) {
+          if ( scale == 0 ) {
+            __ daddu(AT, as_Register(base), as_Register(index));
+            if ( value == 0 ) {
+              __ sb(R0, AT, disp);
+            } else {
+              __ move(T9, value);
+              __ sb(T9, AT, disp);
+            }
+          } else {
+            __ dsll(AT, as_Register(index), scale);
+            __ daddu(AT, as_Register(base), AT);
+            if ( value == 0 ) {
+              __ sb(R0, AT, disp);
+            } else {
+              __ move(T9, value);
+              __ sb(T9, AT, disp);
+            }
+          }
+        } else {
+          if ( scale == 0 ) {
+            __ move(AT, disp);
+            __ daddu(AT, as_Register(index), AT);
+            if ( value == 0 ) {
+              __ gssbx(R0, as_Register(base), AT, 0);
+            } else {
+              __ move(T9, value);
+              __ gssbx(T9, as_Register(base), AT, 0);
+            }
+          } else {
+            __ dsll(AT, as_Register(index), scale);
+            __ move(T9, disp);
+            __ daddu(AT, AT, T9);
+            if ( value == 0 ) {
+              __ gssbx(R0, as_Register(base), AT, 0);
+            } else {
+              __ move(T9, value);
+              __ gssbx(T9, as_Register(base), AT, 0);
+            }
+          }
+        }
+      } else { //not use loongson isa
+        if (scale == 0) {
+          __ daddu(AT, as_Register(base), as_Register(index));
+        } else {
+          __ dsll(AT, as_Register(index), scale);
+          __ daddu(AT, as_Register(base), AT);
+        }
+        if( Assembler::is_simm16(disp) ) {
+          if (value == 0) {
+            __ sb(R0, AT, disp);
+          } else {
+            __ move(T9, value);
+            __ sb(T9, AT, disp);
+          }
+        } else {
+          if (value == 0) {
+            __ move(T9, disp);
+            __ daddu(AT, AT, T9);
+            __ sb(R0, AT, 0);
+          } else {
+            __ move(T9, disp);
+            __ daddu(AT, AT, T9);
+            __ move(T9, value);
+            __ sb(T9, AT, 0);
+          }
+        }
+      }
     } else {
-      __ sb(R0, as_Register(base), disp);
+      if ( UseLoongsonISA ){
+        if ( Assembler::is_simm16(disp) ){
+          if ( value == 0 ) {
+            __ sb(R0, as_Register(base), disp);
+          } else {
+            __ move(AT, value);
+            __ sb(AT, as_Register(base), disp);
+          }
+        } else {
+          __ move(AT, disp);
+          if ( value == 0 ) {
+            __ gssbx(R0, as_Register(base), AT, 0);
+          } else {
+            __ move(T9, value);
+            __ gssbx(T9, as_Register(base), AT, 0);
+          }
+        }
+      } else {
+        if( Assembler::is_simm16(disp) ) {
+          if (value == 0) {
+            __ sb(R0, as_Register(base), disp);
+          } else {
+            __ move(AT, value);
+            __ sb(AT, as_Register(base), disp);
+          }
+        } else {
+          if (value == 0) {
+            __ move(T9, disp);
+            __ daddu(AT, as_Register(base), T9);
+            __ sb(R0, AT, 0);
+          } else {
+            __ move(T9, disp);
+            __ daddu(AT, as_Register(base), T9);
+            __ move(T9, value);
+            __ sb(T9, AT, 0);
+          }
+        }
+      }
     }
+
     __ sync();
   %}
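store_B_immI_enc_sync differs from store_B_immI_enc above only in the trailing __ sync(), a full MIPS memory barrier emitted after the store (the _sync encodings presumably back volatile byte stores). A rough portable analogy, not the HotSpot implementation:

    #include <atomic>

    // Analogy only: the plain enc_class behaves like a relaxed store,
    // while the _sync variant is a store followed by a full fence
    // (MIPS `sync`). HotSpot emits the barrier instruction directly.
    std::atomic<signed char> cell;

    void store_plain(signed char v) { cell.store(v, std::memory_order_relaxed); }

    void store_sync(signed char v) {
      cell.store(v, std::memory_order_relaxed);
      std::atomic_thread_fence(std::memory_order_seq_cst);  // ~ __ sync()
    }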
 
   // Load Short (16bit signed)
   enc_class load_S_enc (mRegI dst, memory mem) %{
     MacroAssembler _masm(&cbuf);
     int dst = $dst$$reg;
     int base = $mem$$base;
     int index = $mem$$index;
     int scale = $mem$$scale;
     int disp = $mem$$disp;
 
     if( index != 0 ) {
-      assert(UseLoongsonISA, "Only supported for Loongson CPUs");
-      __ gslhx(as_Register(dst), as_Register(base), as_Register(index), disp);
-    } else {
-      __ lh(as_Register(dst), as_Register(base), disp);
+      if ( UseLoongsonISA ) {
+        if ( Assembler::is_simm(disp, 8) ) {
+          if (scale == 0) {
+            __ gslhx(as_Register(dst), as_Register(base), as_Register(index), disp);
+          } else {
+            __ dsll(AT, as_Register(index), scale);
+            __ gslhx(as_Register(dst), as_Register(base), AT, disp);
+          }
+        } else if ( Assembler::is_simm16(disp) ) {
+          if (scale == 0) {
+            __ daddu(AT, as_Register(base), as_Register(index));
+            __ lh(as_Register(dst), AT, disp);
+          } else {
+            __ dsll(AT, as_Register(index), scale);
+            __ daddu(AT, as_Register(base), AT);
+            __ lh(as_Register(dst), AT, disp);
+          }
+        } else {
+          if (scale == 0) {
+            __ move(AT, disp);
+            __ daddu(AT, as_Register(index), AT);
+            __ gslhx(as_Register(dst), as_Register(base), AT, 0);
+          } else {
+            __ dsll(AT, as_Register(index), scale);
+            __ move(T9, disp);
+            __ daddu(AT, AT, T9);
+            __ gslhx(as_Register(dst), as_Register(base), AT, 0);
+          }
+        }
+      } else { // not use loongson isa
+        if (scale == 0) {
+          __ daddu(AT, as_Register(base), as_Register(index));
+        } else {
+          __ dsll(AT, as_Register(index), scale);
+          __ daddu(AT, as_Register(base), AT);
+        }
+        if( Assembler::is_simm16(disp) ) {
+          __ lh(as_Register(dst), AT, disp);
+        } else {
+          __ move(T9, disp);
+          __ daddu(AT, AT, T9);
+          __ lh(as_Register(dst), AT, 0);
+        }
+      }
+    } else { // index is 0
+      if ( UseLoongsonISA ) {
+        if ( Assembler::is_simm16(disp) ) {
+          __ lh(as_Register(dst), as_Register(base), disp);
+        } else {
+          __ move(T9, disp);
+          __ gslhx(as_Register(dst), as_Register(base), T9, 0);
+        }
+      } else { //not use loongson isa
+        if( Assembler::is_simm16(disp) ) {
+          __ lh(as_Register(dst), as_Register(base), disp);
+        } else {
+          __ move(T9, disp);
+          __ daddu(AT, as_Register(base), T9);
+          __ lh(as_Register(dst), AT, 0);
+        }
+      }
     }
   %}
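load_S_enc above is the fullest instance of the decision tree that every rewritten encoding follows. A compact model of the dispatch, distilled from the emitted sequences (the Shape names are mine, not HotSpot's):

    #include <cstdio>

    // Which code shape does the encoder pick for (base, index, scale, disp)?
    enum Shape {
      BASE_PLUS_DISP,   // lh(dst, base, disp): no index, simm16 disp
      GS_INDEXED,       // gslhx(dst, base, index/AT, disp): Loongson, simm8 disp
      ADD_THEN_DISP,    // AT = base + (index << scale); lh(dst, AT, disp)
      MATERIALIZE_DISP  // build disp in T9/AT first, then finish with disp 0
    };

    static bool is_simm(long v, int bits) {
      const long lim = 1L << (bits - 1);
      return -lim <= v && v < lim;
    }

    static Shape classify(bool has_index, long disp, bool loongson) {
      if (!has_index)
        return is_simm(disp, 16) ? BASE_PLUS_DISP : MATERIALIZE_DISP;
      if (loongson && is_simm(disp, 8)) return GS_INDEXED;
      if (is_simm(disp, 16))            return ADD_THEN_DISP;
      return MATERIALIZE_DISP;
    }

    int main() {
      printf("%d\n", classify(true, 4, true));        // GS_INDEXED
      printf("%d\n", classify(true, 0x12345, false)); // MATERIALIZE_DISP
      return 0;
    }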
 
   // Load Char (16bit unsigned)
-  enc_class load_C_enc (mRegI dst, umemory mem) %{
+  enc_class load_C_enc (mRegI dst, memory mem) %{
     MacroAssembler _masm(&cbuf);
     int dst = $dst$$reg;
     int base = $mem$$base;
     int index = $mem$$index;
     int scale = $mem$$scale;
     int disp = $mem$$disp;
 
-    assert(index == 0, "no index");
-    __ lhu(as_Register(dst), as_Register(base), disp);
+    if( index != 0 ) {
+      if (scale == 0) {
+        __ daddu(AT, as_Register(base), as_Register(index));
+      } else {
+        __ dsll(AT, as_Register(index), scale);
+        __ daddu(AT, as_Register(base), AT);
+      }
+      if( Assembler::is_simm16(disp) ) {
+        __ lhu(as_Register(dst), AT, disp);
+      } else {
+        __ move(T9, disp);
+        __ addu(AT, AT, T9);
+        __ lhu(as_Register(dst), AT, 0);
+      }
+    } else {
+      if( Assembler::is_simm16(disp) ) {
+        __ lhu(as_Register(dst), as_Register(base), disp);
+      } else {
+        __ move(T9, disp);
+        __ daddu(AT, as_Register(base), T9);
+        __ lhu(as_Register(dst), AT, 0);
+      }
+    }
   %}
 
   // Store Char (16bit unsigned)
   enc_class store_C_reg_enc (memory mem, mRegI src) %{
     MacroAssembler _masm(&cbuf);
     int src = $src$$reg;
     int base = $mem$$base;
     int index = $mem$$index;
     int scale = $mem$$scale;
     int disp = $mem$$disp;
 
     if( index != 0 ) {
-      assert(UseLoongsonISA, "Only supported for Loongson CPUs");
-      __ gsshx(as_Register(src), as_Register(base), as_Register(index), disp);
+      if( Assembler::is_simm16(disp) ) {
+        if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
+          if (scale == 0) {
+            __ gsshx(as_Register(src), as_Register(base), as_Register(index), disp);
+          } else {
+            __ dsll(AT, as_Register(index), scale);
+            __ gsshx(as_Register(src), as_Register(base), AT, disp);
+          }
+        } else {
+          if (scale == 0) {
+            __ addu(AT, as_Register(base), as_Register(index));
+          } else {
+            __ dsll(AT, as_Register(index), scale);
+            __ addu(AT, as_Register(base), AT);
+          }
+          __ sh(as_Register(src), AT, disp);
+        }
+      } else {
+        if (scale == 0) {
+          __ addu(AT, as_Register(base), as_Register(index));
+        } else {
+          __ dsll(AT, as_Register(index), scale);
+          __ addu(AT, as_Register(base), AT);
+        }
+        __ move(T9, disp);
+        if( UseLoongsonISA ) {
+          __ gsshx(as_Register(src), AT, T9, 0);
+        } else {
+          __ addu(AT, AT, T9);
+          __ sh(as_Register(src), AT, 0);
+        }
+      }
     } else {
-      __ sh(as_Register(src), as_Register(base), disp);
+      if( Assembler::is_simm16(disp) ) {
+        __ sh(as_Register(src), as_Register(base), disp);
+      } else {
+        __ move(T9, disp);
+        if( UseLoongsonISA ) {
+          __ gsshx(as_Register(src), as_Register(base), T9, 0);
+        } else {
+          __ addu(AT, as_Register(base), T9);
+          __ sh(as_Register(src), AT, 0);
+        }
+      }
     }
   %}
 
   enc_class store_C0_enc (memory mem) %{
     MacroAssembler _masm(&cbuf);
     int base = $mem$$base;
     int index = $mem$$index;
     int scale = $mem$$scale;
     int disp = $mem$$disp;
 
     if( index != 0 ) {
-      assert(UseLoongsonISA, "Only supported for Loongson CPUs");
-      __ gsshx(R0, as_Register(base), as_Register(index), disp);
+      if( Assembler::is_simm16(disp) ) {
+        if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
+          if (scale == 0) {
+            __ gsshx(R0, as_Register(base), as_Register(index), disp);
+          } else {
+            __ dsll(AT, as_Register(index), scale);
+            __ gsshx(R0, as_Register(base), AT, disp);
+          }
+        } else {
+          if (scale == 0) {
+            __ addu(AT, as_Register(base), as_Register(index));
+          } else {
+            __ dsll(AT, as_Register(index), scale);
+            __ addu(AT, as_Register(base), AT);
+          }
+          __ sh(R0, AT, disp);
+        }
+      } else {
+        if (scale == 0) {
+          __ addu(AT, as_Register(base), as_Register(index));
+        } else {
+          __ dsll(AT, as_Register(index), scale);
+          __ addu(AT, as_Register(base), AT);
+        }
+        __ move(T9, disp);
+        if( UseLoongsonISA ) {
+          __ gsshx(R0, AT, T9, 0);
+        } else {
+          __ addu(AT, AT, T9);
+          __ sh(R0, AT, 0);
+        }
+      }
     } else {
-      __ sh(R0, as_Register(base), disp);
+      if( Assembler::is_simm16(disp) ) {
+        __ sh(R0, as_Register(base), disp);
+      } else {
+        __ move(T9, disp);
+        if( UseLoongsonISA ) {
+          __ gsshx(R0, as_Register(base), T9, 0);
+        } else {
+          __ addu(AT, as_Register(base), T9);
+          __ sh(R0, AT, 0);
+        }
+      }
     }
   %}
 
   enc_class load_I_enc (mRegI dst, memory mem) %{
     MacroAssembler _masm(&cbuf);
     int dst = $dst$$reg;
     int base = $mem$$base;
     int index = $mem$$index;
     int scale = $mem$$scale;
     int disp = $mem$$disp;
 
     if( index != 0 ) {
-      assert(UseLoongsonISA, "Only supported for Loongson CPUs");
-      __ gslwx(as_Register(dst), as_Register(base), as_Register(index), disp);
+      if( Assembler::is_simm16(disp) ) {
+        if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
+          if (scale == 0) {
+            __ gslwx(as_Register(dst), as_Register(base), as_Register(index), disp);
+          } else {
+            __ dsll(AT, as_Register(index), scale);
+            __ gslwx(as_Register(dst), as_Register(base), AT, disp);
+          }
+        } else {
+          if (scale == 0) {
+            __ addu(AT, as_Register(base), as_Register(index));
+          } else {
+            __ dsll(AT, as_Register(index), scale);
+            __ addu(AT, as_Register(base), AT);
+          }
+          __ lw(as_Register(dst), AT, disp);
+        }
+      } else {
+        if (scale == 0) {
+          __ addu(AT, as_Register(base), as_Register(index));
+        } else {
+          __ dsll(AT, as_Register(index), scale);
+          __ addu(AT, as_Register(base), AT);
+        }
+        __ move(T9, disp);
+        if( UseLoongsonISA ) {
+          __ gslwx(as_Register(dst), AT, T9, 0);
+        } else {
+          __ addu(AT, AT, T9);
+          __ lw(as_Register(dst), AT, 0);
+        }
+      }
     } else {
-      __ lw(as_Register(dst), as_Register(base), disp);
+      if( Assembler::is_simm16(disp) ) {
+        __ lw(as_Register(dst), as_Register(base), disp);
+      } else {
+        __ move(T9, disp);
+        if( UseLoongsonISA ) {
+          __ gslwx(as_Register(dst), as_Register(base), T9, 0);
+        } else {
+          __ addu(AT, as_Register(base), T9);
+          __ lw(as_Register(dst), AT, 0);
+        }
+      }
     }
   %}
 
   enc_class store_I_reg_enc (memory mem, mRegI src) %{
     MacroAssembler _masm(&cbuf);
     int src = $src$$reg;
     int base = $mem$$base;
     int index = $mem$$index;
     int scale = $mem$$scale;
     int disp = $mem$$disp;
 
     if( index != 0 ) {
-      assert(UseLoongsonISA, "Only supported for Loongson CPUs");
-      __ gsswx(as_Register(src), as_Register(base), as_Register(index), disp);
+      if( Assembler::is_simm16(disp) ) {
+        if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
+          if (scale == 0) {
+            __ gsswx(as_Register(src), as_Register(base), as_Register(index), disp);
+          } else {
+            __ dsll(AT, as_Register(index), scale);
+            __ gsswx(as_Register(src), as_Register(base), AT, disp);
+          }
+        } else {
+          if (scale == 0) {
+            __ addu(AT, as_Register(base), as_Register(index));
+          } else {
+            __ dsll(AT, as_Register(index), scale);
+            __ addu(AT, as_Register(base), AT);
+          }
+          __ sw(as_Register(src), AT, disp);
+        }
+      } else {
+        if (scale == 0) {
+          __ addu(AT, as_Register(base), as_Register(index));
+        } else {
+          __ dsll(AT, as_Register(index), scale);
+          __ addu(AT, as_Register(base), AT);
+        }
+        __ move(T9, disp);
+        if( UseLoongsonISA ) {
+          __ gsswx(as_Register(src), AT, T9, 0);
+        } else {
+          __ addu(AT, AT, T9);
+          __ sw(as_Register(src), AT, 0);
+        }
+      }
     } else {
-      __ sw(as_Register(src), as_Register(base), disp);
+      if( Assembler::is_simm16(disp) ) {
+        __ sw(as_Register(src), as_Register(base), disp);
+      } else {
+        __ move(T9, disp);
+        if( UseLoongsonISA ) {
+          __ gsswx(as_Register(src), as_Register(base), T9, 0);
+        } else {
+          __ addu(AT, as_Register(base), T9);
+          __ sw(as_Register(src), AT, 0);
+        }
+      }
     }
   %}
 
-  enc_class store_I_immI0_enc (memory mem) %{
+  enc_class store_I_immI_enc (memory mem, immI src) %{
     MacroAssembler _masm(&cbuf);
     int base = $mem$$base;
     int index = $mem$$index;
     int scale = $mem$$scale;
     int disp = $mem$$disp;
+    int value = $src$$constant;
 
     if( index != 0 ) {
-      assert(UseLoongsonISA, "Only supported for Loongson CPUs");
-      __ gsswx(R0, as_Register(base), as_Register(index), disp);
+      if ( UseLoongsonISA ) {
+        if ( Assembler::is_simm(disp, 8) ) {
+          if ( scale == 0 ) {
+            if ( value == 0 ) {
+              __ gsswx(R0, as_Register(base), as_Register(index), disp);
+            } else {
+              __ move(T9, value);
+              __ gsswx(T9, as_Register(base), as_Register(index), disp);
+            }
+          } else {
+            __ dsll(AT, as_Register(index), scale);
+            if ( value == 0 ) {
+              __ gsswx(R0, as_Register(base), AT, disp);
+            } else {
+              __ move(T9, value);
+              __ gsswx(T9, as_Register(base), AT, disp);
+            }
+          }
+        } else if ( Assembler::is_simm16(disp) ) {
+          if ( scale == 0 ) {
+            __ daddu(AT, as_Register(base), as_Register(index));
+            if ( value == 0 ) {
+              __ sw(R0, AT, disp);
+            } else {
+              __ move(T9, value);
+              __ sw(T9, AT, disp);
+            }
+          } else {
+            __ dsll(AT, as_Register(index), scale);
+            __ daddu(AT, as_Register(base), AT);
+            if ( value == 0 ) {
+              __ sw(R0, AT, disp);
+            } else {
+              __ move(T9, value);
+              __ sw(T9, AT, disp);
+            }
+          }
+        } else {
+          if ( scale == 0 ) {
+            __ move(T9, disp);
+            __ daddu(AT, as_Register(index), T9);
+            if ( value == 0 ) {
+              __ gsswx(R0, as_Register(base), AT, 0);
+            } else {
+              __ move(T9, value);
+              __ gsswx(T9, as_Register(base), AT, 0);
+            }
+          } else {
+            __ dsll(AT, as_Register(index), scale);
+            __ move(T9, disp);
+            __ daddu(AT, AT, T9);
+            if ( value == 0 ) {
+              __ gsswx(R0, as_Register(base), AT, 0);
+            } else {
+              __ move(T9, value);
+              __ gsswx(T9, as_Register(base), AT, 0);
+            }
+          }
+        }
+      } else { //not use loongson isa
+        if (scale == 0) {
+          __ daddu(AT, as_Register(base), as_Register(index));
+        } else {
+          __ dsll(AT, as_Register(index), scale);
+          __ daddu(AT, as_Register(base), AT);
+        }
+        if( Assembler::is_simm16(disp) ) {
+          if (value == 0) {
+            __ sw(R0, AT, disp);
+          } else {
+            __ move(T9, value);
+            __ sw(T9, AT, disp);
+          }
+        } else {
+          if (value == 0) {
+            __ move(T9, disp);
+            __ daddu(AT, AT, T9);
+            __ sw(R0, AT, 0);
+          } else {
+            __ move(T9, disp);
+            __ daddu(AT, AT, T9);
+            __ move(T9, value);
+            __ sw(T9, AT, 0);
+          }
+        }
+      }
     } else {
-      __ sw(R0, as_Register(base), disp);
+      if ( UseLoongsonISA ) {
+        if ( Assembler::is_simm16(disp) ) {
+          if ( value == 0 ) {
+            __ sw(R0, as_Register(base), disp);
+          } else {
+            __ move(AT, value);
+            __ sw(AT, as_Register(base), disp);
+          }
+        } else {
+          __ move(T9, disp);
+          if ( value == 0 ) {
+            __ gsswx(R0, as_Register(base), T9, 0);
+          } else {
+            __ move(AT, value);
+            __ gsswx(AT, as_Register(base), T9, 0);
+          }
+        }
+      } else {
+        if( Assembler::is_simm16(disp) ) {
+          if (value == 0) {
+            __ sw(R0, as_Register(base), disp);
+          } else {
+            __ move(AT, value);
+            __ sw(AT, as_Register(base), disp);
+          }
+        } else {
+          if (value == 0) {
+            __ move(T9, disp);
+            __ daddu(AT, as_Register(base), T9);
+            __ sw(R0, AT, 0);
+          } else {
+            __ move(T9, disp);
+            __ daddu(AT, as_Register(base), T9);
+            __ move(T9, value);
+            __ sw(T9, AT, 0);
+          }
+        }
+      }
     }
   %}
 
-  enc_class load_N_enc (mRegN dst, umemory mem) %{
+  enc_class load_N_enc (mRegN dst, memory mem) %{
     MacroAssembler _masm(&cbuf);
     int dst = $dst$$reg;
     int base = $mem$$base;
     int index = $mem$$index;
     int scale = $mem$$scale;
     int disp = $mem$$disp;
-
     relocInfo::relocType disp_reloc = $mem->disp_reloc();
     assert(disp_reloc == relocInfo::none, "cannot have disp");
 
-    assert(index == 0, "no index");
-    __ lwu(as_Register(dst), as_Register(base), disp);
+    if( index != 0 ) {
+      if (scale == 0) {
+        __ daddu(AT, as_Register(base), as_Register(index));
+      } else {
+        __ dsll(AT, as_Register(index), scale);
+        __ daddu(AT, as_Register(base), AT);
+      }
+      if( Assembler::is_simm16(disp) ) {
+        __ lwu(as_Register(dst), AT, disp);
+      } else {
+        __ set64(T9, disp);
+        __ daddu(AT, AT, T9);
+        __ lwu(as_Register(dst), AT, 0);
+      }
+    } else {
+      if( Assembler::is_simm16(disp) ) {
+        __ lwu(as_Register(dst), as_Register(base), disp);
+      } else {
+        __ set64(T9, disp);
+        __ daddu(AT, as_Register(base), T9);
+        __ lwu(as_Register(dst), AT, 0);
+      }
+    }
+
   %}
 
 
   enc_class load_P_enc (mRegP dst, memory mem) %{
     MacroAssembler _masm(&cbuf);
     int dst = $dst$$reg;
     int base = $mem$$base;
     int index = $mem$$index;
     int scale = $mem$$scale;
     int disp = $mem$$disp;
-
     relocInfo::relocType disp_reloc = $mem->disp_reloc();
     assert(disp_reloc == relocInfo::none, "cannot have disp");
 
     if( index != 0 ) {
-      assert(UseLoongsonISA, "Only supported for Loongson CPUs");
-      __ gsldx(as_Register(dst), as_Register(base), as_Register(index), disp);
+      if ( UseLoongsonISA ) {
+        if ( Assembler::is_simm(disp, 8) ) {
+          if ( scale != 0 ) {
+            __ dsll(AT, as_Register(index), scale);
+            __ gsldx(as_Register(dst), as_Register(base), AT, disp);
+          } else {
+            __ gsldx(as_Register(dst), as_Register(base), as_Register(index), disp);
+          }
+        } else if ( Assembler::is_simm16(disp) ){
+          if ( scale != 0 ) {
+            __ dsll(AT, as_Register(index), scale);
+            __ daddu(AT, AT, as_Register(base));
+          } else {
+            __ daddu(AT, as_Register(index), as_Register(base));
+          }
+          __ ld(as_Register(dst), AT, disp);
+        } else {
+          if ( scale != 0 ) {
+            __ dsll(AT, as_Register(index), scale);
+            __ move(T9, disp);
+            __ daddu(AT, AT, T9);
+          } else {
+            __ move(T9, disp);
+            __ daddu(AT, as_Register(index), T9);
+          }
+          __ gsldx(as_Register(dst), as_Register(base), AT, 0);
+        }
+      } else { //not use loongson isa
+        if (scale == 0) {
+          __ daddu(AT, as_Register(base), as_Register(index));
+        } else {
+          __ dsll(AT, as_Register(index), scale);
+          __ daddu(AT, as_Register(base), AT);
+        }
+        if( Assembler::is_simm16(disp) ) {
+          __ ld(as_Register(dst), AT, disp);
+        } else {
+          __ set64(T9, disp);
+          __ daddu(AT, AT, T9);
+          __ ld(as_Register(dst), AT, 0);
+        }
+      }
     } else {
-      __ ld(as_Register(dst), as_Register(base), disp);
+      if ( UseLoongsonISA ) {
+        if ( Assembler::is_simm16(disp) ){
+          __ ld(as_Register(dst), as_Register(base), disp);
+        } else {
+          __ set64(T9, disp);
+          __ gsldx(as_Register(dst), as_Register(base), T9, 0);
+        }
+      } else { //not use loongson isa
+        if( Assembler::is_simm16(disp) ) {
+          __ ld(as_Register(dst), as_Register(base), disp);
+        } else {
+          __ set64(T9, disp);
+          __ daddu(AT, as_Register(base), T9);
+          __ ld(as_Register(dst), AT, 0);
+        }
+      }
     }
+    // if( disp_reloc != relocInfo::none) __ ld(as_Register(dst), as_Register(dst), 0);
   %}
 
   enc_class store_P_reg_enc (memory mem, mRegP src) %{
     MacroAssembler _masm(&cbuf);
     int src = $src$$reg;
     int base = $mem$$base;
     int index = $mem$$index;
     int scale = $mem$$scale;
     int disp = $mem$$disp;
 
     if( index != 0 ) {
-      assert(UseLoongsonISA, "Only supported for Loongson CPUs");
-      __ gssdx(as_Register(src), as_Register(base), as_Register(index), disp);
+      if ( UseLoongsonISA ){
+        if ( Assembler::is_simm(disp, 8) ) {
+          if ( scale == 0 ) {
+            __ gssdx(as_Register(src), as_Register(base), as_Register(index), disp);
+          } else {
+            __ dsll(AT, as_Register(index), scale);
+            __ gssdx(as_Register(src), as_Register(base), AT, disp);
+          }
+        } else if ( Assembler::is_simm16(disp) ) {
+          if ( scale == 0 ) {
+            __ daddu(AT, as_Register(base), as_Register(index));
+          } else {
+            __ dsll(AT, as_Register(index), scale);
+            __ daddu(AT, as_Register(base), AT);
+          }
+          __ sd(as_Register(src), AT, disp);
+        } else {
+          if ( scale == 0 ) {
+            __ move(T9, disp);
+            __ daddu(AT, as_Register(index), T9);
+          } else {
+            __ dsll(AT, as_Register(index), scale);
+            __ move(T9, disp);
+            __ daddu(AT, AT, T9);
+          }
+          __ gssdx(as_Register(src), as_Register(base), AT, 0);
+        }
+      } else { //not use loongson isa
+        if (scale == 0) {
+          __ daddu(AT, as_Register(base), as_Register(index));
+        } else {
+          __ dsll(AT, as_Register(index), scale);
+          __ daddu(AT, as_Register(base), AT);
+        }
+        if( Assembler::is_simm16(disp) ) {
+          __ sd(as_Register(src), AT, disp);
+        } else {
+          __ move(T9, disp);
+          __ daddu(AT, AT, T9);
+          __ sd(as_Register(src), AT, 0);
+        }
+      }
     } else {
-      __ sd(as_Register(src), as_Register(base), disp);
+      if ( UseLoongsonISA ) {
+        if ( Assembler::is_simm16(disp) ) {
+          __ sd(as_Register(src), as_Register(base), disp);
+        } else {
+          __ move(T9, disp);
+          __ gssdx(as_Register(src), as_Register(base), T9, 0);
+        }
+      } else {
+        if( Assembler::is_simm16(disp) ) {
+          __ sd(as_Register(src), as_Register(base), disp);
+        } else {
+          __ move(T9, disp);
+          __ daddu(AT, as_Register(base), T9);
+          __ sd(as_Register(src), AT, 0);
+        }
+      }
     }
   %}
 
   enc_class store_N_reg_enc (memory mem, mRegN src) %{
     MacroAssembler _masm(&cbuf);
     int src = $src$$reg;
     int base = $mem$$base;
     int index = $mem$$index;
     int scale = $mem$$scale;
     int disp = $mem$$disp;
 
     if( index != 0 ) {
-      assert(UseLoongsonISA, "Only supported for Loongson CPUs");
-      __ gsswx(as_Register(src), as_Register(base), as_Register(index), disp);
+      if ( UseLoongsonISA ){
+        if ( Assembler::is_simm(disp, 8) ) {
+          if ( scale == 0 ) {
+            __ gsswx(as_Register(src), as_Register(base), as_Register(index), disp);
+          } else {
+            __ dsll(AT, as_Register(index), scale);
+            __ gsswx(as_Register(src), as_Register(base), AT, disp);
+          }
+        } else if ( Assembler::is_simm16(disp) ) {
+          if ( scale == 0 ) {
+            __ daddu(AT, as_Register(base), as_Register(index));
+          } else {
+            __ dsll(AT, as_Register(index), scale);
+            __ daddu(AT, as_Register(base), AT);
+          }
+          __ sw(as_Register(src), AT, disp);
+        } else {
+          if ( scale == 0 ) {
+            __ move(T9, disp);
+            __ daddu(AT, as_Register(index), T9);
+          } else {
+            __ dsll(AT, as_Register(index), scale);
+            __ move(T9, disp);
+            __ daddu(AT, AT, T9);
+          }
+          __ gsswx(as_Register(src), as_Register(base), AT, 0);
+        }
+      } else { //not use loongson isa
+        if (scale == 0) {
+          __ daddu(AT, as_Register(base), as_Register(index));
+        } else {
+          __ dsll(AT, as_Register(index), scale);
+          __ daddu(AT, as_Register(base), AT);
+        }
+        if( Assembler::is_simm16(disp) ) {
+          __ sw(as_Register(src), AT, disp);
+        } else {
+          __ move(T9, disp);
+          __ daddu(AT, AT, T9);
+          __ sw(as_Register(src), AT, 0);
+        }
+      }
     } else {
-      __ sw(as_Register(src), as_Register(base), disp);
+      if ( UseLoongsonISA ) {
+        if ( Assembler::is_simm16(disp) ) {
+          __ sw(as_Register(src), as_Register(base), disp);
+        } else {
+          __ move(T9, disp);
+          __ gsswx(as_Register(src), as_Register(base), T9, 0);
+        }
+      } else {
+        if( Assembler::is_simm16(disp) ) {
+          __ sw(as_Register(src), as_Register(base), disp);
+        } else {
+          __ move(T9, disp);
+          __ daddu(AT, as_Register(base), T9);
+          __ sw(as_Register(src), AT, 0);
+        }
+      }
     }
   %}
 
   enc_class store_P_immP0_enc (memory mem) %{
     MacroAssembler _masm(&cbuf);
     int base = $mem$$base;
     int index = $mem$$index;
     int scale = $mem$$scale;
     int disp = $mem$$disp;
 
     if( index != 0 ) {
-      assert(UseLoongsonISA, "Only supported for Loongson CPUs");
-      __ gssdx(R0, as_Register(base), as_Register(index), disp);
+      if (scale == 0) {
+        if( Assembler::is_simm16(disp) ) {
+          if (UseLoongsonISA && Assembler::is_simm(disp, 8)) {
+            __ gssdx(R0, as_Register(base), as_Register(index), disp);
+          } else {
+            __ daddu(AT, as_Register(base), as_Register(index));
+            __ sd(R0, AT, disp);
+          }
+        } else {
+          __ daddu(AT, as_Register(base), as_Register(index));
+          __ move(T9, disp);
+          if(UseLoongsonISA) {
+            __ gssdx(R0, AT, T9, 0);
+          } else {
+            __ daddu(AT, AT, T9);
+            __ sd(R0, AT, 0);
+          }
+        }
+      } else {
+        __ dsll(AT, as_Register(index), scale);
+        if( Assembler::is_simm16(disp) ) {
+          if (UseLoongsonISA && Assembler::is_simm(disp, 8)) {
+            __ gssdx(R0, as_Register(base), AT, disp);
+          } else {
+            __ daddu(AT, as_Register(base), AT);
+            __ sd(R0, AT, disp);
+          }
+        } else {
+          __ daddu(AT, as_Register(base), AT);
+          __ move(T9, disp);
+          if (UseLoongsonISA) {
+            __ gssdx(R0, AT, T9, 0);
+          } else {
+            __ daddu(AT, AT, T9);
+            __ sd(R0, AT, 0);
+          }
+        }
+      }
     } else {
-      __ sd(R0, as_Register(base), disp);
+      if( Assembler::is_simm16(disp) ) {
+        __ sd(R0, as_Register(base), disp);
+      } else {
+        __ move(T9, disp);
+        if (UseLoongsonISA) {
+          __ gssdx(R0, as_Register(base), T9, 0);
+        } else {
+          __ daddu(AT, as_Register(base), T9);
+          __ sd(R0, AT, 0);
+        }
+      }
     }
   %}
 
-
-  enc_class storeImmN0_enc(memory mem) %{
+  enc_class store_P_immP_enc (memory mem, immP31 src) %{
     MacroAssembler _masm(&cbuf);
     int base = $mem$$base;
     int index = $mem$$index;
     int scale = $mem$$scale;
     int disp = $mem$$disp;
-
-    if(index != 0){
-      assert(UseLoongsonISA, "Only supported for Loongson CPUs");
-      __ gsswx(R0, as_Register(base), as_Register(index), disp);
-    } else {
-      __ sw(R0, as_Register(base), disp);
-    }
+    long value = $src$$constant;
+
+    if( index != 0 ) {
+      if (scale == 0) {
+        __ daddu(AT, as_Register(base), as_Register(index));
+      } else {
+        __ dsll(AT, as_Register(index), scale);
+        __ daddu(AT, as_Register(base), AT);
+      }
+      if( Assembler::is_simm16(disp) ) {
+        if (value == 0) {
+          __ sd(R0, AT, disp);
+        } else {
+          __ move(T9, value);
+          __ sd(T9, AT, disp);
+        }
+      } else {
+        if (value == 0) {
+          __ move(T9, disp);
+          __ daddu(AT, AT, T9);
+          __ sd(R0, AT, 0);
+        } else {
+          __ move(T9, disp);
+          __ daddu(AT, AT, T9);
+          __ move(T9, value);
+          __ sd(T9, AT, 0);
+        }
+      }
+    } else {
+      if( Assembler::is_simm16(disp) ) {
+        if (value == 0) {
+          __ sd(R0, as_Register(base), disp);
+        } else {
+          __ move(AT, value);
+          __ sd(AT, as_Register(base), disp);
+        }
+      } else {
+        if (value == 0) {
+          __ move(T9, disp);
+          __ daddu(AT, as_Register(base), T9);
+          __ sd(R0, AT, 0);
+        } else {
+          __ move(T9, disp);
+          __ daddu(AT, as_Register(base), T9);
+          __ move(T9, value);
+          __ sd(T9, AT, 0);
+        }
+      }
+    }
+  %}
+
+  enc_class storeImmN0_enc(memory mem, ImmN0 src) %{
+    MacroAssembler _masm(&cbuf);
+    int base = $mem$$base;
+    int index = $mem$$index;
+    int scale = $mem$$scale;
+    int disp = $mem$$disp;
+
+    if(index!=0){
+      if (scale == 0) {
+        __ daddu(AT, as_Register(base), as_Register(index));
+      } else {
+        __ dsll(AT, as_Register(index), scale);
+        __ daddu(AT, as_Register(base), AT);
+      }
+
+      if( Assembler::is_simm16(disp) ) {
+        __ sw(R0, AT, disp);
+      } else {
+        __ move(T9, disp);
+        __ daddu(AT, AT, T9);
+        __ sw(R0, AT, 0);
+      }
+    }
+    else {
+      if( Assembler::is_simm16(disp) ) {
+        __ sw(R0, as_Register(base), disp);
+      } else {
+        __ move(T9, disp);
+        __ daddu(AT, as_Register(base), T9);
+        __ sw(R0, AT, 0);
+      }
+    }
   %}
+
+  enc_class storeImmN_enc (memory mem, immN src) %{
+    MacroAssembler _masm(&cbuf);
+    int base = $mem$$base;
+    int index = $mem$$index;
+    int scale = $mem$$scale;
+    int disp = $mem$$disp;
+    long * value = (long *)$src$$constant;
+
+    if (value == NULL) {
+      guarantee(Assembler::is_simm16(disp), "FIXME: disp is not simm16!");
+      if (index == 0) {
+        __ sw(R0, as_Register(base), disp);
+      } else {
+        if (scale == 0) {
+          __ daddu(AT, as_Register(base), as_Register(index));
+        } else {
+          __ dsll(AT, as_Register(index), scale);
+          __ daddu(AT, as_Register(base), AT);
+        }
+        __ sw(R0, AT, disp);
+      }
+
+      return;
+    }
+
+    int oop_index = __ oop_recorder()->find_index((jobject)value);
+    RelocationHolder rspec = oop_Relocation::spec(oop_index);
+
+    guarantee(scale == 0, "FIXME: scale is not zero !");
+    guarantee(value != 0, "FIXME: value is zero !");
+
+    if (index != 0) {
+      if (scale == 0) {
+        __ daddu(AT, as_Register(base), as_Register(index));
+      } else {
+        __ dsll(AT, as_Register(index), scale);
+        __ daddu(AT, as_Register(base), AT);
+      }
+      if( Assembler::is_simm16(disp) ) {
+        if(rspec.type() != relocInfo::none) {
+          __ relocate(rspec, Assembler::narrow_oop_operand);
+          __ patchable_set48(T9, oop_index);
+        } else {
+          __ set64(T9, oop_index);
+        }
+        __ sw(T9, AT, disp);
+      } else {
+        __ move(T9, disp);
+        __ addu(AT, AT, T9);
+
+        if(rspec.type() != relocInfo::none) {
+          __ relocate(rspec, Assembler::narrow_oop_operand);
+          __ patchable_set48(T9, oop_index);
+        } else {
+          __ set64(T9, oop_index);
+        }
+        __ sw(T9, AT, 0);
+      }
+    }
+    else {
+      if( Assembler::is_simm16(disp) ) {
+        if($src->constant_reloc() != relocInfo::none) {
+          __ relocate(rspec, Assembler::narrow_oop_operand);
+          __ patchable_set48(T9, oop_index);
+        } else {
+          __ set64(T9, oop_index);
+        }
+        __ sw(T9, as_Register(base), disp);
+      } else {
+        __ move(T9, disp);
+        __ daddu(AT, as_Register(base), T9);
+
+        if($src->constant_reloc() != relocInfo::none){
+          __ relocate(rspec, Assembler::narrow_oop_operand);
+          __ patchable_set48(T9, oop_index);
+        } else {
+          __ set64(T9, oop_index);
+        }
+        __ sw(T9, AT, 0);
+      }
+    }
+  %}
+
+  enc_class storeImmNKlass_enc (memory mem, immNKlass src) %{
+    MacroAssembler _masm(&cbuf);
+
+    assert (UseCompressedOops, "should only be used for compressed headers");
+    assert (__ oop_recorder() != NULL, "this assembler needs an OopRecorder");
+
+    int base = $mem$$base;
+    int index = $mem$$index;
+    int scale = $mem$$scale;
+    int disp = $mem$$disp;
+    long value = $src$$constant;
+
+    int klass_index = __ oop_recorder()->find_index((Klass*)value);
+    RelocationHolder rspec = metadata_Relocation::spec(klass_index);
+    long narrowp = Klass::encode_klass((Klass*)value);
+
+    if(index!=0){
+      if (scale == 0) {
+        __ daddu(AT, as_Register(base), as_Register(index));
+      } else {
+        __ dsll(AT, as_Register(index), scale);
+        __ daddu(AT, as_Register(base), AT);
+      }
+
+      if( Assembler::is_simm16(disp) ) {
+        if(rspec.type() != relocInfo::none){
+          __ relocate(rspec, Assembler::narrow_oop_operand);
+          __ patchable_set48(T9, narrowp);
+        } else {
+          __ set64(T9, narrowp);
+        }
+        __ sw(T9, AT, disp);
+      } else {
+        __ move(T9, disp);
+        __ daddu(AT, AT, T9);
+
+        if(rspec.type() != relocInfo::none){
+          __ relocate(rspec, Assembler::narrow_oop_operand);
+          __ patchable_set48(T9, narrowp);
+        } else {
+          __ set64(T9, narrowp);
+        }
+
+        __ sw(T9, AT, 0);
+      }
+    } else {
+      if( Assembler::is_simm16(disp) ) {
+        if(rspec.type() != relocInfo::none){
+          __ relocate(rspec, Assembler::narrow_oop_operand);
+          __ patchable_set48(T9, narrowp);
+        }
+        else {
+          __ set64(T9, narrowp);
+        }
+        __ sw(T9, as_Register(base), disp);
+      } else {
+        __ move(T9, disp);
+        __ daddu(AT, as_Register(base), T9);
+
+        if(rspec.type() != relocInfo::none){
+          __ relocate(rspec, Assembler::narrow_oop_operand);
+          __ patchable_set48(T9, narrowp);
+        } else {
+          __ set64(T9, narrowp);
+        }
+        __ sw(T9, AT, 0);
+      }
+    }
+  %}
 
   enc_class load_L_enc (mRegL dst, memory mem) %{
     MacroAssembler _masm(&cbuf);
     int base = $mem$$base;
     int index = $mem$$index;
     int scale = $mem$$scale;
     int disp = $mem$$disp;
     Register dst_reg = as_Register($dst$$reg);
 
+    // For implicit null check
+    __ lb(AT, as_Register(base), 0);
+
     if( index != 0 ) {
-      assert(UseLoongsonISA, "Only supported for Loongson CPUs");
-      __ gsldx(dst_reg, as_Register(base), as_Register(index), disp);
+      if (scale == 0) {
+        __ daddu(AT, as_Register(base), as_Register(index));
+      } else {
+        __ dsll(AT, as_Register(index), scale);
+        __ daddu(AT, as_Register(base), AT);
+      }
+      if( Assembler::is_simm16(disp) ) {
+        __ ld(dst_reg, AT, disp);
+      } else {
+        __ move(T9, disp);
+        __ daddu(AT, AT, T9);
+        __ ld(dst_reg, AT, 0);
+      }
     } else {
-      __ ld(dst_reg, as_Register(base), disp);
+      if( Assembler::is_simm16(disp) ) {
+        __ ld(dst_reg, as_Register(base), disp);
+      } else {
+        __ move(T9, disp);
+        __ daddu(AT, as_Register(base), T9);
+        __ ld(dst_reg, AT, 0);
+      }
     }
   %}
 
   enc_class store_L_reg_enc (memory mem, mRegL src) %{
     MacroAssembler _masm(&cbuf);
     int base = $mem$$base;
     int index = $mem$$index;
     int scale = $mem$$scale;
     int disp = $mem$$disp;
     Register src_reg = as_Register($src$$reg);
 
     if( index != 0 ) {
-      assert(UseLoongsonISA, "Only supported for Loongson CPUs");
-      __ gssdx(src_reg, as_Register(base), as_Register(index), disp);
+      if (scale == 0) {
+        __ daddu(AT, as_Register(base), as_Register(index));
+      } else {
+        __ dsll(AT, as_Register(index), scale);
+        __ daddu(AT, as_Register(base), AT);
+      }
+      if( Assembler::is_simm16(disp) ) {
+        __ sd(src_reg, AT, disp);
+      } else {
+        __ move(T9, disp);
+        __ daddu(AT, AT, T9);
+        __ sd(src_reg, AT, 0);
+      }
     } else {
-      __ sd(src_reg, as_Register(base), disp);
+      if( Assembler::is_simm16(disp) ) {
+        __ sd(src_reg, as_Register(base), disp);
+      } else {
+        __ move(T9, disp);
+        __ daddu(AT, as_Register(base), T9);
+        __ sd(src_reg, AT, 0);
+      }
     }
   %}
 
-  enc_class store_L_immL0_enc (memory mem) %{
+  enc_class store_L_immL0_enc (memory mem, immL0 src) %{
     MacroAssembler _masm(&cbuf);
     int base = $mem$$base;
     int index = $mem$$index;
     int scale = $mem$$scale;
     int disp = $mem$$disp;
 
     if( index != 0 ) {
-      assert(UseLoongsonISA, "Only supported for Loongson CPUs");
-      __ gssdx(R0, as_Register(base), as_Register(index), disp);
+      // For implicit null check
+      __ lb(AT, as_Register(base), 0);
+
+      if (scale == 0) {
+        __ daddu(AT, as_Register(base), as_Register(index));
+      } else {
+        __ dsll(AT, as_Register(index), scale);
+        __ daddu(AT, as_Register(base), AT);
+      }
+      if( Assembler::is_simm16(disp) ) {
+        __ sd(R0, AT, disp);
+      } else {
+        __ move(T9, disp);
+        __ addu(AT, AT, T9);
+        __ sd(R0, AT, 0);
+      }
     } else {
-      __ sd(R0, as_Register(base), disp);
+      if( Assembler::is_simm16(disp) ) {
+        __ sd(R0, as_Register(base), disp);
+      } else {
+        __ move(T9, disp);
+        __ addu(AT, as_Register(base), T9);
+        __ sd(R0, AT, 0);
+      }
+    }
+  %}
+
+  enc_class store_L_immL_enc (memory mem, immL src) %{
+    MacroAssembler _masm(&cbuf);
+    int base = $mem$$base;
+    int index = $mem$$index;
+    int scale = $mem$$scale;
+    int disp = $mem$$disp;
+    long imm = $src$$constant;
+
+    if( index != 0 ) {
+      if (scale == 0) {
+        __ daddu(AT, as_Register(base), as_Register(index));
+      } else {
+        __ dsll(AT, as_Register(index), scale);
+        __ daddu(AT, as_Register(base), AT);
+      }
+      if( Assembler::is_simm16(disp) ) {
+        __ set64(T9, imm);
+        __ sd(T9, AT, disp);
+      } else {
+        __ move(T9, disp);
+        __ addu(AT, AT, T9);
+        __ set64(T9, imm);
+        __ sd(T9, AT, 0);
+      }
+    } else {
+      if( Assembler::is_simm16(disp) ) {
+        __ move(AT, as_Register(base));
+        __ set64(T9, imm);
+        __ sd(T9, AT, disp);
+      } else {
+        __ move(T9, disp);
+        __ addu(AT, as_Register(base), T9);
+        __ set64(T9, imm);
+        __ sd(T9, AT, 0);
+      }
     }
   %}
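All of the out-of-range paths above start by building the displacement in a scratch register (__ move(T9, disp), or __ set64 for 64-bit constants), so they pay extra instructions before the actual memory access. A sketch of what materializing a 32-bit displacement classically costs on MIPS (the exact MacroAssembler::move expansion is not shown in this hunk, so treat the sequence as an assumption):

    #include <cstdint>
    #include <cstdio>

    // Classic MIPS 32-bit immediate build: lui sets the upper 16 bits,
    // ori fills in the lower 16 (ori zero-extends, so no carry fixup).
    // This is why every enc_class tries the simm16/simm8 fast paths first.
    void emit_li32(int32_t disp) {
      uint32_t u = (uint32_t)disp;
      printf("lui t9, 0x%04x\n", u >> 16);
      if (u & 0xffff)
        printf("ori t9, t9, 0x%04x\n", u & 0xffff);
    }

    int main() {
      emit_li32(0x12348);  // two instructions before the load/store itself
      return 0;
    }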
 
   enc_class load_F_enc (regF dst, memory mem) %{
     MacroAssembler _masm(&cbuf);
     int base = $mem$$base;
     int index = $mem$$index;
     int scale = $mem$$scale;
     int disp = $mem$$disp;
     FloatRegister dst = $dst$$FloatRegister;
 
     if( index != 0 ) {
-      assert(UseLoongsonISA, "Only supported for Loongson CPUs");
-      __ gslwxc1(dst, as_Register(base), as_Register(index), disp);
+      if( Assembler::is_simm16(disp) ) {
+        if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
+          if (scale == 0) {
+            __ gslwxc1(dst, as_Register(base), as_Register(index), disp);
+          } else {
+            __ dsll(AT, as_Register(index), scale);
+            __ gslwxc1(dst, as_Register(base), AT, disp);
+          }
+        } else {
+          if (scale == 0) {
+            __ daddu(AT, as_Register(base), as_Register(index));
+          } else {
+            __ dsll(AT, as_Register(index), scale);
+            __ daddu(AT, as_Register(base), AT);
+          }
+          __ lwc1(dst, AT, disp);
+        }
+      } else {
+        if (scale == 0) {
+          __ daddu(AT, as_Register(base), as_Register(index));
+        } else {
+          __ dsll(AT, as_Register(index), scale);
+          __ daddu(AT, as_Register(base), AT);
+        }
+        __ move(T9, disp);
+        if( UseLoongsonISA ) {
+          __ gslwxc1(dst, AT, T9, 0);
+        } else {
+          __ daddu(AT, AT, T9);
+          __ lwc1(dst, AT, 0);
+        }
+      }
     } else {
-      __ lwc1(dst, as_Register(base), disp);
+      if( Assembler::is_simm16(disp) ) {
+        __ lwc1(dst, as_Register(base), disp);
+      } else {
+        __ move(T9, disp);
+        if( UseLoongsonISA ) {
+          __ gslwxc1(dst, as_Register(base), T9, 0);
+        } else {
+          __ daddu(AT, as_Register(base), T9);
+          __ lwc1(dst, AT, 0);
+        }
+      }
     }
   %}
 
   enc_class store_F_reg_enc (memory mem, regF src) %{
     MacroAssembler _masm(&cbuf);
     int base = $mem$$base;
     int index = $mem$$index;
     int scale = $mem$$scale;
     int disp = $mem$$disp;
     FloatRegister src = $src$$FloatRegister;
 
     if( index != 0 ) {
-      assert(UseLoongsonISA, "Only supported for Loongson CPUs");
-      __ gsswxc1(src, as_Register(base), as_Register(index), disp);
+      if( Assembler::is_simm16(disp) ) {
+        if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
+          if (scale == 0) {
+            __ gsswxc1(src, as_Register(base), as_Register(index), disp);
+          } else {
+            __ dsll(AT, as_Register(index), scale);
+            __ gsswxc1(src, as_Register(base), AT, disp);
+          }
+        } else {
+          if (scale == 0) {
+            __ daddu(AT, as_Register(base), as_Register(index));
+          } else {
+            __ dsll(AT, as_Register(index), scale);
+            __ daddu(AT, as_Register(base), AT);
+          }
+          __ swc1(src, AT, disp);
+        }
+      } else {
+        if (scale == 0) {
+          __ daddu(AT, as_Register(base), as_Register(index));
+        } else {
+          __ dsll(AT, as_Register(index), scale);
+          __ daddu(AT, as_Register(base), AT);
+        }
+        __ move(T9, disp);
+        if( UseLoongsonISA ) {
+          __ gsswxc1(src, AT, T9, 0);
+        } else {
+          __ daddu(AT, AT, T9);
+          __ swc1(src, AT, 0);
+        }
+      }
     } else {
-      __ swc1(src, as_Register(base), disp);
+      if( Assembler::is_simm16(disp) ) {
+        __ swc1(src, as_Register(base), disp);
+      } else {
+        __ move(T9, disp);
+        if( UseLoongsonISA ) {
+          __ gslwxc1(src, as_Register(base), T9, 0);
+        } else {
+          __ daddu(AT, as_Register(base), T9);
+          __ swc1(src, AT, 0);
+        }
+      }
     }
   %}
 
   enc_class load_D_enc (regD dst, memory mem) %{
     MacroAssembler _masm(&cbuf);
     int base = $mem$$base;
     int index = $mem$$index;
     int scale = $mem$$scale;
     int disp = $mem$$disp;
     FloatRegister dst_reg = as_FloatRegister($dst$$reg);
 
     if( index != 0 ) {
-      assert(UseLoongsonISA, "Only supported for Loongson CPUs");
-      __ gsldxc1(dst_reg, as_Register(base), as_Register(index), disp);
+      if( Assembler::is_simm16(disp) ) {
+        if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
+          if (scale == 0) {
+            __ gsldxc1(dst_reg, as_Register(base), as_Register(index), disp);
+          } else {
+            __ dsll(AT, as_Register(index), scale);
+            __ gsldxc1(dst_reg, as_Register(base), AT, disp);
+          }
+        } else {
+          if (scale == 0) {
+            __ daddu(AT, as_Register(base), as_Register(index));
+          } else {
+            __ dsll(AT, as_Register(index), scale);
+            __ daddu(AT, as_Register(base), AT);
+          }
+          __ ldc1(dst_reg, AT, disp);
+        }
+      } else {
+        if (scale == 0) {
+          __ daddu(AT, as_Register(base), as_Register(index));
+        } else {
+          __ dsll(AT, as_Register(index), scale);
+          __ daddu(AT, as_Register(base), AT);
+        }
+        __ move(T9, disp);
+        if( UseLoongsonISA ) {
+          __ gsldxc1(dst_reg, AT, T9, 0);
+        } else {
+          __ addu(AT, AT, T9);
+          __ ldc1(dst_reg, AT, 0);
+        }
+      }
     } else {
-      __ ldc1(dst_reg, as_Register(base), disp);
+      if( Assembler::is_simm16(disp) ) {
+        __ ldc1(dst_reg, as_Register(base), disp);
+      } else {
+        __ move(T9, disp);
+        if( UseLoongsonISA ) {
+          __ gsldxc1(dst_reg, as_Register(base), T9, 0);
+        } else {
+          __ addu(AT, as_Register(base), T9);
+          __ ldc1(dst_reg, AT, 0);
+        }
+      }
     }
   %}
2092 3464
2093 enc_class store_D_reg_enc (memory mem, regD src) %{ 3465 enc_class store_D_reg_enc (memory mem, regD src) %{
2094 MacroAssembler _masm(&cbuf); 3466 MacroAssembler _masm(&cbuf);
2095 int base = $mem$$base; 3467 int base = $mem$$base;
2096 int index = $mem$$index; 3468 int index = $mem$$index;
2097 int scale = $mem$$scale; 3469 int scale = $mem$$scale;
2098 int disp = $mem$$disp; 3470 int disp = $mem$$disp;
2099 FloatRegister src_reg = as_FloatRegister($src$$reg); 3471 FloatRegister src_reg = as_FloatRegister($src$$reg);
2100 3472
2101 if( index != 0 ) { 3473 if( index != 0 ) {
2102 assert(UseLoongsonISA, "Only supported for Loongson CPUs"); 3474 if( Assembler::is_simm16(disp) ) {
2103 __ gssdxc1(src_reg, as_Register(base), as_Register(index), disp); 3475 if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
3476 if (scale == 0) {
3477 __ gssdxc1(src_reg, as_Register(base), as_Register(index), disp);
3478 } else {
3479 __ dsll(AT, as_Register(index), scale);
3480 __ gssdxc1(src_reg, as_Register(base), AT, disp);
3481 }
3482 } else {
3483 if (scale == 0) {
3484 __ daddu(AT, as_Register(base), as_Register(index));
3485 } else {
3486 __ dsll(AT, as_Register(index), scale);
3487 __ daddu(AT, as_Register(base), AT);
3488 }
3489 __ sdc1(src_reg, AT, disp);
3490 }
3491 } else {
3492 if (scale == 0) {
3493 __ daddu(AT, as_Register(base), as_Register(index));
3494 } else {
3495 __ dsll(AT, as_Register(index), scale);
3496 __ daddu(AT, as_Register(base), AT);
3497 }
3498 __ move(T9, disp);
3499 if( UseLoongsonISA ) {
3500 __ gssdxc1(src_reg, AT, T9, 0);
3501 } else {
3502 __ addu(AT, AT, T9);
3503 __ sdc1(src_reg, AT, 0);
3504 }
3505 }
2104 } else { 3506 } else {
2105 __ sdc1(src_reg, as_Register(base), disp); 3507 if( Assembler::is_simm16(disp) ) {
3508 __ sdc1(src_reg, as_Register(base), disp);
3509 } else {
3510 __ move(T9, disp);
3511 if( UseLoongsonISA ) {
3512 __ gssdxc1(src_reg, as_Register(base), T9, 0);
3513 } else {
3514 __ addu(AT, as_Register(base), T9);
3515 __ sdc1(src_reg, AT, 0);
3516 }
3517 }
2106 } 3518 }
2107 %} 3519 %}
2108 3520
2109 enc_class Java_To_Runtime (method meth) %{ // CALL Java_To_Runtime, Java_To_Runtime_Leaf 3521 enc_class Java_To_Runtime (method meth) %{ // CALL Java_To_Runtime, Java_To_Runtime_Leaf
2110 MacroAssembler _masm(&cbuf); 3522 MacroAssembler _masm(&cbuf);
2661 op_cost(10); 4073 op_cost(10);
2662 format %{ %} 4074 format %{ %}
2663 interface(CONST_INTER); 4075 interface(CONST_INTER);
2664 %} 4076 %}
2665 4077
4078 operand immP31()
4079 %{
4080 predicate(n->as_Type()->type()->reloc() == relocInfo::none
4081 && (n->get_ptr() >> 31) == 0);
4082 match(ConP);
4083
4084 op_cost(5);
4085 format %{ %}
4086 interface(CONST_INTER);
4087 %}
4088
2666 // NULL Pointer Immediate 4089 // NULL Pointer Immediate
2667 operand immP0() %{ 4090 operand immP0() %{
2668 predicate( n->get_ptr() == 0 ); 4091 predicate( n->get_ptr() == 0 );
2669 match(ConP); 4092 match(ConP);
2670 op_cost(0); 4093 op_cost(0);
3931 format %{ %} 5354 format %{ %}
3932 interface(REG_INTER); 5355 interface(REG_INTER);
3933 %} 5356 %}
3934 5357
3935 //----------Memory Operands---------------------------------------------------- 5358 //----------Memory Operands----------------------------------------------------
3936 operand baseOffset16(mRegP reg, immL16 off) 5359 // Indirect Memory Operand
5360 operand indirect(mRegP reg) %{
5361 constraint(ALLOC_IN_RC(p_reg));
5362 match(reg);
5363
5364 format %{ "[$reg] @ indirect" %}
5365 interface(MEMORY_INTER) %{
5366 base($reg);
5367 index(0x0); /* NO_INDEX */
5368 scale(0x0);
5369 disp(0x0);
5370 %}
5371 %}
5372
5373 // Indirect Memory Plus Short Offset Operand
5374 operand indOffset8(mRegP reg, immL8 off)
3937 %{ 5375 %{
3938 constraint(ALLOC_IN_RC(p_reg)); 5376 constraint(ALLOC_IN_RC(p_reg));
3939 match(AddP reg off); 5377 match(AddP reg off);
3940 5378
5379 op_cost(10);
5380 format %{ "[$reg + $off (8-bit)] @ indOffset8" %}
5381 interface(MEMORY_INTER) %{
5382 base($reg);
5383 index(0x0); /* NO_INDEX */
5384 scale(0x0);
5385 disp($off);
5386 %}
5387 %}
5388
5389 // Indirect Memory Times Scale Plus Index Register
5390 operand indIndexScale(mRegP reg, mRegL lreg, immI2 scale)
5391 %{
5392 constraint(ALLOC_IN_RC(p_reg));
5393 match(AddP reg (LShiftL lreg scale));
5394
5395 op_cost(10);
5396 format %{"[$reg + $lreg << $scale] @ indIndexScale" %}
5397 interface(MEMORY_INTER) %{
5398 base($reg);
5399 index($lreg);
5400 scale($scale);
5401 disp(0x0);
5402 %}
5403 %}
5404
5405
5406 // [base + index + offset]
5407 operand baseIndexOffset8(mRegP base, mRegL index, immL8 off)
5408 %{
5409 constraint(ALLOC_IN_RC(p_reg));
3941 op_cost(5); 5410 op_cost(5);
3942 format %{ "[$reg + $off (16-bit)] @ baseOffset16" %} 5411 match(AddP (AddP base index) off);
5412
5413 format %{ "[$base + $index + $off (8-bit)] @ baseIndexOffset8" %}
5414 interface(MEMORY_INTER) %{
5415 base($base);
5416 index($index);
5417 scale(0x0);
5418 disp($off);
5419 %}
5420 %}
5421
5422 // [base + index + offset]
5423 operand baseIndexOffset8_convI2L(mRegP base, mRegI index, immL8 off)
5424 %{
5425 constraint(ALLOC_IN_RC(p_reg));
5426 op_cost(5);
5427 match(AddP (AddP base (ConvI2L index)) off);
5428
5429 format %{ "[$base + $index + $off (8-bit)] @ baseIndexOffset8_convI2L" %}
5430 interface(MEMORY_INTER) %{
5431 base($base);
5432 index($index);
5433 scale(0x0);
5434 disp($off);
5435 %}
5436 %}
5437
5438 // Indirect Memory Times Scale Plus Index Register Plus Offset Operand
5439 operand indIndexScaleOffset8(mRegP reg, immL8 off, mRegL lreg, immI2 scale)
5440 %{
5441 constraint(ALLOC_IN_RC(p_reg));
5442 match(AddP (AddP reg (LShiftL lreg scale)) off);
5443
5444 op_cost(10);
5445 format %{"[$reg + $off + $lreg << $scale] @ indIndexScaleOffset8" %}
5446 interface(MEMORY_INTER) %{
5447 base($reg);
5448 index($lreg);
5449 scale($scale);
5450 disp($off);
5451 %}
5452 %}
5453
5454 operand indIndexScaleOffset8_convI2L(mRegP reg, immL8 off, mRegI ireg, immI2 scale)
5455 %{
5456 constraint(ALLOC_IN_RC(p_reg));
5457 match(AddP (AddP reg (LShiftL (ConvI2L ireg) scale)) off);
5458
5459 op_cost(10);
5460 format %{"[$reg + $off + $ireg << $scale] @ indIndexScaleOffset8_convI2L" %}
5461 interface(MEMORY_INTER) %{
5462 base($reg);
5463 index($ireg);
5464 scale($scale);
5465 disp($off);
5466 %}
5467 %}
5468
5469 // [base + index<<scale + offset]
5470 operand basePosIndexScaleOffset8(mRegP base, mRegI index, immL8 off, immI_0_31 scale)
5471 %{
5472 constraint(ALLOC_IN_RC(p_reg));
5473 //predicate(n->in(2)->in(3)->in(1)->as_Type()->type()->is_long()->_lo >= 0);
5474 op_cost(10);
5475 match(AddP (AddP base (LShiftL (ConvI2L index) scale)) off);
5476
5477 format %{ "[$base + $index << $scale + $off (8-bit)] @ basePosIndexScaleOffset8" %}
5478 interface(MEMORY_INTER) %{
5479 base($base);
5480 index($index);
5481 scale($scale);
5482 disp($off);
5483 %}
5484 %}
5485
5486 // Indirect Memory Times Scale Plus Index Register Plus Offset Operand
5487 operand indIndexScaleOffsetNarrow(mRegN reg, immL8 off, mRegL lreg, immI2 scale)
5488 %{
5489 predicate(Universe::narrow_oop_shift() == 0);
5490 constraint(ALLOC_IN_RC(p_reg));
5491 match(AddP (AddP (DecodeN reg) (LShiftL lreg scale)) off);
5492
5493 op_cost(10);
5494 format %{"[$reg + $off + $lreg << $scale] @ indIndexScaleOffsetNarrow" %}
5495 interface(MEMORY_INTER) %{
5496 base($reg);
5497 index($lreg);
5498 scale($scale);
5499 disp($off);
5500 %}
5501 %}
5502
5503 // [base + index<<scale + offset] for compressd Oops
5504 operand indPosIndexI2LScaleOffset8Narrow(mRegN base, mRegI index, immL8 off, immI_0_31 scale)
5505 %{
5506 constraint(ALLOC_IN_RC(p_reg));
5507 //predicate(Universe::narrow_oop_shift() == 0 && n->in(2)->in(3)->in(1)->as_Type()->type()->is_long()->_lo >= 0);
5508 predicate(Universe::narrow_oop_shift() == 0);
5509 op_cost(10);
5510 match(AddP (AddP (DecodeN base) (LShiftL (ConvI2L index) scale)) off);
5511
5512 format %{ "[$base + $index << $scale + $off (8-bit)] @ indPosIndexI2LScaleOffset8Narrow" %}
5513 interface(MEMORY_INTER) %{
5514 base($base);
5515 index($index);
5516 scale($scale);
5517 disp($off);
5518 %}
5519 %}
5520
5521 //FIXME: I think it's better to limit the immI to be 16-bit at most!
5522 // Indirect Memory Plus Long Offset Operand
5523 operand indOffset32(mRegP reg, immL32 off) %{
5524 constraint(ALLOC_IN_RC(p_reg));
5525 op_cost(20);
5526 match(AddP reg off);
5527
5528 format %{ "[$reg + $off (32-bit)] @ indOffset32" %}
5529 interface(MEMORY_INTER) %{
5530 base($reg);
5531 index(0x0); /* NO_INDEX */
5532 scale(0x0);
5533 disp($off);
5534 %}
5535 %}
5536
5537 // Indirect Memory Plus Index Register
5538 operand indIndex(mRegP addr, mRegL index) %{
5539 constraint(ALLOC_IN_RC(p_reg));
5540 match(AddP addr index);
5541
5542 op_cost(20);
5543 format %{"[$addr + $index] @ indIndex" %}
5544 interface(MEMORY_INTER) %{
5545 base($addr);
5546 index($index);
5547 scale(0x0);
5548 disp(0x0);
5549 %}
5550 %}
5551
5552 operand indirectNarrowKlass(mRegN reg)
5553 %{
5554 predicate(Universe::narrow_klass_shift() == 0);
5555 constraint(ALLOC_IN_RC(p_reg));
5556 op_cost(10);
5557 match(DecodeNKlass reg);
5558
5559 format %{ "[$reg] @ indirectNarrowKlass" %}
5560 interface(MEMORY_INTER) %{
5561 base($reg);
5562 index(0x0);
5563 scale(0x0);
5564 disp(0x0);
5565 %}
5566 %}
5567
5568 operand indOffset8NarrowKlass(mRegN reg, immL8 off)
5569 %{
5570 predicate(Universe::narrow_klass_shift() == 0);
5571 constraint(ALLOC_IN_RC(p_reg));
5572 op_cost(10);
5573 match(AddP (DecodeNKlass reg) off);
5574
5575 format %{ "[$reg + $off (8-bit)] @ indOffset8NarrowKlass" %}
3943 interface(MEMORY_INTER) %{ 5576 interface(MEMORY_INTER) %{
3944 base($reg); 5577 base($reg);
3945 index(0x0); 5578 index(0x0);
3946 scale(0x0); 5579 scale(0x0);
3947 disp($off); 5580 disp($off);
3948 %} 5581 %}
3949 %} 5582 %}
3950 5583
3951 operand gsBaseIndexOffset8(mRegP base, mRegL index, immL8 off) 5584 operand indOffset32NarrowKlass(mRegN reg, immL32 off)
3952 %{ 5585 %{
3953 predicate(UseLoongsonISA); 5586 predicate(Universe::narrow_klass_shift() == 0);
3954 constraint(ALLOC_IN_RC(p_reg)); 5587 constraint(ALLOC_IN_RC(p_reg));
3955 match(AddP (AddP base index) off); 5588 op_cost(10);
3956 5589 match(AddP (DecodeNKlass reg) off);
3957 op_cost(5); 5590
3958 format %{ "[$base + $index + $off (8-bit)] @ gsBaseIndexOffset8" %} 5591 format %{ "[$reg + $off (32-bit)] @ indOffset32NarrowKlass" %}
3959 interface(MEMORY_INTER) %{ 5592 interface(MEMORY_INTER) %{
3960 base($base); 5593 base($reg);
3961 index($index); 5594 index(0x0);
3962 scale(0x0); 5595 scale(0x0);
3963 disp($off); 5596 disp($off);
3964 %} 5597 %}
3965 %} 5598 %}
3966 5599
3967 operand gsBaseIndexI2LOffset8(mRegP base, mRegI index, immL8 off) 5600 operand indIndexOffsetNarrowKlass(mRegN reg, mRegL lreg, immL32 off)
3968 %{ 5601 %{
3969 predicate(UseLoongsonISA); 5602 predicate(Universe::narrow_klass_shift() == 0);
3970 constraint(ALLOC_IN_RC(p_reg)); 5603 constraint(ALLOC_IN_RC(p_reg));
3971 match(AddP (AddP base (ConvI2L index)) off); 5604 match(AddP (AddP (DecodeNKlass reg) lreg) off);
3972 5605
3973 op_cost(5); 5606 op_cost(10);
3974 format %{ "[$base + $index + $off (8-bit)] @ gsBaseIndexI2LOffset8" %} 5607 format %{"[$reg + $off + $lreg] @ indIndexOffsetNarrowKlass" %}
3975 interface(MEMORY_INTER) %{ 5608 interface(MEMORY_INTER) %{
3976 base($base); 5609 base($reg);
3977 index($index); 5610 index($lreg);
3978 scale(0x0); 5611 scale(0x0);
3979 disp($off); 5612 disp($off);
3980 %} 5613 %}
3981 %} 5614 %}
3982 5615
3983 operand gsBaseIndexOffset0(mRegP addr, mRegL index) %{ 5616 operand indIndexNarrowKlass(mRegN reg, mRegL lreg)
3984 predicate(UseLoongsonISA); 5617 %{
5618 predicate(Universe::narrow_klass_shift() == 0);
3985 constraint(ALLOC_IN_RC(p_reg)); 5619 constraint(ALLOC_IN_RC(p_reg));
3986 match(AddP addr index); 5620 match(AddP (DecodeNKlass reg) lreg);
3987 5621
3988 op_cost(10); 5622 op_cost(10);
3989 format %{"[$addr + $index] @ gsBaseIndexOffset0" %} 5623 format %{"[$reg + $lreg] @ indIndexNarrowKlass" %}
3990 interface(MEMORY_INTER) %{ 5624 interface(MEMORY_INTER) %{
3991 base($addr); 5625 base($reg);
3992 index($index); 5626 index($lreg);
3993 scale(0x0); 5627 scale(0x0);
3994 disp(0x0); 5628 disp(0x0);
3995 %} 5629 %}
3996 %} 5630 %}
3997 5631
3998 operand baseOffset0(mRegP reg) %{ 5632 // Indirect Memory Operand
5633 operand indirectNarrow(mRegN reg)
5634 %{
5635 predicate(Universe::narrow_oop_shift() == 0);
3999 constraint(ALLOC_IN_RC(p_reg)); 5636 constraint(ALLOC_IN_RC(p_reg));
4000 op_cost(10); 5637 op_cost(10);
4001 match(reg); 5638 match(DecodeN reg);
4002 5639
4003 format %{ "[$reg] @ baseOffset0" %} 5640 format %{ "[$reg] @ indirectNarrow" %}
4004 interface(MEMORY_INTER) %{ 5641 interface(MEMORY_INTER) %{
4005 base($reg); 5642 base($reg);
4006 index(0x0); 5643 index(0x0);
4007 scale(0x0); 5644 scale(0x0);
4008 disp(0x0); 5645 disp(0x0);
4009 %} 5646 %}
4010 %} 5647 %}
4011 5648
4012 operand baseOffset16Narrow(mRegN reg, immL16 off) 5649 // Indirect Memory Plus Short Offset Operand
5650 operand indOffset8Narrow(mRegN reg, immL8 off)
4013 %{ 5651 %{
4014 predicate(Universe::narrow_oop_base() == 0 && Universe::narrow_oop_shift() == 0); 5652 predicate(Universe::narrow_oop_shift() == 0);
4015 constraint(ALLOC_IN_RC(p_reg)); 5653 constraint(ALLOC_IN_RC(p_reg));
5654 op_cost(10);
4016 match(AddP (DecodeN reg) off); 5655 match(AddP (DecodeN reg) off);
4017 5656
4018 op_cost(5); 5657 format %{ "[$reg + $off (8-bit)] @ indOffset8Narrow" %}
4019 format %{ "[$reg + $off (16-bit)] @ baseOffset16Narrow" %}
4020 interface(MEMORY_INTER) %{ 5658 interface(MEMORY_INTER) %{
4021 base($reg); 5659 base($reg);
4022 index(0x0); 5660 index(0x0);
4023 scale(0x0); 5661 scale(0x0);
4024 disp($off); 5662 disp($off);
4025 %} 5663 %}
4026 %} 5664 %}
4027 5665
4028 operand gsBaseIndexOffset8Narrow(mRegN reg, mRegL lreg, immL8 off) 5666 // Indirect Memory Plus Index Register Plus Offset Operand
5667 operand indIndexOffset8Narrow(mRegN reg, mRegL lreg, immL8 off)
4029 %{ 5668 %{
4030 predicate(UseLoongsonISA && Universe::narrow_oop_base() == 0 && Universe::narrow_oop_shift() == 0); 5669 predicate(Universe::narrow_oop_shift() == 0);
4031 constraint(ALLOC_IN_RC(p_reg)); 5670 constraint(ALLOC_IN_RC(p_reg));
4032 match(AddP (AddP (DecodeN reg) lreg) off); 5671 match(AddP (AddP (DecodeN reg) lreg) off);
4033 5672
4034 op_cost(5); 5673 op_cost(10);
4035 format %{"[$reg + $off + $lreg] @ gsBaseIndexOffset8Narrow" %} 5674 format %{"[$reg + $off + $lreg] @ indIndexOffset8Narrow" %}
4036 interface(MEMORY_INTER) %{ 5675 interface(MEMORY_INTER) %{
4037 base($reg); 5676 base($reg);
4038 index($lreg); 5677 index($lreg);
4039 scale(0x0); 5678 scale(0x0);
4040 disp($off); 5679 disp($off);
4041 %} 5680 %}
4042 %} 5681 %}
4043 5682
4044 operand baseOffset0Narrow(mRegN reg) 5683 //----------Load Long Memory Operands------------------------------------------
4045 %{ 5684 // The load-long idiom will use it's address expression again after loading
4046 predicate(Universe::narrow_oop_base() == 0 && Universe::narrow_oop_shift() == 0); 5685 // the first word of the long. If the load-long destination overlaps with
5686 // registers used in the addressing expression, the 2nd half will be loaded
5687 // from a clobbered address. Fix this by requiring that load-long use
5688 // address registers that do not overlap with the load-long target.
5689
5690 // load-long support
5691 operand load_long_RegP() %{
4047 constraint(ALLOC_IN_RC(p_reg)); 5692 constraint(ALLOC_IN_RC(p_reg));
4048 match(DecodeN reg); 5693 match(RegP);
4049 5694 match(mRegP);
4050 op_cost(10); 5695 op_cost(100);
4051 format %{ "[$reg] @ baseOffset0Narrow" %} 5696 format %{ %}
5697 interface(REG_INTER);
5698 %}
5699
5700 // Indirect Memory Operand Long
5701 operand load_long_indirect(load_long_RegP reg) %{
5702 constraint(ALLOC_IN_RC(p_reg));
5703 match(reg);
5704
5705 format %{ "[$reg]" %}
4052 interface(MEMORY_INTER) %{ 5706 interface(MEMORY_INTER) %{
4053 base($reg); 5707 base($reg);
4054 index(0x0); 5708 index(0x0);
4055 scale(0x0); 5709 scale(0x0);
4056 disp(0x0); 5710 disp(0x0);
4057 %} 5711 %}
4058 %} 5712 %}
4059 5713
4060 operand baseOffset16NarrowKlass(mRegN reg, immL16 off) 5714 // Indirect Memory Plus Long Offset Operand
4061 %{ 5715 operand load_long_indOffset32(load_long_RegP reg, immL32 off) %{
4062 predicate(Universe::narrow_klass_base() == 0 && Universe::narrow_klass_shift() == 0); 5716 match(AddP reg off);
4063 constraint(ALLOC_IN_RC(p_reg)); 5717
4064 match(AddP (DecodeNKlass reg) off); 5718 format %{ "[$reg + $off]" %}
4065
4066 op_cost(5);
4067 format %{ "[$reg + $off (16-bit)] @ baseOffset16NarrowKlass" %}
4068 interface(MEMORY_INTER) %{ 5719 interface(MEMORY_INTER) %{
4069 base($reg); 5720 base($reg);
4070 index(0x0); 5721 index(0x0);
4071 scale(0x0); 5722 scale(0x0);
4072 disp($off); 5723 disp($off);
4073 %} 5724 %}
4074 %} 5725 %}
4075
4076 operand baseOffset0NarrowKlass(mRegN reg)
4077 %{
4078 predicate(Universe::narrow_klass_base() == 0 && Universe::narrow_klass_shift() == 0);
4079 constraint(ALLOC_IN_RC(p_reg));
4080 match(DecodeNKlass reg);
4081
4082 op_cost(10);
4083 format %{ "[$reg] @ baseOffset0NarrowKlass" %}
4084 interface(MEMORY_INTER) %{
4085 base($reg);
4086 index(0x0);
4087 scale(0x0);
4088 disp(0x0);
4089 %}
4090 %}
4091
4092 operand gsBaseIndexOffset8NarrowKlass(mRegN reg, mRegL lreg, immL8 off)
4093 %{
4094 predicate(UseLoongsonISA && Universe::narrow_klass_base() == 0 && Universe::narrow_klass_shift() == 0);
4095 constraint(ALLOC_IN_RC(p_reg));
4096 match(AddP (AddP (DecodeNKlass reg) lreg) off);
4097
4098 op_cost(5);
4099 format %{"[$reg + $off + $lreg] @ gsBaseIndexOffset8NarrowKlass" %}
4100 interface(MEMORY_INTER) %{
4101 base($reg);
4102 index($lreg);
4103 scale(0x0);
4104 disp($off);
4105 %}
4106 %}
4107
4108 operand gsBaseIndexOffset0NarrowKlass(mRegN reg, mRegL lreg)
4109 %{
4110 predicate(UseLoongsonISA && Universe::narrow_klass_base() == 0 && Universe::narrow_klass_shift() == 0);
4111 constraint(ALLOC_IN_RC(p_reg));
4112 match(AddP (DecodeNKlass reg) lreg);
4113
4114 op_cost(10);
4115 format %{"[$reg + $lreg] @ gsBaseIndexOffset0NarrowKlass" %}
4116 interface(MEMORY_INTER) %{
4117 base($reg);
4118 index($lreg);
4119 scale(0x0);
4120 disp(0x0);
4121 %}
4122 %}
4123
4124
4125 //------------------------OPERAND CLASSES--------------------------------------
4126 opclass memory(
4127 baseOffset16,
4128 gsBaseIndexOffset8,
4129 gsBaseIndexI2LOffset8,
4130 gsBaseIndexOffset0,
4131 baseOffset0,
4132
4133 baseOffset16Narrow,
4134 gsBaseIndexOffset8Narrow,
4135 baseOffset0Narrow,
4136
4137 baseOffset16NarrowKlass,
4138 baseOffset0NarrowKlass,
4139 gsBaseIndexOffset8NarrowKlass,
4140 gsBaseIndexOffset0NarrowKlass
4141 );
4142
4143 // For loading unsigned values
4144 // umemory --> unsigned memory
4145 opclass umemory(
4146 baseOffset16,
4147 baseOffset0,
4148
4149 baseOffset16Narrow,
4150 baseOffset0Narrow,
4151
4152 baseOffset16NarrowKlass,
4153 baseOffset0NarrowKlass
4154 );
4155
4156 5726
4157 //----------Conditional Branch Operands---------------------------------------- 5727 //----------Conditional Branch Operands----------------------------------------
4158 // Comparison Op - This is the operation of the comparison, and is limited to 5728 // Comparison Op - This is the operation of the comparison, and is limited to
4159 // the following set of codes: 5729 // the following set of codes:
4160 // L (<), LE (<=), G (>), GE (>=), E (==), NE (!=) 5730 // L (<), LE (<=), G (>), GE (>=), E (==), NE (!=)
4204 overflow(0x7); 5774 overflow(0x7);
4205 no_overflow(0x8); 5775 no_overflow(0x8);
4206 %} 5776 %}
4207 %} 5777 %}
4208 5778
5779 /*
5780 // Comparison Code, unsigned compare. Used by FP also, with
5781 // C2 (unordered) turned into GT or LT already. The other bits
5782 // C0 and C3 are turned into Carry & Zero flags.
5783 operand cmpOpU() %{
5784 match(Bool);
5785
5786 format %{ "" %}
5787 interface(COND_INTER) %{
5788 equal(0x4);
5789 not_equal(0x5);
5790 less(0x2);
5791 greater_equal(0x3);
5792 less_equal(0x6);
5793 greater(0x7);
5794 %}
5795 %}
5796 */
5797 /*
5798 // Comparison Code for FP conditional move
5799 operand cmpOp_fcmov() %{
5800 match(Bool);
5801
5802 format %{ "" %}
5803 interface(COND_INTER) %{
5804 equal (0x01);
5805 not_equal (0x02);
5806 greater (0x03);
5807 greater_equal(0x04);
5808 less (0x05);
5809 less_equal (0x06);
5810 %}
5811 %}
5812
5813 // Comparision Code used in long compares
5814 operand cmpOp_commute() %{
5815 match(Bool);
5816
5817 format %{ "" %}
5818 interface(COND_INTER) %{
5819 equal(0x4);
5820 not_equal(0x5);
5821 less(0xF);
5822 greater_equal(0xE);
5823 less_equal(0xD);
5824 greater(0xC);
5825 %}
5826 %}
5827 */
4209 5828
4210 //----------Special Memory Operands-------------------------------------------- 5829 //----------Special Memory Operands--------------------------------------------
4211 // Stack Slot Operand - This operand is used for loading and storing temporary 5830 // Stack Slot Operand - This operand is used for loading and storing temporary
4212 // values on the stack where a match requires a value to 5831 // values on the stack where a match requires a value to
4213 // flow through memory. 5832 // flow through memory.
4273 index(0x0); // No Index 5892 index(0x0); // No Index
4274 scale(0x0); // No Scale 5893 scale(0x0); // No Scale
4275 disp($reg); // Stack Offset 5894 disp($reg); // Stack Offset
4276 %} 5895 %}
4277 %} 5896 %}
5897
5898
5899 //------------------------OPERAND CLASSES--------------------------------------
5900 //opclass memory( direct, indirect, indOffset16, indOffset32, indOffset32X, indIndexOffset );
5901 opclass memory( indirect, indirectNarrow, indOffset8, indOffset32, indIndex, indIndexScale, load_long_indirect, load_long_indOffset32, baseIndexOffset8, baseIndexOffset8_convI2L, indIndexScaleOffset8, indIndexScaleOffset8_convI2L, basePosIndexScaleOffset8, indIndexScaleOffsetNarrow, indPosIndexI2LScaleOffset8Narrow, indOffset8Narrow, indIndexOffset8Narrow);
5902
4278 5903
4279 //----------PIPELINE----------------------------------------------------------- 5904 //----------PIPELINE-----------------------------------------------------------
4280 // Rules which define the behavior of the target architectures pipeline. 5905 // Rules which define the behavior of the target architectures pipeline.
4281 5906
4282 pipeline %{ 5907 pipeline %{
4626 ins_encode(load_B_enc(dst, mem)); 6251 ins_encode(load_B_enc(dst, mem));
4627 ins_pipe(ialu_loadI); 6252 ins_pipe(ialu_loadI);
4628 %} 6253 %}
4629 6254
4630 // Load Integer (32 bit signed) to Unsigned Byte (8 bit UNsigned) 6255 // Load Integer (32 bit signed) to Unsigned Byte (8 bit UNsigned)
4631 instruct loadI2UB(mRegI dst, umemory mem, immI_255 mask) %{ 6256 instruct loadI2UB(mRegI dst, memory mem, immI_255 mask) %{
4632 match(Set dst (AndI (LoadI mem) mask)); 6257 match(Set dst (AndI (LoadI mem) mask));
4633 6258
4634 ins_cost(125); 6259 ins_cost(125);
4635 format %{ "lbu $dst, $mem\t# int -> ubyte #@loadI2UB" %} 6260 format %{ "lbu $dst, $mem\t# int -> ubyte #@loadI2UB" %}
4636 ins_encode(load_UB_enc(dst, mem)); 6261 ins_encode(load_UB_enc(dst, mem));
4646 ins_encode(load_S_enc(dst, mem)); 6271 ins_encode(load_S_enc(dst, mem));
4647 ins_pipe(ialu_loadI); 6272 ins_pipe(ialu_loadI);
4648 %} 6273 %}
4649 6274
4650 // Load Integer (32 bit signed) to Unsigned Short/Char (16 bit UNsigned) 6275 // Load Integer (32 bit signed) to Unsigned Short/Char (16 bit UNsigned)
4651 instruct loadI2US(mRegI dst, umemory mem, immI_65535 mask) %{ 6276 instruct loadI2US(mRegI dst, memory mem, immI_65535 mask) %{
4652 match(Set dst (AndI (LoadI mem) mask)); 6277 match(Set dst (AndI (LoadI mem) mask));
4653 6278
4654 ins_cost(125); 6279 ins_cost(125);
4655 format %{ "lhu $dst, $mem\t# int -> ushort/char #@loadI2US" %} 6280 format %{ "lhu $dst, $mem\t# int -> ushort/char #@loadI2US" %}
4656 ins_encode(load_C_enc(dst, mem)); 6281 ins_encode(load_C_enc(dst, mem));
4687 format %{ "sd $mem, $src #@storeL_reg\n" %} 6312 format %{ "sd $mem, $src #@storeL_reg\n" %}
4688 ins_encode(store_L_reg_enc(mem, src)); 6313 ins_encode(store_L_reg_enc(mem, src));
4689 ins_pipe( ialu_storeL ); 6314 ins_pipe( ialu_storeL );
4690 %} 6315 %}
4691 6316
4692
4693 instruct storeL_immL0(memory mem, immL0 zero) %{ 6317 instruct storeL_immL0(memory mem, immL0 zero) %{
4694 match(Set mem (StoreL mem zero)); 6318 match(Set mem (StoreL mem zero));
4695 6319
4696 ins_cost(180); 6320 ins_cost(180);
4697 format %{ "sd $mem, zero #@storeL_immL0" %} 6321 format %{ "sd zero, $mem #@storeL_immL0" %}
4698 ins_encode(store_L_immL0_enc(mem)); 6322 ins_encode(store_L_immL0_enc(mem, zero));
4699 ins_pipe( ialu_storeL ); 6323 ins_pipe( ialu_storeL );
4700 %} 6324 %}
4701 6325
6326 instruct storeL_imm(memory mem, immL src) %{
6327 match(Set mem (StoreL mem src));
6328
6329 ins_cost(200);
6330 format %{ "sd $src, $mem #@storeL_imm" %}
6331 ins_encode(store_L_immL_enc(mem, src));
6332 ins_pipe( ialu_storeL );
6333 %}
6334
4702 // Load Compressed Pointer 6335 // Load Compressed Pointer
4703 instruct loadN(mRegN dst, umemory mem) 6336 instruct loadN(mRegN dst, memory mem)
4704 %{ 6337 %{
4705 match(Set dst (LoadN mem)); 6338 match(Set dst (LoadN mem));
4706 6339
4707 ins_cost(125); // XXX 6340 ins_cost(125); // XXX
4708 format %{ "lwu $dst, $mem\t# compressed ptr @ loadN" %} 6341 format %{ "lwu $dst, $mem\t# compressed ptr @ loadN" %}
4709 ins_encode (load_N_enc(dst, mem)); 6342 ins_encode (load_N_enc(dst, mem));
4710 ins_pipe( ialu_loadI ); // XXX 6343 ins_pipe( ialu_loadI ); // XXX
4711 %} 6344 %}
4712 6345
4713 instruct loadN2P(mRegP dst, umemory mem) 6346 instruct loadN2P(mRegP dst, memory mem)
4714 %{ 6347 %{
4715 match(Set dst (DecodeN (LoadN mem))); 6348 match(Set dst (DecodeN (LoadN mem)));
4716 predicate(Universe::narrow_oop_base() == NULL && Universe::narrow_oop_shift() == 0); 6349 predicate(Universe::narrow_oop_base() == NULL && Universe::narrow_oop_shift() == 0);
4717 6350
4718 ins_cost(125); // XXX 6351 ins_cost(125); // XXX
4740 ins_encode (load_P_enc(dst, mem)); 6373 ins_encode (load_P_enc(dst, mem));
4741 ins_pipe( ialu_loadI ); 6374 ins_pipe( ialu_loadI );
4742 %} 6375 %}
4743 6376
4744 // Load narrow Klass Pointer 6377 // Load narrow Klass Pointer
4745 instruct loadNKlass(mRegN dst, umemory mem) 6378 instruct loadNKlass(mRegN dst, memory mem)
4746 %{ 6379 %{
4747 match(Set dst (LoadNKlass mem)); 6380 match(Set dst (LoadNKlass mem));
4748 6381
4749 ins_cost(125); // XXX 6382 ins_cost(125); // XXX
4750 format %{ "lwu $dst, $mem\t# compressed klass ptr @ loadNKlass" %} 6383 format %{ "lwu $dst, $mem\t# compressed klass ptr @ loadNKlass" %}
4751 ins_encode (load_N_enc(dst, mem)); 6384 ins_encode (load_N_enc(dst, mem));
4752 ins_pipe( ialu_loadI ); // XXX 6385 ins_pipe( ialu_loadI ); // XXX
4753 %} 6386 %}
4754 6387
4755 instruct loadN2PKlass(mRegP dst, umemory mem) 6388 instruct loadN2PKlass(mRegP dst, memory mem)
4756 %{ 6389 %{
4757 match(Set dst (DecodeNKlass (LoadNKlass mem))); 6390 match(Set dst (DecodeNKlass (LoadNKlass mem)));
4758 predicate(Universe::narrow_klass_base() == NULL && Universe::narrow_klass_shift() == 0); 6391 predicate(Universe::narrow_klass_base() == NULL && Universe::narrow_klass_shift() == 0);
4759 6392
4760 ins_cost(125); // XXX 6393 ins_cost(125); // XXX
4865 format %{ "mov $mem, $zero #@storeImmP0" %} 6498 format %{ "mov $mem, $zero #@storeImmP0" %}
4866 ins_encode(store_P_immP0_enc(mem)); 6499 ins_encode(store_P_immP0_enc(mem));
4867 ins_pipe( ialu_storeI ); 6500 ins_pipe( ialu_storeI );
4868 %} 6501 %}
4869 6502
6503 // Store NULL Pointer, mark word, or other simple pointer constant.
6504 instruct storeImmP(memory mem, immP31 src) %{
6505 match(Set mem (StoreP mem src));
6506
6507 ins_cost(150);
6508 format %{ "mov $mem, $src #@storeImmP" %}
6509 ins_encode(store_P_immP_enc(mem, src));
6510 ins_pipe( ialu_storeI );
6511 %}
6512
6513 // Store Byte Immediate
6514 instruct storeImmB(memory mem, immI8 src) %{
6515 match(Set mem (StoreB mem src));
6516
6517 ins_cost(150);
6518 format %{ "movb $mem, $src #@storeImmB" %}
6519 ins_encode(store_B_immI_enc(mem, src));
6520 ins_pipe( ialu_storeI );
6521 %}
6522
4870 // Store Compressed Pointer 6523 // Store Compressed Pointer
4871 instruct storeN(memory mem, mRegN src) 6524 instruct storeN(memory mem, mRegN src)
4872 %{ 6525 %{
4873 match(Set mem (StoreN mem src)); 6526 match(Set mem (StoreN mem src));
4874 6527
4913 instruct storeImmN0(memory mem, immN0 zero) 6566 instruct storeImmN0(memory mem, immN0 zero)
4914 %{ 6567 %{
4915 match(Set mem (StoreN mem zero)); 6568 match(Set mem (StoreN mem zero));
4916 6569
4917 ins_cost(125); // XXX 6570 ins_cost(125); // XXX
4918 format %{ "storeN0 $mem, R12\t# compressed ptr" %} 6571 format %{ "storeN0 zero, $mem\t# compressed ptr" %}
4919 ins_encode(storeImmN0_enc(mem)); 6572 ins_encode(storeImmN0_enc(mem, zero));
6573 ins_pipe( ialu_storeI );
6574 %}
6575
6576 instruct storeImmN(memory mem, immN src)
6577 %{
6578 match(Set mem (StoreN mem src));
6579
6580 ins_cost(150);
6581 format %{ "storeImmN $mem, $src\t# compressed ptr @ storeImmN" %}
6582 ins_encode(storeImmN_enc(mem, src));
6583 ins_pipe( ialu_storeI );
6584 %}
6585
6586 instruct storeImmNKlass(memory mem, immNKlass src)
6587 %{
6588 match(Set mem (StoreNKlass mem src));
6589
6590 ins_cost(150); // XXX
6591 format %{ "sw $mem, $src\t# compressed klass ptr @ storeImmNKlass" %}
6592 ins_encode(storeImmNKlass_enc(mem, src));
4920 ins_pipe( ialu_storeI ); 6593 ins_pipe( ialu_storeI );
4921 %} 6594 %}
4922 6595
4923 // Store Byte 6596 // Store Byte
4924 instruct storeB(memory mem, mRegI src) %{ 6597 instruct storeB(memory mem, mRegI src) %{
4928 format %{ "sb $src, $mem #@storeB" %} 6601 format %{ "sb $src, $mem #@storeB" %}
4929 ins_encode(store_B_reg_enc(mem, src)); 6602 ins_encode(store_B_reg_enc(mem, src));
4930 ins_pipe( ialu_storeI ); 6603 ins_pipe( ialu_storeI );
4931 %} 6604 %}
4932 6605
4933 instruct storeB0(memory mem, immI0 zero) %{
4934 match(Set mem (StoreB mem zero));
4935
4936 ins_cost(100);
4937 format %{ "sb $zero, $mem #@storeB0" %}
4938 ins_encode(store_B0_enc(mem));
4939 ins_pipe( ialu_storeI );
4940 %}
4941
4942 instruct storeB_convL2I(memory mem, mRegL src) %{ 6606 instruct storeB_convL2I(memory mem, mRegL src) %{
4943 match(Set mem (StoreB mem (ConvL2I src))); 6607 match(Set mem (StoreB mem (ConvL2I src)));
4944 6608
4945 ins_cost(125); 6609 ins_cost(125);
4946 format %{ "sb $src, $mem #@storeB_convL2I" %} 6610 format %{ "sb $src, $mem #@storeB_convL2I" %}
4966 ins_encode(load_B_enc(dst, mem)); 6630 ins_encode(load_B_enc(dst, mem));
4967 ins_pipe( ialu_loadI ); 6631 ins_pipe( ialu_loadI );
4968 %} 6632 %}
4969 6633
4970 // Load Byte (8bit UNsigned) 6634 // Load Byte (8bit UNsigned)
4971 instruct loadUB(mRegI dst, umemory mem) %{ 6635 instruct loadUB(mRegI dst, memory mem) %{
4972 match(Set dst (LoadUB mem)); 6636 match(Set dst (LoadUB mem));
4973 6637
4974 ins_cost(125); 6638 ins_cost(125);
4975 format %{ "lbu $dst, $mem #@loadUB" %} 6639 format %{ "lbu $dst, $mem #@loadUB" %}
4976 ins_encode(load_UB_enc(dst, mem)); 6640 ins_encode(load_UB_enc(dst, mem));
4977 ins_pipe( ialu_loadI ); 6641 ins_pipe( ialu_loadI );
4978 %} 6642 %}
4979 6643
4980 instruct loadUB_convI2L(mRegL dst, umemory mem) %{ 6644 instruct loadUB_convI2L(mRegL dst, memory mem) %{
4981 match(Set dst (ConvI2L (LoadUB mem))); 6645 match(Set dst (ConvI2L (LoadUB mem)));
4982 6646
4983 ins_cost(125); 6647 ins_cost(125);
4984 format %{ "lbu $dst, $mem #@loadUB_convI2L" %} 6648 format %{ "lbu $dst, $mem #@loadUB_convI2L" %}
4985 ins_encode(load_UB_enc(dst, mem)); 6649 ins_encode(load_UB_enc(dst, mem));
5014 ins_encode(load_S_enc(dst, mem)); 6678 ins_encode(load_S_enc(dst, mem));
5015 ins_pipe( ialu_loadI ); 6679 ins_pipe( ialu_loadI );
5016 %} 6680 %}
5017 6681
5018 // Store Integer Immediate 6682 // Store Integer Immediate
5019 instruct storeI0(memory mem, immI0 zero) %{ 6683 instruct storeImmI(memory mem, immI src) %{
5020 match(Set mem (StoreI mem zero)); 6684 match(Set mem (StoreI mem src));
5021 6685
5022 ins_cost(100); 6686 ins_cost(150);
5023 format %{ "sw $mem, $zero #@storeI0" %} 6687 format %{ "mov $mem, $src #@storeImmI" %}
5024 ins_encode(store_I_immI0_enc(mem)); 6688 ins_encode(store_I_immI_enc(mem, src));
5025 ins_pipe( ialu_storeI ); 6689 ins_pipe( ialu_storeI );
5026 %} 6690 %}
5027 6691
5028 // Store Integer 6692 // Store Integer
5029 instruct storeI(memory mem, mRegI src) %{ 6693 instruct storeI(memory mem, mRegI src) %{
9045 %} 10709 %}
9046 ins_pipe( ialu_regI_regI ); 10710 ins_pipe( ialu_regI_regI );
9047 %} 10711 %}
9048 */ 10712 */
9049 10713
9050 instruct lbu_and_lmask(mRegI dst, umemory mem, immI_255 mask) %{ 10714 instruct lbu_and_lmask(mRegI dst, memory mem, immI_255 mask) %{
9051 match(Set dst (AndI mask (LoadB mem))); 10715 match(Set dst (AndI mask (LoadB mem)));
9052 ins_cost(60); 10716 ins_cost(60);
9053 10717
9054 format %{ "lbu $dst, $mem #@lbu_and_lmask" %} 10718 format %{ "lhu $dst, $mem #@lbu_and_lmask" %}
9055 ins_encode(load_UB_enc(dst, mem)); 10719 ins_encode(load_UB_enc(dst, mem));
9056 ins_pipe( ialu_loadI ); 10720 ins_pipe( ialu_loadI );
9057 %} 10721 %}
9058 10722
9059 instruct lbu_and_rmask(mRegI dst, umemory mem, immI_255 mask) %{ 10723 instruct lbu_and_rmask(mRegI dst, memory mem, immI_255 mask) %{
9060 match(Set dst (AndI (LoadB mem) mask)); 10724 match(Set dst (AndI (LoadB mem) mask));
9061 ins_cost(60); 10725 ins_cost(60);
9062 10726
9063 format %{ "lbu $dst, $mem #@lbu_and_rmask" %} 10727 format %{ "lhu $dst, $mem #@lbu_and_rmask" %}
9064 ins_encode(load_UB_enc(dst, mem)); 10728 ins_encode(load_UB_enc(dst, mem));
9065 ins_pipe( ialu_loadI ); 10729 ins_pipe( ialu_loadI );
9066 %} 10730 %}
9067 10731
9068 instruct andI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{ 10732 instruct andI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
10702 ins_alignment(16); 12366 ins_alignment(16);
10703 %} 12367 %}
10704 12368
10705 // Prefetch instructions. 12369 // Prefetch instructions.
10706 12370
10707 instruct prefetchrNTA( umemory mem ) %{ 12371 instruct prefetchrNTA( memory mem ) %{
10708 match(PrefetchRead mem); 12372 match(PrefetchRead mem);
10709 ins_cost(125); 12373 ins_cost(125);
10710 12374
10711 format %{ "pref $mem\t# Prefetch into non-temporal cache for read @ prefetchrNTA" %} 12375 format %{ "pref $mem\t# Prefetch into non-temporal cache for read @ prefetchrNTA" %}
10712 ins_encode %{ 12376 ins_encode %{
10713 int base = $mem$$base; 12377 int base = $mem$$base;
10714 int index = $mem$$index; 12378 int index = $mem$$index;
10715 int scale = $mem$$scale; 12379 int scale = $mem$$scale;
10716 int disp = $mem$$disp; 12380 int disp = $mem$$disp;
10717 12381
10718 assert(index == 0, "no index"); 12382 if( index != 0 ) {
10719 __ daddiu(AT, as_Register(base), disp); 12383 if (scale == 0) {
12384 __ daddu(AT, as_Register(base), as_Register(index));
12385 } else {
12386 __ dsll(AT, as_Register(index), scale);
12387 __ daddu(AT, as_Register(base), AT);
12388 }
12389 } else {
12390 __ move(AT, as_Register(base));
12391 }
12392 if( Assembler::is_simm16(disp) ) {
12393 __ daddiu(AT, as_Register(base), disp);
12394 __ daddiu(AT, AT, disp);
12395 } else {
12396 __ move(T9, disp);
12397 __ daddu(AT, as_Register(base), T9);
12398 }
10720 __ pref(0, AT, 0); //hint: 0:load 12399 __ pref(0, AT, 0); //hint: 0:load
10721 %} 12400 %}
10722 ins_pipe(pipe_slow); 12401 ins_pipe(pipe_slow);
10723 %} 12402 %}
10724 12403
10725 instruct prefetchwNTA( umemory mem ) %{ 12404 instruct prefetchwNTA( memory mem ) %{
10726 match(PrefetchWrite mem); 12405 match(PrefetchWrite mem);
10727 ins_cost(125); 12406 ins_cost(125);
10728 format %{ "pref $mem\t# Prefetch to non-temporal cache for write @ prefetchwNTA" %} 12407 format %{ "pref $mem\t# Prefetch to non-temporal cache for write @ prefetchwNTA" %}
10729 ins_encode %{ 12408 ins_encode %{
10730 int base = $mem$$base; 12409 int base = $mem$$base;
10731 int index = $mem$$index; 12410 int index = $mem$$index;
10732 int scale = $mem$$scale; 12411 int scale = $mem$$scale;
10733 int disp = $mem$$disp; 12412 int disp = $mem$$disp;
10734 12413
10735 assert(index == 0, "no index"); 12414 if( index != 0 ) {
10736 __ daddiu(AT, as_Register(base), disp); 12415 if (scale == 0) {
10737 __ pref(1, AT, 0); //hint: 1:store 12416 __ daddu(AT, as_Register(base), as_Register(index));
12417 } else {
12418 __ dsll(AT, as_Register(index), scale);
12419 __ daddu(AT, as_Register(base), AT);
12420 }
12421 } else {
12422 __ move(AT, as_Register(base));
12423 }
12424 if( Assembler::is_simm16(disp) ) {
12425 __ daddiu(AT, as_Register(base), disp);
12426 __ daddiu(AT, AT, disp);
12427 } else {
12428 __ move(T9, disp);
12429 __ daddu(AT, as_Register(base), T9);
12430 }
12431 __ pref(1, AT, 0); //hint: 1:store
10738 %} 12432 %}
10739 ins_pipe(pipe_slow); 12433 ins_pipe(pipe_slow);
10740 %} 12434 %}
10741 12435
10742 // Prefetch instructions for allocation. 12436 // Prefetch instructions for allocation.
10752 int disp = $mem$$disp; 12446 int disp = $mem$$disp;
10753 12447
10754 Register dst = R0; 12448 Register dst = R0;
10755 12449
10756 if( index != 0 ) { 12450 if( index != 0 ) {
10757 assert(UseLoongsonISA, "Only supported for Loongson CPUs"); 12451 if( Assembler::is_simm16(disp) ) {
10758 __ gslbx(dst, as_Register(base), as_Register(index), disp); 12452 if( UseLoongsonISA ) {
12453 if (scale == 0) {
12454 __ gslbx(dst, as_Register(base), as_Register(index), disp);
12455 } else {
12456 __ dsll(AT, as_Register(index), scale);
12457 __ gslbx(dst, as_Register(base), AT, disp);
12458 }
12459 } else {
12460 if (scale == 0) {
12461 __ addu(AT, as_Register(base), as_Register(index));
12462 } else {
12463 __ dsll(AT, as_Register(index), scale);
12464 __ addu(AT, as_Register(base), AT);
12465 }
12466 __ lb(dst, AT, disp);
12467 }
12468 } else {
12469 if (scale == 0) {
12470 __ addu(AT, as_Register(base), as_Register(index));
12471 } else {
12472 __ dsll(AT, as_Register(index), scale);
12473 __ addu(AT, as_Register(base), AT);
12474 }
12475 __ move(T9, disp);
12476 if( UseLoongsonISA ) {
12477 __ gslbx(dst, AT, T9, 0);
12478 } else {
12479 __ addu(AT, AT, T9);
12480 __ lb(dst, AT, 0);
12481 }
12482 }
10759 } else { 12483 } else {
10760 __ lb(dst, as_Register(base), disp); 12484 if( Assembler::is_simm16(disp) ) {
12485 __ lb(dst, as_Register(base), disp);
12486 } else {
12487 __ move(T9, disp);
12488 if( UseLoongsonISA ) {
12489 __ gslbx(dst, as_Register(base), T9, 0);
12490 } else {
12491 __ addu(AT, as_Register(base), T9);
12492 __ lb(dst, AT, 0);
12493 }
12494 }
10761 } 12495 }
10762 %} 12496 %}
10763 ins_pipe(pipe_slow); 12497 ins_pipe(pipe_slow);
10764 %} 12498 %}
10765 12499
10776 ins_pc_relative(1); 12510 ins_pc_relative(1);
10777 ins_alignment(16); 12511 ins_alignment(16);
10778 %} 12512 %}
10779 12513
10780 // Load Char (16bit unsigned) 12514 // Load Char (16bit unsigned)
10781 instruct loadUS(mRegI dst, umemory mem) %{ 12515 instruct loadUS(mRegI dst, memory mem) %{
10782 match(Set dst (LoadUS mem)); 12516 match(Set dst (LoadUS mem));
10783 12517
10784 ins_cost(125); 12518 ins_cost(125);
10785 format %{ "loadUS $dst,$mem @ loadC" %} 12519 format %{ "loadUS $dst,$mem @ loadC" %}
10786 ins_encode(load_C_enc(dst, mem)); 12520 ins_encode(load_C_enc(dst, mem));
10787 ins_pipe( ialu_loadI ); 12521 ins_pipe( ialu_loadI );
10788 %} 12522 %}
10789 12523
10790 instruct loadUS_convI2L(mRegL dst, umemory mem) %{ 12524 instruct loadUS_convI2L(mRegL dst, memory mem) %{
10791 match(Set dst (ConvI2L (LoadUS mem))); 12525 match(Set dst (ConvI2L (LoadUS mem)));
10792 12526
10793 ins_cost(125); 12527 ins_cost(125);
10794 format %{ "loadUS $dst,$mem @ loadUS_convI2L" %} 12528 format %{ "loadUS $dst,$mem @ loadUS_convI2L" %}
10795 ins_encode(load_C_enc(dst, mem)); 12529 ins_encode(load_C_enc(dst, mem));
10860 12594
10861 format %{ "mov $dst, zero @ loadConD0"%} 12595 format %{ "mov $dst, zero @ loadConD0"%}
10862 ins_encode %{ 12596 ins_encode %{
10863 FloatRegister dst = as_FloatRegister($dst$$reg); 12597 FloatRegister dst = as_FloatRegister($dst$$reg);
10864 12598
10865 __ dmtc1(R0, dst); 12599 __ dmtc1(R0, dst);
10866 %} 12600 %}
10867 ins_pipe( fpu_loadF ); 12601 ins_pipe( fpu_loadF );
10868 %} 12602 %}
10869 12603
10870 instruct loadConD(regD dst, immD src) %{ 12604 instruct loadConD(regD dst, immD src) %{
10910 int index = $mem$$index; 12644 int index = $mem$$index;
10911 int scale = $mem$$scale; 12645 int scale = $mem$$scale;
10912 int disp = $mem$$disp; 12646 int disp = $mem$$disp;
10913 12647
10914 if( index != 0 ) { 12648 if( index != 0 ) {
10915 assert(UseLoongsonISA, "Only supported for Loongson CPUs"); 12649 if ( UseLoongsonISA ) {
10916 __ gsswx(R0, as_Register(base), as_Register(index), disp); 12650 if ( Assembler::is_simm(disp, 8) ) {
10917 } else { 12651 if ( scale == 0 ) {
10918 __ sw(R0, as_Register(base), disp); 12652 __ gsswx(R0, as_Register(base), as_Register(index), disp);
12653 } else {
12654 __ dsll(T9, as_Register(index), scale);
12655 __ gsswx(R0, as_Register(base), T9, disp);
12656 }
12657 } else if ( Assembler::is_simm16(disp) ) {
12658 if ( scale == 0 ) {
12659 __ daddu(AT, as_Register(base), as_Register(index));
12660 } else {
12661 __ dsll(T9, as_Register(index), scale);
12662 __ daddu(AT, as_Register(base), T9);
12663 }
12664 __ sw(R0, AT, disp);
12665 } else {
12666 if ( scale == 0 ) {
12667 __ move(T9, disp);
12668 __ daddu(AT, as_Register(index), T9);
12669 __ gsswx(R0, as_Register(base), AT, 0);
12670 } else {
12671 __ dsll(T9, as_Register(index), scale);
12672 __ move(AT, disp);
12673 __ daddu(AT, AT, T9);
12674 __ gsswx(R0, as_Register(base), AT, 0);
12675 }
12676 }
12677 } else { //not use loongson isa
12678 if(scale != 0) {
12679 __ dsll(T9, as_Register(index), scale);
12680 __ daddu(AT, as_Register(base), T9);
12681 } else {
12682 __ daddu(AT, as_Register(base), as_Register(index));
12683 }
12684 if( Assembler::is_simm16(disp) ) {
12685 __ sw(R0, AT, disp);
12686 } else {
12687 __ move(T9, disp);
12688 __ daddu(AT, AT, T9);
12689 __ sw(R0, AT, 0);
12690 }
12691 }
12692 } else { //index is 0
12693 if ( UseLoongsonISA ) {
12694 if ( Assembler::is_simm16(disp) ) {
12695 __ sw(R0, as_Register(base), disp);
12696 } else {
12697 __ move(T9, disp);
12698 __ gsswx(R0, as_Register(base), T9, 0);
12699 }
12700 } else {
12701 if( Assembler::is_simm16(disp) ) {
12702 __ sw(R0, as_Register(base), disp);
12703 } else {
12704 __ move(T9, disp);
12705 __ daddu(AT, as_Register(base), T9);
12706 __ sw(R0, AT, 0);
12707 }
12708 }
10919 } 12709 }
10920 %} 12710 %}
10921 ins_pipe( ialu_storeI ); 12711 ins_pipe( ialu_storeI );
10922 %} 12712 %}
10923 12713
10948 format %{ "store $mem, $src\t# store float @ storeD_reg" %} 12738 format %{ "store $mem, $src\t# store float @ storeD_reg" %}
10949 ins_encode(store_D_reg_enc(mem, src)); 12739 ins_encode(store_D_reg_enc(mem, src));
10950 ins_pipe( fpu_storeF ); 12740 ins_pipe( fpu_storeF );
10951 %} 12741 %}
10952 12742
12743 instruct storeD_imm0( memory mem, immD0 zero) %{
12744 match(Set mem (StoreD mem zero));
12745
12746 ins_cost(40);
12747 format %{ "store $mem, zero\t# store float @ storeD_imm0" %}
12748 ins_encode %{
12749 int base = $mem$$base;
12750 int index = $mem$$index;
12751 int scale = $mem$$scale;
12752 int disp = $mem$$disp;
12753
12754 __ mtc1(R0, F30);
12755 __ cvt_d_w(F30, F30);
12756
12757 if( index != 0 ) {
12758 if ( UseLoongsonISA ) {
12759 if ( Assembler::is_simm(disp, 8) ) {
12760 if (scale == 0) {
12761 __ gssdxc1(F30, as_Register(base), as_Register(index), disp);
12762 } else {
12763 __ dsll(T9, as_Register(index), scale);
12764 __ gssdxc1(F30, as_Register(base), T9, disp);
12765 }
12766 } else if ( Assembler::is_simm16(disp) ) {
12767 if (scale == 0) {
12768 __ daddu(AT, as_Register(base), as_Register(index));
12769 __ sdc1(F30, AT, disp);
12770 } else {
12771 __ dsll(T9, as_Register(index), scale);
12772 __ daddu(AT, as_Register(base), T9);
12773 __ sdc1(F30, AT, disp);
12774 }
12775 } else {
12776 if (scale == 0) {
12777 __ move(T9, disp);
12778 __ daddu(AT, as_Register(index), T9);
12779 __ gssdxc1(F30, as_Register(base), AT, 0);
12780 } else {
12781 __ move(T9, disp);
12782 __ dsll(AT, as_Register(index), scale);
12783 __ daddu(AT, AT, T9);
12784 __ gssdxc1(F30, as_Register(base), AT, 0);
12785 }
12786 }
12787 } else { // not use loongson isa
12788 if(scale != 0) {
12789 __ dsll(T9, as_Register(index), scale);
12790 __ daddu(AT, as_Register(base), T9);
12791 } else {
12792 __ daddu(AT, as_Register(base), as_Register(index));
12793 }
12794 if( Assembler::is_simm16(disp) ) {
12795 __ sdc1(F30, AT, disp);
12796 } else {
12797 __ move(T9, disp);
12798 __ daddu(AT, AT, T9);
12799 __ sdc1(F30, AT, 0);
12800 }
12801 }
12802 } else {// index is 0
12803 if ( UseLoongsonISA ) {
12804 if ( Assembler::is_simm16(disp) ) {
12805 __ sdc1(F30, as_Register(base), disp);
12806 } else {
12807 __ move(T9, disp);
12808 __ gssdxc1(F30, as_Register(base), T9, 0);
12809 }
12810 } else {
12811 if( Assembler::is_simm16(disp) ) {
12812 __ sdc1(F30, as_Register(base), disp);
12813 } else {
12814 __ move(T9, disp);
12815 __ daddu(AT, as_Register(base), T9);
12816 __ sdc1(F30, AT, 0);
12817 }
12818 }
12819 }
12820 %}
12821 ins_pipe( ialu_storeI );
12822 %}
12823
10953 instruct loadSSI(mRegI dst, stackSlotI src) 12824 instruct loadSSI(mRegI dst, stackSlotI src)
10954 %{ 12825 %{
10955 match(Set dst src); 12826 match(Set dst src);
10956 12827
10957 ins_cost(125); 12828 ins_cost(125);
11106 ins_pipe( pipe_slow ); 12977 ins_pipe( pipe_slow );
11107 ins_pc_relative(1); 12978 ins_pc_relative(1);
11108 %} 12979 %}
11109 12980
11110 // Store CMS card-mark Immediate 12981 // Store CMS card-mark Immediate
11111 instruct storeImmCM(memory mem, mRegI src) %{ 12982 instruct storeImmCM(memory mem, immI8 src) %{
11112 match(Set mem (StoreCM mem src)); 12983 match(Set mem (StoreCM mem src));
11113 12984
11114 ins_cost(500); 12985 ins_cost(150);
11115 format %{ "sb $src, $mem (CMS card-mark) @ storeImmCM" %} 12986 format %{ "MOV8 $mem,$src\t! CMS card-mark imm0" %}
11116 ins_encode(store_B_reg_sync_enc(mem, src)); 12987 // opcode(0xC6);
11117 ins_pipe( ialu_storeI ); 12988 ins_encode(store_B_immI_enc_sync(mem, src));
11118 %}
11119
11120 instruct storeI0CM(memory mem, immI0 zero) %{
11121 match(Set mem (StoreCM mem zero));
11122
11123 ins_cost(450);
11124 format %{ "sb $zero, $mem (CMS card-mark) @ storeI0CM" %}
11125 ins_encode(store_B0_sync_enc(mem));
11126 ins_pipe( ialu_storeI ); 12989 ins_pipe( ialu_storeI );
11127 %} 12990 %}
11128 12991
11129 // Die now 12992 // Die now
11130 instruct ShouldNotReachHere( ) 12993 instruct ShouldNotReachHere( )
11139 13002
11140 __ stop("in ShoudNotReachHere"); 13003 __ stop("in ShoudNotReachHere");
11141 13004
11142 %} 13005 %}
11143 ins_pipe( pipe_jump ); 13006 ins_pipe( pipe_jump );
13007 %}
13008
13009 instruct leaP8Narrow(mRegP dst, indOffset8Narrow mem)
13010 %{
13011 predicate(Universe::narrow_oop_shift() == 0);
13012 match(Set dst mem);
13013
13014 ins_cost(110);
13015 format %{ "leaq $dst, $mem\t# ptr off8narrow @ leaP8Narrow" %}
13016 ins_encode %{
13017 Register dst = $dst$$Register;
13018 Register base = as_Register($mem$$base);
13019 int disp = $mem$$disp;
13020
13021 __ daddiu(dst, base, disp);
13022 %}
13023 ins_pipe( ialu_regI_imm16 );
13024 %}
13025
13026 instruct leaPPosIdxScaleOff8(mRegP dst, basePosIndexScaleOffset8 mem)
13027 %{
13028 match(Set dst mem);
13029
13030 ins_cost(110);
13031 format %{ "leaq $dst, $mem\t# @ PosIdxScaleOff8" %}
13032 ins_encode %{
13033 Register dst = $dst$$Register;
13034 Register base = as_Register($mem$$base);
13035 Register index = as_Register($mem$$index);
13036 int scale = $mem$$scale;
13037 int disp = $mem$$disp;
13038
13039 if (scale == 0) {
13040 __ daddu(AT, base, index);
13041 __ daddiu(dst, AT, disp);
13042 } else {
13043 __ dsll(AT, index, scale);
13044 __ daddu(AT, base, AT);
13045 __ daddiu(dst, AT, disp);
13046 }
13047 %}
13048
13049 ins_pipe( ialu_regI_imm16 );
13050 %}
13051
13052 instruct leaPIdxScale(mRegP dst, indIndexScale mem)
13053 %{
13054 match(Set dst mem);
13055
13056 ins_cost(110);
13057 format %{ "leaq $dst, $mem\t# @ leaPIdxScale" %}
13058 ins_encode %{
13059 Register dst = $dst$$Register;
13060 Register base = as_Register($mem$$base);
13061 Register index = as_Register($mem$$index);
13062 int scale = $mem$$scale;
13063
13064 if (scale == 0) {
13065 __ daddu(dst, base, index);
13066 } else {
13067 __ dsll(AT, index, scale);
13068 __ daddu(dst, base, AT);
13069 }
13070 %}
13071
13072 ins_pipe( ialu_regI_imm16 );
11144 %} 13073 %}
11145 13074
11146 // Jump Direct Conditional - Label defines a relative address from Jcc+1 13075 // Jump Direct Conditional - Label defines a relative address from Jcc+1
11147 instruct jmpLoopEnd(cmpOp cop, mRegI src1, mRegI src2, label labl) %{ 13076 instruct jmpLoopEnd(cmpOp cop, mRegI src1, mRegI src2, label labl) %{
11148 match(CountedLoopEnd cop (CmpI src1 src2)); 13077 match(CountedLoopEnd cop (CmpI src1 src2));
11656 ins_pipe(ialu_regI_regI); 13585 ins_pipe(ialu_regI_regI);
11657 %} 13586 %}
11658 13587
11659 // Match loading integer and casting it to unsigned int in long register. 13588 // Match loading integer and casting it to unsigned int in long register.
11660 // LoadI + ConvI2L + AndL 0xffffffff. 13589 // LoadI + ConvI2L + AndL 0xffffffff.
11661 instruct loadUI2L_rmask(mRegL dst, umemory mem, immL_32bits mask) %{ 13590 instruct loadUI2L_rmask(mRegL dst, memory mem, immL_32bits mask) %{
11662 match(Set dst (AndL (ConvI2L (LoadI mem)) mask)); 13591 match(Set dst (AndL (ConvI2L (LoadI mem)) mask));
11663 13592
11664 format %{ "lwu $dst, $mem \t// zero-extend to long @ loadUI2L_rmask" %} 13593 format %{ "lwu $dst, $mem \t// zero-extend to long @ loadUI2L_rmask" %}
11665 ins_encode (load_N_enc(dst, mem)); 13594 ins_encode (load_N_enc(dst, mem));
11666 ins_pipe(ialu_loadI); 13595 ins_pipe(ialu_loadI);
11667 %} 13596 %}
11668 13597
11669 instruct loadUI2L_lmask(mRegL dst, umemory mem, immL_32bits mask) %{ 13598 instruct loadUI2L_lmask(mRegL dst, memory mem, immL_32bits mask) %{
11670 match(Set dst (AndL mask (ConvI2L (LoadI mem)))); 13599 match(Set dst (AndL mask (ConvI2L (LoadI mem))));
11671 13600
11672 format %{ "lwu $dst, $mem \t// zero-extend to long @ loadUI2L_lmask" %} 13601 format %{ "lwu $dst, $mem \t// zero-extend to long @ loadUI2L_lmask" %}
11673 ins_encode (load_N_enc(dst, mem)); 13602 ins_encode (load_N_enc(dst, mem));
11674 ins_pipe(ialu_loadI); 13603 ins_pipe(ialu_loadI);

mercurial