Mon, 13 Feb 2012 02:29:22 -0800
7141329: Strange values of stack_size in -XX:+TraceMethodHandles output
Reviewed-by: kvn, never
kvn@3390 | 1 | // |
kvn@3390 | 2 | // Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved. |
kvn@3390 | 3 | // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
kvn@3390 | 4 | // |
kvn@3390 | 5 | // This code is free software; you can redistribute it and/or modify it |
kvn@3390 | 6 | // under the terms of the GNU General Public License version 2 only, as |
kvn@3390 | 7 | // published by the Free Software Foundation. |
kvn@3390 | 8 | // |
kvn@3390 | 9 | // This code is distributed in the hope that it will be useful, but WITHOUT |
kvn@3390 | 10 | // ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
kvn@3390 | 11 | // FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
kvn@3390 | 12 | // version 2 for more details (a copy is included in the LICENSE file that |
kvn@3390 | 13 | // accompanied this code). |
kvn@3390 | 14 | // |
kvn@3390 | 15 | // You should have received a copy of the GNU General Public License version |
kvn@3390 | 16 | // 2 along with this work; if not, write to the Free Software Foundation, |
kvn@3390 | 17 | // Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. |
kvn@3390 | 18 | // |
kvn@3390 | 19 | // Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
kvn@3390 | 20 | // or visit www.oracle.com if you need additional information or have any |
kvn@3390 | 21 | // questions. |
kvn@3390 | 22 | // |
kvn@3390 | 23 | // |
kvn@3390 | 24 | |
kvn@3390 | 25 | // X86 Common Architecture Description File |
kvn@3390 | 26 | |
// Platform-dependent accessors for the sign-mask / sign-flip bit patterns
// used by the abs/neg instruction encodings later in this file.
source %{
  // Float masks come from different places depending on platform.
#ifdef _LP64
  // 64-bit: the constants are provided by generated stub routines.
  static address float_signmask() { return StubRoutines::x86::float_sign_mask(); }
  static address float_signflip() { return StubRoutines::x86::float_sign_flip(); }
  static address double_signmask() { return StubRoutines::x86::double_sign_mask(); }
  static address double_signflip() { return StubRoutines::x86::double_sign_flip(); }
#else
  // 32-bit: the constants come from *_pool symbols declared elsewhere
  // (not visible in this chunk — presumably in the 32-bit AD file).
  static address float_signmask() { return (address)float_signmask_pool; }
  static address float_signflip() { return (address)float_signflip_pool; }
  static address double_signmask() { return (address)double_signmask_pool; }
  static address double_signflip() { return (address)double_signflip_pool; }
#endif
%}
kvn@3390 | 41 | |
kvn@3390 | 42 | // INSTRUCTIONS -- Platform independent definitions (same for 32- and 64-bit) |
kvn@3390 | 43 | |
// ======== ADD (scalar float/double) ========
// SSE forms require UseAVX == 0 because the legacy two-operand encodings
// read and write dst (note dst on both sides of the match tree).  The AVX
// forms are three-operand, so dst may differ from the sources.
// Float forms need UseSSE >= 1; double forms need UseSSE >= 2.

// dst = dst + src (two-operand SSE addss clobbers dst).
instruct addF_reg(regF dst, regF src) %{
  predicate((UseSSE>=1) && (UseAVX == 0));
  match(Set dst (AddF dst src));

  format %{ "addss $dst, $src" %}
  ins_cost(150);
  ins_encode %{
    __ addss($dst$$XMMRegister, $src$$XMMRegister);
  %}
  ins_pipe(pipe_slow);
%}

// dst = dst + [mem]: the LoadF is folded into addss's memory operand.
instruct addF_mem(regF dst, memory src) %{
  predicate((UseSSE>=1) && (UseAVX == 0));
  match(Set dst (AddF dst (LoadF src)));

  format %{ "addss $dst, $src" %}
  ins_cost(150);
  ins_encode %{
    __ addss($dst$$XMMRegister, $src$$Address);
  %}
  ins_pipe(pipe_slow);
%}

// dst = dst + con: the constant is placed in the constant table and
// addressed via $constantaddress.
instruct addF_imm(regF dst, immF con) %{
  predicate((UseSSE>=1) && (UseAVX == 0));
  match(Set dst (AddF dst con));
  format %{ "addss $dst, [$constantaddress]\t# load from constant table: float=$con" %}
  ins_cost(150);
  ins_encode %{
    __ addss($dst$$XMMRegister, $constantaddress($con));
  %}
  ins_pipe(pipe_slow);
%}

// AVX: dst = src1 + src2 (non-destructive three-operand form).
instruct vaddF_reg(regF dst, regF src1, regF src2) %{
  predicate(UseAVX > 0);
  match(Set dst (AddF src1 src2));

  format %{ "vaddss $dst, $src1, $src2" %}
  ins_cost(150);
  ins_encode %{
    __ vaddss($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister);
  %}
  ins_pipe(pipe_slow);
%}

// AVX: dst = src1 + [mem].
instruct vaddF_mem(regF dst, regF src1, memory src2) %{
  predicate(UseAVX > 0);
  match(Set dst (AddF src1 (LoadF src2)));

  format %{ "vaddss $dst, $src1, $src2" %}
  ins_cost(150);
  ins_encode %{
    __ vaddss($dst$$XMMRegister, $src1$$XMMRegister, $src2$$Address);
  %}
  ins_pipe(pipe_slow);
%}

// AVX: dst = src + con (constant-table operand).
instruct vaddF_imm(regF dst, regF src, immF con) %{
  predicate(UseAVX > 0);
  match(Set dst (AddF src con));

  format %{ "vaddss $dst, $src, [$constantaddress]\t# load from constant table: float=$con" %}
  ins_cost(150);
  ins_encode %{
    __ vaddss($dst$$XMMRegister, $src$$XMMRegister, $constantaddress($con));
  %}
  ins_pipe(pipe_slow);
%}

// Double-precision variants of the above (addsd / vaddsd).

instruct addD_reg(regD dst, regD src) %{
  predicate((UseSSE>=2) && (UseAVX == 0));
  match(Set dst (AddD dst src));

  format %{ "addsd $dst, $src" %}
  ins_cost(150);
  ins_encode %{
    __ addsd($dst$$XMMRegister, $src$$XMMRegister);
  %}
  ins_pipe(pipe_slow);
%}

instruct addD_mem(regD dst, memory src) %{
  predicate((UseSSE>=2) && (UseAVX == 0));
  match(Set dst (AddD dst (LoadD src)));

  format %{ "addsd $dst, $src" %}
  ins_cost(150);
  ins_encode %{
    __ addsd($dst$$XMMRegister, $src$$Address);
  %}
  ins_pipe(pipe_slow);
%}

instruct addD_imm(regD dst, immD con) %{
  predicate((UseSSE>=2) && (UseAVX == 0));
  match(Set dst (AddD dst con));
  format %{ "addsd $dst, [$constantaddress]\t# load from constant table: double=$con" %}
  ins_cost(150);
  ins_encode %{
    __ addsd($dst$$XMMRegister, $constantaddress($con));
  %}
  ins_pipe(pipe_slow);
%}

instruct vaddD_reg(regD dst, regD src1, regD src2) %{
  predicate(UseAVX > 0);
  match(Set dst (AddD src1 src2));

  format %{ "vaddsd $dst, $src1, $src2" %}
  ins_cost(150);
  ins_encode %{
    __ vaddsd($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister);
  %}
  ins_pipe(pipe_slow);
%}

instruct vaddD_mem(regD dst, regD src1, memory src2) %{
  predicate(UseAVX > 0);
  match(Set dst (AddD src1 (LoadD src2)));

  format %{ "vaddsd $dst, $src1, $src2" %}
  ins_cost(150);
  ins_encode %{
    __ vaddsd($dst$$XMMRegister, $src1$$XMMRegister, $src2$$Address);
  %}
  ins_pipe(pipe_slow);
%}

instruct vaddD_imm(regD dst, regD src, immD con) %{
  predicate(UseAVX > 0);
  match(Set dst (AddD src con));

  format %{ "vaddsd $dst, $src, [$constantaddress]\t# load from constant table: double=$con" %}
  ins_cost(150);
  ins_encode %{
    __ vaddsd($dst$$XMMRegister, $src$$XMMRegister, $constantaddress($con));
  %}
  ins_pipe(pipe_slow);
%}
kvn@3390 | 185 | |
// ======== SUB (scalar float/double) ========
// Same structure as the ADD family: destructive two-operand SSE forms
// gated on UseAVX == 0, non-destructive three-operand AVX forms otherwise.
// Note: subtraction is not commutative, so operand order in the match
// tree (src1 - src2) must be preserved exactly in the encoding.

// dst = dst - src.
instruct subF_reg(regF dst, regF src) %{
  predicate((UseSSE>=1) && (UseAVX == 0));
  match(Set dst (SubF dst src));

  format %{ "subss $dst, $src" %}
  ins_cost(150);
  ins_encode %{
    __ subss($dst$$XMMRegister, $src$$XMMRegister);
  %}
  ins_pipe(pipe_slow);
%}

// dst = dst - [mem]: LoadF folded into the memory operand.
instruct subF_mem(regF dst, memory src) %{
  predicate((UseSSE>=1) && (UseAVX == 0));
  match(Set dst (SubF dst (LoadF src)));

  format %{ "subss $dst, $src" %}
  ins_cost(150);
  ins_encode %{
    __ subss($dst$$XMMRegister, $src$$Address);
  %}
  ins_pipe(pipe_slow);
%}

// dst = dst - con (constant-table operand).
instruct subF_imm(regF dst, immF con) %{
  predicate((UseSSE>=1) && (UseAVX == 0));
  match(Set dst (SubF dst con));
  format %{ "subss $dst, [$constantaddress]\t# load from constant table: float=$con" %}
  ins_cost(150);
  ins_encode %{
    __ subss($dst$$XMMRegister, $constantaddress($con));
  %}
  ins_pipe(pipe_slow);
%}

// AVX: dst = src1 - src2.
instruct vsubF_reg(regF dst, regF src1, regF src2) %{
  predicate(UseAVX > 0);
  match(Set dst (SubF src1 src2));

  format %{ "vsubss $dst, $src1, $src2" %}
  ins_cost(150);
  ins_encode %{
    __ vsubss($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister);
  %}
  ins_pipe(pipe_slow);
%}

// AVX: dst = src1 - [mem].
instruct vsubF_mem(regF dst, regF src1, memory src2) %{
  predicate(UseAVX > 0);
  match(Set dst (SubF src1 (LoadF src2)));

  format %{ "vsubss $dst, $src1, $src2" %}
  ins_cost(150);
  ins_encode %{
    __ vsubss($dst$$XMMRegister, $src1$$XMMRegister, $src2$$Address);
  %}
  ins_pipe(pipe_slow);
%}

// AVX: dst = src - con.
instruct vsubF_imm(regF dst, regF src, immF con) %{
  predicate(UseAVX > 0);
  match(Set dst (SubF src con));

  format %{ "vsubss $dst, $src, [$constantaddress]\t# load from constant table: float=$con" %}
  ins_cost(150);
  ins_encode %{
    __ vsubss($dst$$XMMRegister, $src$$XMMRegister, $constantaddress($con));
  %}
  ins_pipe(pipe_slow);
%}

// Double-precision variants (subsd / vsubsd).

instruct subD_reg(regD dst, regD src) %{
  predicate((UseSSE>=2) && (UseAVX == 0));
  match(Set dst (SubD dst src));

  format %{ "subsd $dst, $src" %}
  ins_cost(150);
  ins_encode %{
    __ subsd($dst$$XMMRegister, $src$$XMMRegister);
  %}
  ins_pipe(pipe_slow);
%}

instruct subD_mem(regD dst, memory src) %{
  predicate((UseSSE>=2) && (UseAVX == 0));
  match(Set dst (SubD dst (LoadD src)));

  format %{ "subsd $dst, $src" %}
  ins_cost(150);
  ins_encode %{
    __ subsd($dst$$XMMRegister, $src$$Address);
  %}
  ins_pipe(pipe_slow);
%}

instruct subD_imm(regD dst, immD con) %{
  predicate((UseSSE>=2) && (UseAVX == 0));
  match(Set dst (SubD dst con));
  format %{ "subsd $dst, [$constantaddress]\t# load from constant table: double=$con" %}
  ins_cost(150);
  ins_encode %{
    __ subsd($dst$$XMMRegister, $constantaddress($con));
  %}
  ins_pipe(pipe_slow);
%}

instruct vsubD_reg(regD dst, regD src1, regD src2) %{
  predicate(UseAVX > 0);
  match(Set dst (SubD src1 src2));

  format %{ "vsubsd $dst, $src1, $src2" %}
  ins_cost(150);
  ins_encode %{
    __ vsubsd($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister);
  %}
  ins_pipe(pipe_slow);
%}

instruct vsubD_mem(regD dst, regD src1, memory src2) %{
  predicate(UseAVX > 0);
  match(Set dst (SubD src1 (LoadD src2)));

  format %{ "vsubsd $dst, $src1, $src2" %}
  ins_cost(150);
  ins_encode %{
    __ vsubsd($dst$$XMMRegister, $src1$$XMMRegister, $src2$$Address);
  %}
  ins_pipe(pipe_slow);
%}

instruct vsubD_imm(regD dst, regD src, immD con) %{
  predicate(UseAVX > 0);
  match(Set dst (SubD src con));

  format %{ "vsubsd $dst, $src, [$constantaddress]\t# load from constant table: double=$con" %}
  ins_cost(150);
  ins_encode %{
    __ vsubsd($dst$$XMMRegister, $src$$XMMRegister, $constantaddress($con));
  %}
  ins_pipe(pipe_slow);
%}
kvn@3390 | 327 | |
// ======== MUL (scalar float/double) ========
// Same layout as ADD/SUB: destructive SSE forms (UseAVX == 0) and
// three-operand AVX forms, each with register / memory / constant-table
// operand flavors.

// dst = dst * src.
instruct mulF_reg(regF dst, regF src) %{
  predicate((UseSSE>=1) && (UseAVX == 0));
  match(Set dst (MulF dst src));

  format %{ "mulss $dst, $src" %}
  ins_cost(150);
  ins_encode %{
    __ mulss($dst$$XMMRegister, $src$$XMMRegister);
  %}
  ins_pipe(pipe_slow);
%}

// dst = dst * [mem]: LoadF folded into the memory operand.
instruct mulF_mem(regF dst, memory src) %{
  predicate((UseSSE>=1) && (UseAVX == 0));
  match(Set dst (MulF dst (LoadF src)));

  format %{ "mulss $dst, $src" %}
  ins_cost(150);
  ins_encode %{
    __ mulss($dst$$XMMRegister, $src$$Address);
  %}
  ins_pipe(pipe_slow);
%}

// dst = dst * con (constant-table operand).
instruct mulF_imm(regF dst, immF con) %{
  predicate((UseSSE>=1) && (UseAVX == 0));
  match(Set dst (MulF dst con));
  format %{ "mulss $dst, [$constantaddress]\t# load from constant table: float=$con" %}
  ins_cost(150);
  ins_encode %{
    __ mulss($dst$$XMMRegister, $constantaddress($con));
  %}
  ins_pipe(pipe_slow);
%}

// AVX: dst = src1 * src2.
instruct vmulF_reg(regF dst, regF src1, regF src2) %{
  predicate(UseAVX > 0);
  match(Set dst (MulF src1 src2));

  format %{ "vmulss $dst, $src1, $src2" %}
  ins_cost(150);
  ins_encode %{
    __ vmulss($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister);
  %}
  ins_pipe(pipe_slow);
%}

// AVX: dst = src1 * [mem].
instruct vmulF_mem(regF dst, regF src1, memory src2) %{
  predicate(UseAVX > 0);
  match(Set dst (MulF src1 (LoadF src2)));

  format %{ "vmulss $dst, $src1, $src2" %}
  ins_cost(150);
  ins_encode %{
    __ vmulss($dst$$XMMRegister, $src1$$XMMRegister, $src2$$Address);
  %}
  ins_pipe(pipe_slow);
%}

// AVX: dst = src * con.
instruct vmulF_imm(regF dst, regF src, immF con) %{
  predicate(UseAVX > 0);
  match(Set dst (MulF src con));

  format %{ "vmulss $dst, $src, [$constantaddress]\t# load from constant table: float=$con" %}
  ins_cost(150);
  ins_encode %{
    __ vmulss($dst$$XMMRegister, $src$$XMMRegister, $constantaddress($con));
  %}
  ins_pipe(pipe_slow);
%}

// Double-precision variants (mulsd / vmulsd).

instruct mulD_reg(regD dst, regD src) %{
  predicate((UseSSE>=2) && (UseAVX == 0));
  match(Set dst (MulD dst src));

  format %{ "mulsd $dst, $src" %}
  ins_cost(150);
  ins_encode %{
    __ mulsd($dst$$XMMRegister, $src$$XMMRegister);
  %}
  ins_pipe(pipe_slow);
%}

instruct mulD_mem(regD dst, memory src) %{
  predicate((UseSSE>=2) && (UseAVX == 0));
  match(Set dst (MulD dst (LoadD src)));

  format %{ "mulsd $dst, $src" %}
  ins_cost(150);
  ins_encode %{
    __ mulsd($dst$$XMMRegister, $src$$Address);
  %}
  ins_pipe(pipe_slow);
%}

instruct mulD_imm(regD dst, immD con) %{
  predicate((UseSSE>=2) && (UseAVX == 0));
  match(Set dst (MulD dst con));
  format %{ "mulsd $dst, [$constantaddress]\t# load from constant table: double=$con" %}
  ins_cost(150);
  ins_encode %{
    __ mulsd($dst$$XMMRegister, $constantaddress($con));
  %}
  ins_pipe(pipe_slow);
%}

instruct vmulD_reg(regD dst, regD src1, regD src2) %{
  predicate(UseAVX > 0);
  match(Set dst (MulD src1 src2));

  format %{ "vmulsd $dst, $src1, $src2" %}
  ins_cost(150);
  ins_encode %{
    __ vmulsd($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister);
  %}
  ins_pipe(pipe_slow);
%}

instruct vmulD_mem(regD dst, regD src1, memory src2) %{
  predicate(UseAVX > 0);
  match(Set dst (MulD src1 (LoadD src2)));

  format %{ "vmulsd $dst, $src1, $src2" %}
  ins_cost(150);
  ins_encode %{
    __ vmulsd($dst$$XMMRegister, $src1$$XMMRegister, $src2$$Address);
  %}
  ins_pipe(pipe_slow);
%}

instruct vmulD_imm(regD dst, regD src, immD con) %{
  predicate(UseAVX > 0);
  match(Set dst (MulD src con));

  format %{ "vmulsd $dst, $src, [$constantaddress]\t# load from constant table: double=$con" %}
  ins_cost(150);
  ins_encode %{
    __ vmulsd($dst$$XMMRegister, $src$$XMMRegister, $constantaddress($con));
  %}
  ins_pipe(pipe_slow);
%}
kvn@3390 | 469 | |
// ======== DIV (scalar float/double) ========
// Same layout as the other arithmetic families.  Like SUB, division is
// non-commutative, so operand order (src1 / src2) is significant.
// NOTE(review): ins_cost(150) matches the other scalar FP ops here even
// though hardware divide latency is typically much higher — presumably
// intentional for matcher uniformity; confirm before relying on it.

// dst = dst / src.
instruct divF_reg(regF dst, regF src) %{
  predicate((UseSSE>=1) && (UseAVX == 0));
  match(Set dst (DivF dst src));

  format %{ "divss $dst, $src" %}
  ins_cost(150);
  ins_encode %{
    __ divss($dst$$XMMRegister, $src$$XMMRegister);
  %}
  ins_pipe(pipe_slow);
%}

// dst = dst / [mem]: LoadF folded into the memory operand.
instruct divF_mem(regF dst, memory src) %{
  predicate((UseSSE>=1) && (UseAVX == 0));
  match(Set dst (DivF dst (LoadF src)));

  format %{ "divss $dst, $src" %}
  ins_cost(150);
  ins_encode %{
    __ divss($dst$$XMMRegister, $src$$Address);
  %}
  ins_pipe(pipe_slow);
%}

// dst = dst / con (constant-table operand).
instruct divF_imm(regF dst, immF con) %{
  predicate((UseSSE>=1) && (UseAVX == 0));
  match(Set dst (DivF dst con));
  format %{ "divss $dst, [$constantaddress]\t# load from constant table: float=$con" %}
  ins_cost(150);
  ins_encode %{
    __ divss($dst$$XMMRegister, $constantaddress($con));
  %}
  ins_pipe(pipe_slow);
%}

// AVX: dst = src1 / src2.
instruct vdivF_reg(regF dst, regF src1, regF src2) %{
  predicate(UseAVX > 0);
  match(Set dst (DivF src1 src2));

  format %{ "vdivss $dst, $src1, $src2" %}
  ins_cost(150);
  ins_encode %{
    __ vdivss($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister);
  %}
  ins_pipe(pipe_slow);
%}

// AVX: dst = src1 / [mem].
instruct vdivF_mem(regF dst, regF src1, memory src2) %{
  predicate(UseAVX > 0);
  match(Set dst (DivF src1 (LoadF src2)));

  format %{ "vdivss $dst, $src1, $src2" %}
  ins_cost(150);
  ins_encode %{
    __ vdivss($dst$$XMMRegister, $src1$$XMMRegister, $src2$$Address);
  %}
  ins_pipe(pipe_slow);
%}

// AVX: dst = src / con.
instruct vdivF_imm(regF dst, regF src, immF con) %{
  predicate(UseAVX > 0);
  match(Set dst (DivF src con));

  format %{ "vdivss $dst, $src, [$constantaddress]\t# load from constant table: float=$con" %}
  ins_cost(150);
  ins_encode %{
    __ vdivss($dst$$XMMRegister, $src$$XMMRegister, $constantaddress($con));
  %}
  ins_pipe(pipe_slow);
%}

// Double-precision variants (divsd / vdivsd).

instruct divD_reg(regD dst, regD src) %{
  predicate((UseSSE>=2) && (UseAVX == 0));
  match(Set dst (DivD dst src));

  format %{ "divsd $dst, $src" %}
  ins_cost(150);
  ins_encode %{
    __ divsd($dst$$XMMRegister, $src$$XMMRegister);
  %}
  ins_pipe(pipe_slow);
%}

instruct divD_mem(regD dst, memory src) %{
  predicate((UseSSE>=2) && (UseAVX == 0));
  match(Set dst (DivD dst (LoadD src)));

  format %{ "divsd $dst, $src" %}
  ins_cost(150);
  ins_encode %{
    __ divsd($dst$$XMMRegister, $src$$Address);
  %}
  ins_pipe(pipe_slow);
%}

instruct divD_imm(regD dst, immD con) %{
  predicate((UseSSE>=2) && (UseAVX == 0));
  match(Set dst (DivD dst con));
  format %{ "divsd $dst, [$constantaddress]\t# load from constant table: double=$con" %}
  ins_cost(150);
  ins_encode %{
    __ divsd($dst$$XMMRegister, $constantaddress($con));
  %}
  ins_pipe(pipe_slow);
%}

instruct vdivD_reg(regD dst, regD src1, regD src2) %{
  predicate(UseAVX > 0);
  match(Set dst (DivD src1 src2));

  format %{ "vdivsd $dst, $src1, $src2" %}
  ins_cost(150);
  ins_encode %{
    __ vdivsd($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister);
  %}
  ins_pipe(pipe_slow);
%}

instruct vdivD_mem(regD dst, regD src1, memory src2) %{
  predicate(UseAVX > 0);
  match(Set dst (DivD src1 (LoadD src2)));

  format %{ "vdivsd $dst, $src1, $src2" %}
  ins_cost(150);
  ins_encode %{
    __ vdivsd($dst$$XMMRegister, $src1$$XMMRegister, $src2$$Address);
  %}
  ins_pipe(pipe_slow);
%}

instruct vdivD_imm(regD dst, regD src, immD con) %{
  predicate(UseAVX > 0);
  match(Set dst (DivD src con));

  format %{ "vdivsd $dst, $src, [$constantaddress]\t# load from constant table: double=$con" %}
  ins_cost(150);
  ins_encode %{
    __ vdivsd($dst$$XMMRegister, $src$$XMMRegister, $constantaddress($con));
  %}
  ins_pipe(pipe_slow);
%}
kvn@3390 | 611 | |
// ======== ABS (scalar float/double) ========
// |x| is computed by clearing the IEEE sign bit: AND with 0x7fffffff
// (float) or 0x7fffffffffffffff (double).  The mask addresses come from
// the float_signmask()/double_signmask() helpers in the source block at
// the top of this file.

// SSE: in-place, dst = |dst|.
instruct absF_reg(regF dst) %{
  predicate((UseSSE>=1) && (UseAVX == 0));
  match(Set dst (AbsF dst));
  ins_cost(150);
  format %{ "andps $dst, [0x7fffffff]\t# abs float by sign masking" %}
  ins_encode %{
    __ andps($dst$$XMMRegister, ExternalAddress(float_signmask()));
  %}
  ins_pipe(pipe_slow);
%}

// AVX: non-destructive, dst = |src|.
instruct vabsF_reg(regF dst, regF src) %{
  predicate(UseAVX > 0);
  match(Set dst (AbsF src));
  ins_cost(150);
  format %{ "vandps $dst, $src, [0x7fffffff]\t# abs float by sign masking" %}
  ins_encode %{
    __ vandps($dst$$XMMRegister, $src$$XMMRegister,
              ExternalAddress(float_signmask()));
  %}
  ins_pipe(pipe_slow);
%}

// SSE: in-place, dst = |dst| (double).
instruct absD_reg(regD dst) %{
  predicate((UseSSE>=2) && (UseAVX == 0));
  match(Set dst (AbsD dst));
  ins_cost(150);
  format %{ "andpd $dst, [0x7fffffffffffffff]\t"
            "# abs double by sign masking" %}
  ins_encode %{
    __ andpd($dst$$XMMRegister, ExternalAddress(double_signmask()));
  %}
  ins_pipe(pipe_slow);
%}

// AVX: non-destructive, dst = |src| (double).
instruct vabsD_reg(regD dst, regD src) %{
  predicate(UseAVX > 0);
  match(Set dst (AbsD src));
  ins_cost(150);
  format %{ "vandpd $dst, $src, [0x7fffffffffffffff]\t"
            "# abs double by sign masking" %}
  ins_encode %{
    __ vandpd($dst$$XMMRegister, $src$$XMMRegister,
              ExternalAddress(double_signmask()));
  %}
  ins_pipe(pipe_slow);
%}
kvn@3390 | 659 | |
// ======== NEG (scalar float/double) ========
// -x is computed by flipping the IEEE sign bit: XOR with 0x80000000
// (float) or 0x8000000000000000 (double), using the
// float_signflip()/double_signflip() helpers from the source block.

// SSE: in-place, dst = -dst.
instruct negF_reg(regF dst) %{
  predicate((UseSSE>=1) && (UseAVX == 0));
  match(Set dst (NegF dst));
  ins_cost(150);
  format %{ "xorps $dst, [0x80000000]\t# neg float by sign flipping" %}
  ins_encode %{
    __ xorps($dst$$XMMRegister, ExternalAddress(float_signflip()));
  %}
  ins_pipe(pipe_slow);
%}

// AVX: non-destructive, dst = -src.
instruct vnegF_reg(regF dst, regF src) %{
  predicate(UseAVX > 0);
  match(Set dst (NegF src));
  ins_cost(150);
  format %{ "vxorps $dst, $src, [0x80000000]\t# neg float by sign flipping" %}
  ins_encode %{
    __ vxorps($dst$$XMMRegister, $src$$XMMRegister,
              ExternalAddress(float_signflip()));
  %}
  ins_pipe(pipe_slow);
%}

// SSE: in-place, dst = -dst (double).
instruct negD_reg(regD dst) %{
  predicate((UseSSE>=2) && (UseAVX == 0));
  match(Set dst (NegD dst));
  ins_cost(150);
  format %{ "xorpd $dst, [0x8000000000000000]\t"
            "# neg double by sign flipping" %}
  ins_encode %{
    __ xorpd($dst$$XMMRegister, ExternalAddress(double_signflip()));
  %}
  ins_pipe(pipe_slow);
%}

// AVX: non-destructive, dst = -src (double).
instruct vnegD_reg(regD dst, regD src) %{
  predicate(UseAVX > 0);
  match(Set dst (NegD src));
  ins_cost(150);
  format %{ "vxorpd $dst, $src, [0x8000000000000000]\t"
            "# neg double by sign flipping" %}
  ins_encode %{
    __ vxorpd($dst$$XMMRegister, $src$$XMMRegister,
              ExternalAddress(double_signflip()));
  %}
  ins_pipe(pipe_slow);
%}
kvn@3390 | 707 | |
// ======== SQRT (scalar float/double) ========
// The float rules match the ideal-graph shape ConvD2F(SqrtD(ConvF2D x)),
// i.e. a sqrt performed in double precision on a widened float, and emit
// a single sqrtss.  NOTE(review): this folding assumes
// (float)sqrt((double)x) == sqrtss(x) — a standard single/double rounding
// identity for sqrt; confirm against the original rule's provenance if
// modifying.  Unlike the arithmetic families above, these rules have no
// separate AVX variants: the predicates only require UseSSE levels.

// Float sqrt via the folded double pattern, register source.
instruct sqrtF_reg(regF dst, regF src) %{
  predicate(UseSSE>=1);
  match(Set dst (ConvD2F (SqrtD (ConvF2D src))));

  format %{ "sqrtss $dst, $src" %}
  ins_cost(150);
  ins_encode %{
    __ sqrtss($dst$$XMMRegister, $src$$XMMRegister);
  %}
  ins_pipe(pipe_slow);
%}

// Same pattern with the float load folded into the memory operand.
instruct sqrtF_mem(regF dst, memory src) %{
  predicate(UseSSE>=1);
  match(Set dst (ConvD2F (SqrtD (ConvF2D (LoadF src)))));

  format %{ "sqrtss $dst, $src" %}
  ins_cost(150);
  ins_encode %{
    __ sqrtss($dst$$XMMRegister, $src$$Address);
  %}
  ins_pipe(pipe_slow);
%}

// Same pattern with a float constant sourced from the constant table.
instruct sqrtF_imm(regF dst, immF con) %{
  predicate(UseSSE>=1);
  match(Set dst (ConvD2F (SqrtD (ConvF2D con))));
  format %{ "sqrtss $dst, [$constantaddress]\t# load from constant table: float=$con" %}
  ins_cost(150);
  ins_encode %{
    __ sqrtss($dst$$XMMRegister, $constantaddress($con));
  %}
  ins_pipe(pipe_slow);
%}

// Double sqrt, register source.
instruct sqrtD_reg(regD dst, regD src) %{
  predicate(UseSSE>=2);
  match(Set dst (SqrtD src));

  format %{ "sqrtsd $dst, $src" %}
  ins_cost(150);
  ins_encode %{
    __ sqrtsd($dst$$XMMRegister, $src$$XMMRegister);
  %}
  ins_pipe(pipe_slow);
%}

// Double sqrt with the load folded into the memory operand.
instruct sqrtD_mem(regD dst, memory src) %{
  predicate(UseSSE>=2);
  match(Set dst (SqrtD (LoadD src)));

  format %{ "sqrtsd $dst, $src" %}
  ins_cost(150);
  ins_encode %{
    __ sqrtsd($dst$$XMMRegister, $src$$Address);
  %}
  ins_pipe(pipe_slow);
%}

// Double sqrt of a constant from the constant table.
instruct sqrtD_imm(regD dst, immD con) %{
  predicate(UseSSE>=2);
  match(Set dst (SqrtD con));
  format %{ "sqrtsd $dst, [$constantaddress]\t# load from constant table: double=$con" %}
  ins_cost(150);
  ins_encode %{
    __ sqrtsd($dst$$XMMRegister, $constantaddress($con));
  %}
  ins_pipe(pipe_slow);
%}
kvn@3390 | 777 |