Tue, 23 Oct 2012 13:06:37 -0700
8001183: incorrect results of char vectors right shift operation
Summary: do vector right shift operation for small int types only after loads
Reviewed-by: jrose, dlong
kvn@3390 | 1 | // |
kvn@3577 | 2 | // Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved. |
kvn@3390 | 3 | // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
kvn@3390 | 4 | // |
kvn@3390 | 5 | // This code is free software; you can redistribute it and/or modify it |
kvn@3390 | 6 | // under the terms of the GNU General Public License version 2 only, as |
kvn@3390 | 7 | // published by the Free Software Foundation. |
kvn@3390 | 8 | // |
kvn@3390 | 9 | // This code is distributed in the hope that it will be useful, but WITHOUT |
kvn@3390 | 10 | // ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
kvn@3390 | 11 | // FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
kvn@3390 | 12 | // version 2 for more details (a copy is included in the LICENSE file that |
kvn@3390 | 13 | // accompanied this code). |
kvn@3390 | 14 | // |
kvn@3390 | 15 | // You should have received a copy of the GNU General Public License version |
kvn@3390 | 16 | // 2 along with this work; if not, write to the Free Software Foundation, |
kvn@3390 | 17 | // Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. |
kvn@3390 | 18 | // |
kvn@3390 | 19 | // Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
kvn@3390 | 20 | // or visit www.oracle.com if you need additional information or have any |
kvn@3390 | 21 | // questions. |
kvn@3390 | 22 | // |
kvn@3390 | 23 | // |
kvn@3390 | 24 | |
kvn@3390 | 25 | // X86 Common Architecture Description File |
kvn@3390 | 26 | |
kvn@3882 | 27 | //----------REGISTER DEFINITION BLOCK------------------------------------------ |
kvn@3882 | 28 | // This information is used by the matcher and the register allocator to |
kvn@3882 | 29 | // describe individual registers and classes of registers within the target |
kvn@3882 | 30 | // architecture. |
kvn@3882 | 31 | |
kvn@3882 | 32 | register %{ |
kvn@3882 | 33 | //----------Architecture Description Register Definitions---------------------- |
kvn@3882 | 34 | // General Registers |
kvn@3882 | 35 | // "reg_def" name ( register save type, C convention save type, |
kvn@3882 | 36 | // ideal register type, encoding ); |
kvn@3882 | 37 | // Register Save Types: |
kvn@3882 | 38 | // |
kvn@3882 | 39 | // NS = No-Save: The register allocator assumes that these registers |
kvn@3882 | 40 | // can be used without saving upon entry to the method, & |
kvn@3882 | 41 | // that they do not need to be saved at call sites. |
kvn@3882 | 42 | // |
kvn@3882 | 43 | // SOC = Save-On-Call: The register allocator assumes that these registers |
kvn@3882 | 44 | // can be used without saving upon entry to the method, |
kvn@3882 | 45 | // but that they must be saved at call sites. |
kvn@3882 | 46 | // |
kvn@3882 | 47 | // SOE = Save-On-Entry: The register allocator assumes that these registers |
kvn@3882 | 48 | // must be saved before using them upon entry to the |
kvn@3882 | 49 | // method, but they do not need to be saved at call |
kvn@3882 | 50 | // sites. |
kvn@3882 | 51 | // |
kvn@3882 | 52 | // AS = Always-Save: The register allocator assumes that these registers |
kvn@3882 | 53 | // must be saved before using them upon entry to the |
kvn@3882 | 54 | // method, & that they must be saved at call sites. |
kvn@3882 | 55 | // |
kvn@3882 | 56 | // Ideal Register Type is used to determine how to save & restore a |
kvn@3882 | 57 | // register. Op_RegI will get spilled with LoadI/StoreI, Op_RegP will get |
kvn@3882 | 58 | // spilled with LoadP/StoreP. If the register supports both, use Op_RegI. |
kvn@3882 | 59 | // |
kvn@3882 | 60 | // The encoding number is the actual bit-pattern placed into the opcodes. |
kvn@3882 | 61 | |
kvn@3882 | 62 | // XMM registers. 256-bit registers or 8 words each, labeled (a)-h. |
kvn@3882 | 63 | // Word a in each register holds a Float, words ab hold a Double. |
kvn@3882 | 64 | // The whole registers are used in SSE4.2 version intrinsics, |
kvn@3882 | 65 | // array copy stubs and superword operations (see UseSSE42Intrinsics, |
kvn@3882 | 66 | // UseXMMForArrayCopy and UseSuperword flags). |
kvn@3882 | 67 | // XMM8-XMM15 must be encoded with REX (VEX for UseAVX). |
kvn@3882 | 68 | // Linux ABI: No register preserved across function calls |
kvn@3882 | 69 | // XMM0-XMM7 might hold parameters |
kvn@3882 | 70 | // Windows ABI: XMM6-XMM15 preserved across function calls |
kvn@3882 | 71 | // XMM0-XMM3 might hold parameters |
kvn@3882 | 72 | |
kvn@3882 | 73 | reg_def XMM0 ( SOC, SOC, Op_RegF, 0, xmm0->as_VMReg()); |
kvn@3929 | 74 | reg_def XMM0b( SOC, SOC, Op_RegF, 0, xmm0->as_VMReg()->next(1)); |
kvn@3929 | 75 | reg_def XMM0c( SOC, SOC, Op_RegF, 0, xmm0->as_VMReg()->next(2)); |
kvn@3929 | 76 | reg_def XMM0d( SOC, SOC, Op_RegF, 0, xmm0->as_VMReg()->next(3)); |
kvn@3929 | 77 | reg_def XMM0e( SOC, SOC, Op_RegF, 0, xmm0->as_VMReg()->next(4)); |
kvn@3929 | 78 | reg_def XMM0f( SOC, SOC, Op_RegF, 0, xmm0->as_VMReg()->next(5)); |
kvn@3929 | 79 | reg_def XMM0g( SOC, SOC, Op_RegF, 0, xmm0->as_VMReg()->next(6)); |
kvn@3929 | 80 | reg_def XMM0h( SOC, SOC, Op_RegF, 0, xmm0->as_VMReg()->next(7)); |
kvn@3882 | 81 | |
kvn@3882 | 82 | reg_def XMM1 ( SOC, SOC, Op_RegF, 1, xmm1->as_VMReg()); |
kvn@3929 | 83 | reg_def XMM1b( SOC, SOC, Op_RegF, 1, xmm1->as_VMReg()->next(1)); |
kvn@3929 | 84 | reg_def XMM1c( SOC, SOC, Op_RegF, 1, xmm1->as_VMReg()->next(2)); |
kvn@3929 | 85 | reg_def XMM1d( SOC, SOC, Op_RegF, 1, xmm1->as_VMReg()->next(3)); |
kvn@3929 | 86 | reg_def XMM1e( SOC, SOC, Op_RegF, 1, xmm1->as_VMReg()->next(4)); |
kvn@3929 | 87 | reg_def XMM1f( SOC, SOC, Op_RegF, 1, xmm1->as_VMReg()->next(5)); |
kvn@3929 | 88 | reg_def XMM1g( SOC, SOC, Op_RegF, 1, xmm1->as_VMReg()->next(6)); |
kvn@3929 | 89 | reg_def XMM1h( SOC, SOC, Op_RegF, 1, xmm1->as_VMReg()->next(7)); |
kvn@3882 | 90 | |
kvn@3882 | 91 | reg_def XMM2 ( SOC, SOC, Op_RegF, 2, xmm2->as_VMReg()); |
kvn@3929 | 92 | reg_def XMM2b( SOC, SOC, Op_RegF, 2, xmm2->as_VMReg()->next(1)); |
kvn@3929 | 93 | reg_def XMM2c( SOC, SOC, Op_RegF, 2, xmm2->as_VMReg()->next(2)); |
kvn@3929 | 94 | reg_def XMM2d( SOC, SOC, Op_RegF, 2, xmm2->as_VMReg()->next(3)); |
kvn@3929 | 95 | reg_def XMM2e( SOC, SOC, Op_RegF, 2, xmm2->as_VMReg()->next(4)); |
kvn@3929 | 96 | reg_def XMM2f( SOC, SOC, Op_RegF, 2, xmm2->as_VMReg()->next(5)); |
kvn@3929 | 97 | reg_def XMM2g( SOC, SOC, Op_RegF, 2, xmm2->as_VMReg()->next(6)); |
kvn@3929 | 98 | reg_def XMM2h( SOC, SOC, Op_RegF, 2, xmm2->as_VMReg()->next(7)); |
kvn@3882 | 99 | |
kvn@3882 | 100 | reg_def XMM3 ( SOC, SOC, Op_RegF, 3, xmm3->as_VMReg()); |
kvn@3929 | 101 | reg_def XMM3b( SOC, SOC, Op_RegF, 3, xmm3->as_VMReg()->next(1)); |
kvn@3929 | 102 | reg_def XMM3c( SOC, SOC, Op_RegF, 3, xmm3->as_VMReg()->next(2)); |
kvn@3929 | 103 | reg_def XMM3d( SOC, SOC, Op_RegF, 3, xmm3->as_VMReg()->next(3)); |
kvn@3929 | 104 | reg_def XMM3e( SOC, SOC, Op_RegF, 3, xmm3->as_VMReg()->next(4)); |
kvn@3929 | 105 | reg_def XMM3f( SOC, SOC, Op_RegF, 3, xmm3->as_VMReg()->next(5)); |
kvn@3929 | 106 | reg_def XMM3g( SOC, SOC, Op_RegF, 3, xmm3->as_VMReg()->next(6)); |
kvn@3929 | 107 | reg_def XMM3h( SOC, SOC, Op_RegF, 3, xmm3->as_VMReg()->next(7)); |
kvn@3882 | 108 | |
kvn@3882 | 109 | reg_def XMM4 ( SOC, SOC, Op_RegF, 4, xmm4->as_VMReg()); |
kvn@3929 | 110 | reg_def XMM4b( SOC, SOC, Op_RegF, 4, xmm4->as_VMReg()->next(1)); |
kvn@3929 | 111 | reg_def XMM4c( SOC, SOC, Op_RegF, 4, xmm4->as_VMReg()->next(2)); |
kvn@3929 | 112 | reg_def XMM4d( SOC, SOC, Op_RegF, 4, xmm4->as_VMReg()->next(3)); |
kvn@3929 | 113 | reg_def XMM4e( SOC, SOC, Op_RegF, 4, xmm4->as_VMReg()->next(4)); |
kvn@3929 | 114 | reg_def XMM4f( SOC, SOC, Op_RegF, 4, xmm4->as_VMReg()->next(5)); |
kvn@3929 | 115 | reg_def XMM4g( SOC, SOC, Op_RegF, 4, xmm4->as_VMReg()->next(6)); |
kvn@3929 | 116 | reg_def XMM4h( SOC, SOC, Op_RegF, 4, xmm4->as_VMReg()->next(7)); |
kvn@3882 | 117 | |
kvn@3882 | 118 | reg_def XMM5 ( SOC, SOC, Op_RegF, 5, xmm5->as_VMReg()); |
kvn@3929 | 119 | reg_def XMM5b( SOC, SOC, Op_RegF, 5, xmm5->as_VMReg()->next(1)); |
kvn@3929 | 120 | reg_def XMM5c( SOC, SOC, Op_RegF, 5, xmm5->as_VMReg()->next(2)); |
kvn@3929 | 121 | reg_def XMM5d( SOC, SOC, Op_RegF, 5, xmm5->as_VMReg()->next(3)); |
kvn@3929 | 122 | reg_def XMM5e( SOC, SOC, Op_RegF, 5, xmm5->as_VMReg()->next(4)); |
kvn@3929 | 123 | reg_def XMM5f( SOC, SOC, Op_RegF, 5, xmm5->as_VMReg()->next(5)); |
kvn@3929 | 124 | reg_def XMM5g( SOC, SOC, Op_RegF, 5, xmm5->as_VMReg()->next(6)); |
kvn@3929 | 125 | reg_def XMM5h( SOC, SOC, Op_RegF, 5, xmm5->as_VMReg()->next(7)); |
kvn@3882 | 126 | |
kvn@3882 | 127 | #ifdef _WIN64 |
kvn@3882 | 128 | |
kvn@3882 | 129 | reg_def XMM6 ( SOC, SOE, Op_RegF, 6, xmm6->as_VMReg()); |
kvn@3929 | 130 | reg_def XMM6b( SOC, SOE, Op_RegF, 6, xmm6->as_VMReg()->next(1)); |
kvn@3929 | 131 | reg_def XMM6c( SOC, SOE, Op_RegF, 6, xmm6->as_VMReg()->next(2)); |
kvn@3929 | 132 | reg_def XMM6d( SOC, SOE, Op_RegF, 6, xmm6->as_VMReg()->next(3)); |
kvn@3929 | 133 | reg_def XMM6e( SOC, SOE, Op_RegF, 6, xmm6->as_VMReg()->next(4)); |
kvn@3929 | 134 | reg_def XMM6f( SOC, SOE, Op_RegF, 6, xmm6->as_VMReg()->next(5)); |
kvn@3929 | 135 | reg_def XMM6g( SOC, SOE, Op_RegF, 6, xmm6->as_VMReg()->next(6)); |
kvn@3929 | 136 | reg_def XMM6h( SOC, SOE, Op_RegF, 6, xmm6->as_VMReg()->next(7)); |
kvn@3882 | 137 | |
kvn@3882 | 138 | reg_def XMM7 ( SOC, SOE, Op_RegF, 7, xmm7->as_VMReg()); |
kvn@3929 | 139 | reg_def XMM7b( SOC, SOE, Op_RegF, 7, xmm7->as_VMReg()->next(1)); |
kvn@3929 | 140 | reg_def XMM7c( SOC, SOE, Op_RegF, 7, xmm7->as_VMReg()->next(2)); |
kvn@3929 | 141 | reg_def XMM7d( SOC, SOE, Op_RegF, 7, xmm7->as_VMReg()->next(3)); |
kvn@3929 | 142 | reg_def XMM7e( SOC, SOE, Op_RegF, 7, xmm7->as_VMReg()->next(4)); |
kvn@3929 | 143 | reg_def XMM7f( SOC, SOE, Op_RegF, 7, xmm7->as_VMReg()->next(5)); |
kvn@3929 | 144 | reg_def XMM7g( SOC, SOE, Op_RegF, 7, xmm7->as_VMReg()->next(6)); |
kvn@3929 | 145 | reg_def XMM7h( SOC, SOE, Op_RegF, 7, xmm7->as_VMReg()->next(7)); |
kvn@3882 | 146 | |
kvn@3882 | 147 | reg_def XMM8 ( SOC, SOE, Op_RegF, 8, xmm8->as_VMReg()); |
kvn@3929 | 148 | reg_def XMM8b( SOC, SOE, Op_RegF, 8, xmm8->as_VMReg()->next(1)); |
kvn@3929 | 149 | reg_def XMM8c( SOC, SOE, Op_RegF, 8, xmm8->as_VMReg()->next(2)); |
kvn@3929 | 150 | reg_def XMM8d( SOC, SOE, Op_RegF, 8, xmm8->as_VMReg()->next(3)); |
kvn@3929 | 151 | reg_def XMM8e( SOC, SOE, Op_RegF, 8, xmm8->as_VMReg()->next(4)); |
kvn@3929 | 152 | reg_def XMM8f( SOC, SOE, Op_RegF, 8, xmm8->as_VMReg()->next(5)); |
kvn@3929 | 153 | reg_def XMM8g( SOC, SOE, Op_RegF, 8, xmm8->as_VMReg()->next(6)); |
kvn@3929 | 154 | reg_def XMM8h( SOC, SOE, Op_RegF, 8, xmm8->as_VMReg()->next(7)); |
kvn@3882 | 155 | |
kvn@3882 | 156 | reg_def XMM9 ( SOC, SOE, Op_RegF, 9, xmm9->as_VMReg()); |
kvn@3929 | 157 | reg_def XMM9b( SOC, SOE, Op_RegF, 9, xmm9->as_VMReg()->next(1)); |
kvn@3929 | 158 | reg_def XMM9c( SOC, SOE, Op_RegF, 9, xmm9->as_VMReg()->next(2)); |
kvn@3929 | 159 | reg_def XMM9d( SOC, SOE, Op_RegF, 9, xmm9->as_VMReg()->next(3)); |
kvn@3929 | 160 | reg_def XMM9e( SOC, SOE, Op_RegF, 9, xmm9->as_VMReg()->next(4)); |
kvn@3929 | 161 | reg_def XMM9f( SOC, SOE, Op_RegF, 9, xmm9->as_VMReg()->next(5)); |
kvn@3929 | 162 | reg_def XMM9g( SOC, SOE, Op_RegF, 9, xmm9->as_VMReg()->next(6)); |
kvn@3929 | 163 | reg_def XMM9h( SOC, SOE, Op_RegF, 9, xmm9->as_VMReg()->next(7)); |
kvn@3882 | 164 | |
kvn@3882 | 165 | reg_def XMM10 ( SOC, SOE, Op_RegF, 10, xmm10->as_VMReg()); |
kvn@3929 | 166 | reg_def XMM10b( SOC, SOE, Op_RegF, 10, xmm10->as_VMReg()->next(1)); |
kvn@3929 | 167 | reg_def XMM10c( SOC, SOE, Op_RegF, 10, xmm10->as_VMReg()->next(2)); |
kvn@3929 | 168 | reg_def XMM10d( SOC, SOE, Op_RegF, 10, xmm10->as_VMReg()->next(3)); |
kvn@3929 | 169 | reg_def XMM10e( SOC, SOE, Op_RegF, 10, xmm10->as_VMReg()->next(4)); |
kvn@3929 | 170 | reg_def XMM10f( SOC, SOE, Op_RegF, 10, xmm10->as_VMReg()->next(5)); |
kvn@3929 | 171 | reg_def XMM10g( SOC, SOE, Op_RegF, 10, xmm10->as_VMReg()->next(6)); |
kvn@3929 | 172 | reg_def XMM10h( SOC, SOE, Op_RegF, 10, xmm10->as_VMReg()->next(7)); |
kvn@3882 | 173 | |
kvn@3882 | 174 | reg_def XMM11 ( SOC, SOE, Op_RegF, 11, xmm11->as_VMReg()); |
kvn@3929 | 175 | reg_def XMM11b( SOC, SOE, Op_RegF, 11, xmm11->as_VMReg()->next(1)); |
kvn@3929 | 176 | reg_def XMM11c( SOC, SOE, Op_RegF, 11, xmm11->as_VMReg()->next(2)); |
kvn@3929 | 177 | reg_def XMM11d( SOC, SOE, Op_RegF, 11, xmm11->as_VMReg()->next(3)); |
kvn@3929 | 178 | reg_def XMM11e( SOC, SOE, Op_RegF, 11, xmm11->as_VMReg()->next(4)); |
kvn@3929 | 179 | reg_def XMM11f( SOC, SOE, Op_RegF, 11, xmm11->as_VMReg()->next(5)); |
kvn@3929 | 180 | reg_def XMM11g( SOC, SOE, Op_RegF, 11, xmm11->as_VMReg()->next(6)); |
kvn@3929 | 181 | reg_def XMM11h( SOC, SOE, Op_RegF, 11, xmm11->as_VMReg()->next(7)); |
kvn@3882 | 182 | |
kvn@3882 | 183 | reg_def XMM12 ( SOC, SOE, Op_RegF, 12, xmm12->as_VMReg()); |
kvn@3929 | 184 | reg_def XMM12b( SOC, SOE, Op_RegF, 12, xmm12->as_VMReg()->next(1)); |
kvn@3929 | 185 | reg_def XMM12c( SOC, SOE, Op_RegF, 12, xmm12->as_VMReg()->next(2)); |
kvn@3929 | 186 | reg_def XMM12d( SOC, SOE, Op_RegF, 12, xmm12->as_VMReg()->next(3)); |
kvn@3929 | 187 | reg_def XMM12e( SOC, SOE, Op_RegF, 12, xmm12->as_VMReg()->next(4)); |
kvn@3929 | 188 | reg_def XMM12f( SOC, SOE, Op_RegF, 12, xmm12->as_VMReg()->next(5)); |
kvn@3929 | 189 | reg_def XMM12g( SOC, SOE, Op_RegF, 12, xmm12->as_VMReg()->next(6)); |
kvn@3929 | 190 | reg_def XMM12h( SOC, SOE, Op_RegF, 12, xmm12->as_VMReg()->next(7)); |
kvn@3882 | 191 | |
kvn@3882 | 192 | reg_def XMM13 ( SOC, SOE, Op_RegF, 13, xmm13->as_VMReg()); |
kvn@3929 | 193 | reg_def XMM13b( SOC, SOE, Op_RegF, 13, xmm13->as_VMReg()->next(1)); |
kvn@3929 | 194 | reg_def XMM13c( SOC, SOE, Op_RegF, 13, xmm13->as_VMReg()->next(2)); |
kvn@3929 | 195 | reg_def XMM13d( SOC, SOE, Op_RegF, 13, xmm13->as_VMReg()->next(3)); |
kvn@3929 | 196 | reg_def XMM13e( SOC, SOE, Op_RegF, 13, xmm13->as_VMReg()->next(4)); |
kvn@3929 | 197 | reg_def XMM13f( SOC, SOE, Op_RegF, 13, xmm13->as_VMReg()->next(5)); |
kvn@3929 | 198 | reg_def XMM13g( SOC, SOE, Op_RegF, 13, xmm13->as_VMReg()->next(6)); |
kvn@3929 | 199 | reg_def XMM13h( SOC, SOE, Op_RegF, 13, xmm13->as_VMReg()->next(7)); |
kvn@3882 | 200 | |
kvn@3882 | 201 | reg_def XMM14 ( SOC, SOE, Op_RegF, 14, xmm14->as_VMReg()); |
kvn@3929 | 202 | reg_def XMM14b( SOC, SOE, Op_RegF, 14, xmm14->as_VMReg()->next(1)); |
kvn@3929 | 203 | reg_def XMM14c( SOC, SOE, Op_RegF, 14, xmm14->as_VMReg()->next(2)); |
kvn@3929 | 204 | reg_def XMM14d( SOC, SOE, Op_RegF, 14, xmm14->as_VMReg()->next(3)); |
kvn@3929 | 205 | reg_def XMM14e( SOC, SOE, Op_RegF, 14, xmm14->as_VMReg()->next(4)); |
kvn@3929 | 206 | reg_def XMM14f( SOC, SOE, Op_RegF, 14, xmm14->as_VMReg()->next(5)); |
kvn@3929 | 207 | reg_def XMM14g( SOC, SOE, Op_RegF, 14, xmm14->as_VMReg()->next(6)); |
kvn@3929 | 208 | reg_def XMM14h( SOC, SOE, Op_RegF, 14, xmm14->as_VMReg()->next(7)); |
kvn@3882 | 209 | |
kvn@3882 | 210 | reg_def XMM15 ( SOC, SOE, Op_RegF, 15, xmm15->as_VMReg()); |
kvn@3929 | 211 | reg_def XMM15b( SOC, SOE, Op_RegF, 15, xmm15->as_VMReg()->next(1)); |
kvn@3929 | 212 | reg_def XMM15c( SOC, SOE, Op_RegF, 15, xmm15->as_VMReg()->next(2)); |
kvn@3929 | 213 | reg_def XMM15d( SOC, SOE, Op_RegF, 15, xmm15->as_VMReg()->next(3)); |
kvn@3929 | 214 | reg_def XMM15e( SOC, SOE, Op_RegF, 15, xmm15->as_VMReg()->next(4)); |
kvn@3929 | 215 | reg_def XMM15f( SOC, SOE, Op_RegF, 15, xmm15->as_VMReg()->next(5)); |
kvn@3929 | 216 | reg_def XMM15g( SOC, SOE, Op_RegF, 15, xmm15->as_VMReg()->next(6)); |
kvn@3929 | 217 | reg_def XMM15h( SOC, SOE, Op_RegF, 15, xmm15->as_VMReg()->next(7)); |
kvn@3882 | 218 | |
kvn@3882 | 219 | #else // _WIN64 |
kvn@3882 | 220 | |
kvn@3882 | 221 | reg_def XMM6 ( SOC, SOC, Op_RegF, 6, xmm6->as_VMReg()); |
kvn@3929 | 222 | reg_def XMM6b( SOC, SOC, Op_RegF, 6, xmm6->as_VMReg()->next(1)); |
kvn@3929 | 223 | reg_def XMM6c( SOC, SOC, Op_RegF, 6, xmm6->as_VMReg()->next(2)); |
kvn@3929 | 224 | reg_def XMM6d( SOC, SOC, Op_RegF, 6, xmm6->as_VMReg()->next(3)); |
kvn@3929 | 225 | reg_def XMM6e( SOC, SOC, Op_RegF, 6, xmm6->as_VMReg()->next(4)); |
kvn@3929 | 226 | reg_def XMM6f( SOC, SOC, Op_RegF, 6, xmm6->as_VMReg()->next(5)); |
kvn@3929 | 227 | reg_def XMM6g( SOC, SOC, Op_RegF, 6, xmm6->as_VMReg()->next(6)); |
kvn@3929 | 228 | reg_def XMM6h( SOC, SOC, Op_RegF, 6, xmm6->as_VMReg()->next(7)); |
kvn@3882 | 229 | |
kvn@3882 | 230 | reg_def XMM7 ( SOC, SOC, Op_RegF, 7, xmm7->as_VMReg()); |
kvn@3929 | 231 | reg_def XMM7b( SOC, SOC, Op_RegF, 7, xmm7->as_VMReg()->next(1)); |
kvn@3929 | 232 | reg_def XMM7c( SOC, SOC, Op_RegF, 7, xmm7->as_VMReg()->next(2)); |
kvn@3929 | 233 | reg_def XMM7d( SOC, SOC, Op_RegF, 7, xmm7->as_VMReg()->next(3)); |
kvn@3929 | 234 | reg_def XMM7e( SOC, SOC, Op_RegF, 7, xmm7->as_VMReg()->next(4)); |
kvn@3929 | 235 | reg_def XMM7f( SOC, SOC, Op_RegF, 7, xmm7->as_VMReg()->next(5)); |
kvn@3929 | 236 | reg_def XMM7g( SOC, SOC, Op_RegF, 7, xmm7->as_VMReg()->next(6)); |
kvn@3929 | 237 | reg_def XMM7h( SOC, SOC, Op_RegF, 7, xmm7->as_VMReg()->next(7)); |
kvn@3882 | 238 | |
kvn@3882 | 239 | #ifdef _LP64 |
kvn@3882 | 240 | |
kvn@3882 | 241 | reg_def XMM8 ( SOC, SOC, Op_RegF, 8, xmm8->as_VMReg()); |
kvn@3929 | 242 | reg_def XMM8b( SOC, SOC, Op_RegF, 8, xmm8->as_VMReg()->next(1)); |
kvn@3929 | 243 | reg_def XMM8c( SOC, SOC, Op_RegF, 8, xmm8->as_VMReg()->next(2)); |
kvn@3929 | 244 | reg_def XMM8d( SOC, SOC, Op_RegF, 8, xmm8->as_VMReg()->next(3)); |
kvn@3929 | 245 | reg_def XMM8e( SOC, SOC, Op_RegF, 8, xmm8->as_VMReg()->next(4)); |
kvn@3929 | 246 | reg_def XMM8f( SOC, SOC, Op_RegF, 8, xmm8->as_VMReg()->next(5)); |
kvn@3929 | 247 | reg_def XMM8g( SOC, SOC, Op_RegF, 8, xmm8->as_VMReg()->next(6)); |
kvn@3929 | 248 | reg_def XMM8h( SOC, SOC, Op_RegF, 8, xmm8->as_VMReg()->next(7)); |
kvn@3882 | 249 | |
kvn@3882 | 250 | reg_def XMM9 ( SOC, SOC, Op_RegF, 9, xmm9->as_VMReg()); |
kvn@3929 | 251 | reg_def XMM9b( SOC, SOC, Op_RegF, 9, xmm9->as_VMReg()->next(1)); |
kvn@3929 | 252 | reg_def XMM9c( SOC, SOC, Op_RegF, 9, xmm9->as_VMReg()->next(2)); |
kvn@3929 | 253 | reg_def XMM9d( SOC, SOC, Op_RegF, 9, xmm9->as_VMReg()->next(3)); |
kvn@3929 | 254 | reg_def XMM9e( SOC, SOC, Op_RegF, 9, xmm9->as_VMReg()->next(4)); |
kvn@3929 | 255 | reg_def XMM9f( SOC, SOC, Op_RegF, 9, xmm9->as_VMReg()->next(5)); |
kvn@3929 | 256 | reg_def XMM9g( SOC, SOC, Op_RegF, 9, xmm9->as_VMReg()->next(6)); |
kvn@3929 | 257 | reg_def XMM9h( SOC, SOC, Op_RegF, 9, xmm9->as_VMReg()->next(7)); |
kvn@3882 | 258 | |
kvn@3882 | 259 | reg_def XMM10 ( SOC, SOC, Op_RegF, 10, xmm10->as_VMReg()); |
kvn@3929 | 260 | reg_def XMM10b( SOC, SOC, Op_RegF, 10, xmm10->as_VMReg()->next(1)); |
kvn@3929 | 261 | reg_def XMM10c( SOC, SOC, Op_RegF, 10, xmm10->as_VMReg()->next(2)); |
kvn@3929 | 262 | reg_def XMM10d( SOC, SOC, Op_RegF, 10, xmm10->as_VMReg()->next(3)); |
kvn@3929 | 263 | reg_def XMM10e( SOC, SOC, Op_RegF, 10, xmm10->as_VMReg()->next(4)); |
kvn@3929 | 264 | reg_def XMM10f( SOC, SOC, Op_RegF, 10, xmm10->as_VMReg()->next(5)); |
kvn@3929 | 265 | reg_def XMM10g( SOC, SOC, Op_RegF, 10, xmm10->as_VMReg()->next(6)); |
kvn@3929 | 266 | reg_def XMM10h( SOC, SOC, Op_RegF, 10, xmm10->as_VMReg()->next(7)); |
kvn@3882 | 267 | |
kvn@3882 | 268 | reg_def XMM11 ( SOC, SOC, Op_RegF, 11, xmm11->as_VMReg()); |
kvn@3929 | 269 | reg_def XMM11b( SOC, SOC, Op_RegF, 11, xmm11->as_VMReg()->next(1)); |
kvn@3929 | 270 | reg_def XMM11c( SOC, SOC, Op_RegF, 11, xmm11->as_VMReg()->next(2)); |
kvn@3929 | 271 | reg_def XMM11d( SOC, SOC, Op_RegF, 11, xmm11->as_VMReg()->next(3)); |
kvn@3929 | 272 | reg_def XMM11e( SOC, SOC, Op_RegF, 11, xmm11->as_VMReg()->next(4)); |
kvn@3929 | 273 | reg_def XMM11f( SOC, SOC, Op_RegF, 11, xmm11->as_VMReg()->next(5)); |
kvn@3929 | 274 | reg_def XMM11g( SOC, SOC, Op_RegF, 11, xmm11->as_VMReg()->next(6)); |
kvn@3929 | 275 | reg_def XMM11h( SOC, SOC, Op_RegF, 11, xmm11->as_VMReg()->next(7)); |
kvn@3882 | 276 | |
kvn@3882 | 277 | reg_def XMM12 ( SOC, SOC, Op_RegF, 12, xmm12->as_VMReg()); |
kvn@3929 | 278 | reg_def XMM12b( SOC, SOC, Op_RegF, 12, xmm12->as_VMReg()->next(1)); |
kvn@3929 | 279 | reg_def XMM12c( SOC, SOC, Op_RegF, 12, xmm12->as_VMReg()->next(2)); |
kvn@3929 | 280 | reg_def XMM12d( SOC, SOC, Op_RegF, 12, xmm12->as_VMReg()->next(3)); |
kvn@3929 | 281 | reg_def XMM12e( SOC, SOC, Op_RegF, 12, xmm12->as_VMReg()->next(4)); |
kvn@3929 | 282 | reg_def XMM12f( SOC, SOC, Op_RegF, 12, xmm12->as_VMReg()->next(5)); |
kvn@3929 | 283 | reg_def XMM12g( SOC, SOC, Op_RegF, 12, xmm12->as_VMReg()->next(6)); |
kvn@3929 | 284 | reg_def XMM12h( SOC, SOC, Op_RegF, 12, xmm12->as_VMReg()->next(7)); |
kvn@3882 | 285 | |
kvn@3882 | 286 | reg_def XMM13 ( SOC, SOC, Op_RegF, 13, xmm13->as_VMReg()); |
kvn@3929 | 287 | reg_def XMM13b( SOC, SOC, Op_RegF, 13, xmm13->as_VMReg()->next(1)); |
kvn@3929 | 288 | reg_def XMM13c( SOC, SOC, Op_RegF, 13, xmm13->as_VMReg()->next(2)); |
kvn@3929 | 289 | reg_def XMM13d( SOC, SOC, Op_RegF, 13, xmm13->as_VMReg()->next(3)); |
kvn@3929 | 290 | reg_def XMM13e( SOC, SOC, Op_RegF, 13, xmm13->as_VMReg()->next(4)); |
kvn@3929 | 291 | reg_def XMM13f( SOC, SOC, Op_RegF, 13, xmm13->as_VMReg()->next(5)); |
kvn@3929 | 292 | reg_def XMM13g( SOC, SOC, Op_RegF, 13, xmm13->as_VMReg()->next(6)); |
kvn@3929 | 293 | reg_def XMM13h( SOC, SOC, Op_RegF, 13, xmm13->as_VMReg()->next(7)); |
kvn@3882 | 294 | |
kvn@3882 | 295 | reg_def XMM14 ( SOC, SOC, Op_RegF, 14, xmm14->as_VMReg()); |
kvn@3929 | 296 | reg_def XMM14b( SOC, SOC, Op_RegF, 14, xmm14->as_VMReg()->next(1)); |
kvn@3929 | 297 | reg_def XMM14c( SOC, SOC, Op_RegF, 14, xmm14->as_VMReg()->next(2)); |
kvn@3929 | 298 | reg_def XMM14d( SOC, SOC, Op_RegF, 14, xmm14->as_VMReg()->next(3)); |
kvn@3929 | 299 | reg_def XMM14e( SOC, SOC, Op_RegF, 14, xmm14->as_VMReg()->next(4)); |
kvn@3929 | 300 | reg_def XMM14f( SOC, SOC, Op_RegF, 14, xmm14->as_VMReg()->next(5)); |
kvn@3929 | 301 | reg_def XMM14g( SOC, SOC, Op_RegF, 14, xmm14->as_VMReg()->next(6)); |
kvn@3929 | 302 | reg_def XMM14h( SOC, SOC, Op_RegF, 14, xmm14->as_VMReg()->next(7)); |
kvn@3882 | 303 | |
kvn@3882 | 304 | reg_def XMM15 ( SOC, SOC, Op_RegF, 15, xmm15->as_VMReg()); |
kvn@3929 | 305 | reg_def XMM15b( SOC, SOC, Op_RegF, 15, xmm15->as_VMReg()->next(1)); |
kvn@3929 | 306 | reg_def XMM15c( SOC, SOC, Op_RegF, 15, xmm15->as_VMReg()->next(2)); |
kvn@3929 | 307 | reg_def XMM15d( SOC, SOC, Op_RegF, 15, xmm15->as_VMReg()->next(3)); |
kvn@3929 | 308 | reg_def XMM15e( SOC, SOC, Op_RegF, 15, xmm15->as_VMReg()->next(4)); |
kvn@3929 | 309 | reg_def XMM15f( SOC, SOC, Op_RegF, 15, xmm15->as_VMReg()->next(5)); |
kvn@3929 | 310 | reg_def XMM15g( SOC, SOC, Op_RegF, 15, xmm15->as_VMReg()->next(6)); |
kvn@3929 | 311 | reg_def XMM15h( SOC, SOC, Op_RegF, 15, xmm15->as_VMReg()->next(7)); |
kvn@3882 | 312 | |
kvn@3882 | 313 | #endif // _LP64 |
kvn@3882 | 314 | |
kvn@3882 | 315 | #endif // _WIN64 |
kvn@3882 | 316 | |
kvn@3882 | 317 | #ifdef _LP64 |
kvn@3882 | 318 | reg_def RFLAGS(SOC, SOC, 0, 16, VMRegImpl::Bad()); |
kvn@3882 | 319 | #else |
kvn@3882 | 320 | reg_def RFLAGS(SOC, SOC, 0, 8, VMRegImpl::Bad()); |
kvn@3882 | 321 | #endif // _LP64 |
kvn@3882 | 322 | |
kvn@3882 | 323 | alloc_class chunk1(XMM0, XMM0b, XMM0c, XMM0d, XMM0e, XMM0f, XMM0g, XMM0h, |
kvn@3882 | 324 | XMM1, XMM1b, XMM1c, XMM1d, XMM1e, XMM1f, XMM1g, XMM1h, |
kvn@3882 | 325 | XMM2, XMM2b, XMM2c, XMM2d, XMM2e, XMM2f, XMM2g, XMM2h, |
kvn@3882 | 326 | XMM3, XMM3b, XMM3c, XMM3d, XMM3e, XMM3f, XMM3g, XMM3h, |
kvn@3882 | 327 | XMM4, XMM4b, XMM4c, XMM4d, XMM4e, XMM4f, XMM4g, XMM4h, |
kvn@3882 | 328 | XMM5, XMM5b, XMM5c, XMM5d, XMM5e, XMM5f, XMM5g, XMM5h, |
kvn@3882 | 329 | XMM6, XMM6b, XMM6c, XMM6d, XMM6e, XMM6f, XMM6g, XMM6h, |
kvn@3882 | 330 | XMM7, XMM7b, XMM7c, XMM7d, XMM7e, XMM7f, XMM7g, XMM7h |
kvn@3882 | 331 | #ifdef _LP64 |
kvn@3882 | 332 | ,XMM8, XMM8b, XMM8c, XMM8d, XMM8e, XMM8f, XMM8g, XMM8h, |
kvn@3882 | 333 | XMM9, XMM9b, XMM9c, XMM9d, XMM9e, XMM9f, XMM9g, XMM9h, |
kvn@3882 | 334 | XMM10, XMM10b, XMM10c, XMM10d, XMM10e, XMM10f, XMM10g, XMM10h, |
kvn@3882 | 335 | XMM11, XMM11b, XMM11c, XMM11d, XMM11e, XMM11f, XMM11g, XMM11h, |
kvn@3882 | 336 | XMM12, XMM12b, XMM12c, XMM12d, XMM12e, XMM12f, XMM12g, XMM12h, |
kvn@3882 | 337 | XMM13, XMM13b, XMM13c, XMM13d, XMM13e, XMM13f, XMM13g, XMM13h, |
kvn@3882 | 338 | XMM14, XMM14b, XMM14c, XMM14d, XMM14e, XMM14f, XMM14g, XMM14h, |
kvn@3882 | 339 | XMM15, XMM15b, XMM15c, XMM15d, XMM15e, XMM15f, XMM15g, XMM15h |
kvn@3882 | 340 | #endif |
kvn@3882 | 341 | ); |
kvn@3882 | 342 | |
kvn@3882 | 343 | // flags allocation class should be last. |
kvn@3882 | 344 | alloc_class chunk2(RFLAGS); |
kvn@3882 | 345 | |
kvn@3882 | 346 | // Singleton class for condition codes |
kvn@3882 | 347 | reg_class int_flags(RFLAGS); |
kvn@3882 | 348 | |
kvn@3882 | 349 | // Class for all float registers |
kvn@3882 | 350 | reg_class float_reg(XMM0, |
kvn@3882 | 351 | XMM1, |
kvn@3882 | 352 | XMM2, |
kvn@3882 | 353 | XMM3, |
kvn@3882 | 354 | XMM4, |
kvn@3882 | 355 | XMM5, |
kvn@3882 | 356 | XMM6, |
kvn@3882 | 357 | XMM7 |
kvn@3882 | 358 | #ifdef _LP64 |
kvn@3882 | 359 | ,XMM8, |
kvn@3882 | 360 | XMM9, |
kvn@3882 | 361 | XMM10, |
kvn@3882 | 362 | XMM11, |
kvn@3882 | 363 | XMM12, |
kvn@3882 | 364 | XMM13, |
kvn@3882 | 365 | XMM14, |
kvn@3882 | 366 | XMM15 |
kvn@3882 | 367 | #endif |
kvn@3882 | 368 | ); |
kvn@3882 | 369 | |
kvn@3882 | 370 | // Class for all double registers |
kvn@3882 | 371 | reg_class double_reg(XMM0, XMM0b, |
kvn@3882 | 372 | XMM1, XMM1b, |
kvn@3882 | 373 | XMM2, XMM2b, |
kvn@3882 | 374 | XMM3, XMM3b, |
kvn@3882 | 375 | XMM4, XMM4b, |
kvn@3882 | 376 | XMM5, XMM5b, |
kvn@3882 | 377 | XMM6, XMM6b, |
kvn@3882 | 378 | XMM7, XMM7b |
kvn@3882 | 379 | #ifdef _LP64 |
kvn@3882 | 380 | ,XMM8, XMM8b, |
kvn@3882 | 381 | XMM9, XMM9b, |
kvn@3882 | 382 | XMM10, XMM10b, |
kvn@3882 | 383 | XMM11, XMM11b, |
kvn@3882 | 384 | XMM12, XMM12b, |
kvn@3882 | 385 | XMM13, XMM13b, |
kvn@3882 | 386 | XMM14, XMM14b, |
kvn@3882 | 387 | XMM15, XMM15b |
kvn@3882 | 388 | #endif |
kvn@3882 | 389 | ); |
kvn@3882 | 390 | |
kvn@3882 | 391 | // Class for all 32bit vector registers |
kvn@3882 | 392 | reg_class vectors_reg(XMM0, |
kvn@3882 | 393 | XMM1, |
kvn@3882 | 394 | XMM2, |
kvn@3882 | 395 | XMM3, |
kvn@3882 | 396 | XMM4, |
kvn@3882 | 397 | XMM5, |
kvn@3882 | 398 | XMM6, |
kvn@3882 | 399 | XMM7 |
kvn@3882 | 400 | #ifdef _LP64 |
kvn@3882 | 401 | ,XMM8, |
kvn@3882 | 402 | XMM9, |
kvn@3882 | 403 | XMM10, |
kvn@3882 | 404 | XMM11, |
kvn@3882 | 405 | XMM12, |
kvn@3882 | 406 | XMM13, |
kvn@3882 | 407 | XMM14, |
kvn@3882 | 408 | XMM15 |
kvn@3882 | 409 | #endif |
kvn@3882 | 410 | ); |
kvn@3882 | 411 | |
kvn@3882 | 412 | // Class for all 64bit vector registers |
kvn@3882 | 413 | reg_class vectord_reg(XMM0, XMM0b, |
kvn@3882 | 414 | XMM1, XMM1b, |
kvn@3882 | 415 | XMM2, XMM2b, |
kvn@3882 | 416 | XMM3, XMM3b, |
kvn@3882 | 417 | XMM4, XMM4b, |
kvn@3882 | 418 | XMM5, XMM5b, |
kvn@3882 | 419 | XMM6, XMM6b, |
kvn@3882 | 420 | XMM7, XMM7b |
kvn@3882 | 421 | #ifdef _LP64 |
kvn@3882 | 422 | ,XMM8, XMM8b, |
kvn@3882 | 423 | XMM9, XMM9b, |
kvn@3882 | 424 | XMM10, XMM10b, |
kvn@3882 | 425 | XMM11, XMM11b, |
kvn@3882 | 426 | XMM12, XMM12b, |
kvn@3882 | 427 | XMM13, XMM13b, |
kvn@3882 | 428 | XMM14, XMM14b, |
kvn@3882 | 429 | XMM15, XMM15b |
kvn@3882 | 430 | #endif |
kvn@3882 | 431 | ); |
kvn@3882 | 432 | |
kvn@3882 | 433 | // Class for all 128bit vector registers |
kvn@3882 | 434 | reg_class vectorx_reg(XMM0, XMM0b, XMM0c, XMM0d, |
kvn@3882 | 435 | XMM1, XMM1b, XMM1c, XMM1d, |
kvn@3882 | 436 | XMM2, XMM2b, XMM2c, XMM2d, |
kvn@3882 | 437 | XMM3, XMM3b, XMM3c, XMM3d, |
kvn@3882 | 438 | XMM4, XMM4b, XMM4c, XMM4d, |
kvn@3882 | 439 | XMM5, XMM5b, XMM5c, XMM5d, |
kvn@3882 | 440 | XMM6, XMM6b, XMM6c, XMM6d, |
kvn@3882 | 441 | XMM7, XMM7b, XMM7c, XMM7d |
kvn@3882 | 442 | #ifdef _LP64 |
kvn@3882 | 443 | ,XMM8, XMM8b, XMM8c, XMM8d, |
kvn@3882 | 444 | XMM9, XMM9b, XMM9c, XMM9d, |
kvn@3882 | 445 | XMM10, XMM10b, XMM10c, XMM10d, |
kvn@3882 | 446 | XMM11, XMM11b, XMM11c, XMM11d, |
kvn@3882 | 447 | XMM12, XMM12b, XMM12c, XMM12d, |
kvn@3882 | 448 | XMM13, XMM13b, XMM13c, XMM13d, |
kvn@3882 | 449 | XMM14, XMM14b, XMM14c, XMM14d, |
kvn@3882 | 450 | XMM15, XMM15b, XMM15c, XMM15d |
kvn@3882 | 451 | #endif |
kvn@3882 | 452 | ); |
kvn@3882 | 453 | |
kvn@3882 | 454 | // Class for all 256bit vector registers |
kvn@3882 | 455 | reg_class vectory_reg(XMM0, XMM0b, XMM0c, XMM0d, XMM0e, XMM0f, XMM0g, XMM0h, |
kvn@3882 | 456 | XMM1, XMM1b, XMM1c, XMM1d, XMM1e, XMM1f, XMM1g, XMM1h, |
kvn@3882 | 457 | XMM2, XMM2b, XMM2c, XMM2d, XMM2e, XMM2f, XMM2g, XMM2h, |
kvn@3882 | 458 | XMM3, XMM3b, XMM3c, XMM3d, XMM3e, XMM3f, XMM3g, XMM3h, |
kvn@3882 | 459 | XMM4, XMM4b, XMM4c, XMM4d, XMM4e, XMM4f, XMM4g, XMM4h, |
kvn@3882 | 460 | XMM5, XMM5b, XMM5c, XMM5d, XMM5e, XMM5f, XMM5g, XMM5h, |
kvn@3882 | 461 | XMM6, XMM6b, XMM6c, XMM6d, XMM6e, XMM6f, XMM6g, XMM6h, |
kvn@3882 | 462 | XMM7, XMM7b, XMM7c, XMM7d, XMM7e, XMM7f, XMM7g, XMM7h |
kvn@3882 | 463 | #ifdef _LP64 |
kvn@3882 | 464 | ,XMM8, XMM8b, XMM8c, XMM8d, XMM8e, XMM8f, XMM8g, XMM8h, |
kvn@3882 | 465 | XMM9, XMM9b, XMM9c, XMM9d, XMM9e, XMM9f, XMM9g, XMM9h, |
kvn@3882 | 466 | XMM10, XMM10b, XMM10c, XMM10d, XMM10e, XMM10f, XMM10g, XMM10h, |
kvn@3882 | 467 | XMM11, XMM11b, XMM11c, XMM11d, XMM11e, XMM11f, XMM11g, XMM11h, |
kvn@3882 | 468 | XMM12, XMM12b, XMM12c, XMM12d, XMM12e, XMM12f, XMM12g, XMM12h, |
kvn@3882 | 469 | XMM13, XMM13b, XMM13c, XMM13d, XMM13e, XMM13f, XMM13g, XMM13h, |
kvn@3882 | 470 | XMM14, XMM14b, XMM14c, XMM14d, XMM14e, XMM14f, XMM14g, XMM14h, |
kvn@3882 | 471 | XMM15, XMM15b, XMM15c, XMM15d, XMM15e, XMM15f, XMM15g, XMM15h |
kvn@3882 | 472 | #endif |
kvn@3882 | 473 | ); |
kvn@3882 | 474 | |
kvn@3882 | 475 | %} |
kvn@3882 | 476 | |
kvn@3390 | 477 | source %{ |
kvn@3390 | 478 | // Float masks come from different places depending on platform. |
kvn@3390 | 479 | #ifdef _LP64 |
kvn@3390 | 480 | static address float_signmask() { return StubRoutines::x86::float_sign_mask(); } |
kvn@3390 | 481 | static address float_signflip() { return StubRoutines::x86::float_sign_flip(); } |
kvn@3390 | 482 | static address double_signmask() { return StubRoutines::x86::double_sign_mask(); } |
kvn@3390 | 483 | static address double_signflip() { return StubRoutines::x86::double_sign_flip(); } |
kvn@3390 | 484 | #else |
kvn@3390 | 485 | static address float_signmask() { return (address)float_signmask_pool; } |
kvn@3390 | 486 | static address float_signflip() { return (address)float_signflip_pool; } |
kvn@3390 | 487 | static address double_signmask() { return (address)double_signmask_pool; } |
kvn@3390 | 488 | static address double_signflip() { return (address)double_signflip_pool; } |
kvn@3390 | 489 | #endif |
kvn@3577 | 490 | |
kvn@3882 | 491 | |
kvn@4001 | 492 | const bool Matcher::match_rule_supported(int opcode) { |
kvn@4001 | 493 | if (!has_match_rule(opcode)) |
kvn@4001 | 494 | return false; |
kvn@4001 | 495 | |
kvn@4001 | 496 | switch (opcode) { |
kvn@4001 | 497 | case Op_PopCountI: |
kvn@4001 | 498 | case Op_PopCountL: |
kvn@4001 | 499 | if (!UsePopCountInstruction) |
kvn@4001 | 500 | return false; |
kvn@4103 | 501 | break; |
kvn@4001 | 502 | case Op_MulVI: |
kvn@4001 | 503 | if ((UseSSE < 4) && (UseAVX < 1)) // only with SSE4_1 or AVX |
kvn@4001 | 504 | return false; |
kvn@4001 | 505 | break; |
roland@4106 | 506 | case Op_CompareAndSwapL: |
roland@4106 | 507 | #ifdef _LP64 |
roland@4106 | 508 | case Op_CompareAndSwapP: |
roland@4106 | 509 | #endif |
roland@4106 | 510 | if (!VM_Version::supports_cx8()) |
roland@4106 | 511 | return false; |
roland@4106 | 512 | break; |
kvn@4001 | 513 | } |
kvn@4001 | 514 | |
kvn@4001 | 515 | return true; // Per default match rules are supported. |
kvn@4001 | 516 | } |
kvn@4001 | 517 | |
// Max vector size in bytes. 0 if not supported.
const int Matcher::vector_width_in_bytes(BasicType bt) {
  assert(is_java_primitive(bt), "only primitive type vectors");
  if (UseSSE < 2) return 0;
  // SSE2 supports 128bit vectors for all types.
  // AVX2 supports 256bit vectors for all types.
  int size = (UseAVX > 1) ? 32 : 16;
  // AVX1 supports 256bit vectors only for FLOAT and DOUBLE.
  if (UseAVX > 0 && (bt == T_FLOAT || bt == T_DOUBLE))
    size = 32;
  // Use flag to limit vector size.
  size = MIN2(size,(int)MaxVectorSize);
  // Minimum 2 values in vector (or 4 for bytes).
  // NOTE: the cases below fall through on purpose.  Each group enforces
  // its own minimum width; the later (weaker) checks are harmless for
  // the wider types that fall into them.
  switch (bt) {
  case T_DOUBLE:
  case T_LONG:
    if (size < 16) return 0;
    // fall through
  case T_FLOAT:
  case T_INT:
    if (size < 8) return 0;
    // fall through
  case T_BOOLEAN:
  case T_BYTE:
  case T_CHAR:
  case T_SHORT:
    if (size < 4) return 0;
    break;
  default:
    ShouldNotReachHere();
  }
  return size;
}
kvn@3882 | 549 | |
kvn@3882 | 550 | // Limits on vector size (number of elements) loaded into vector. |
kvn@3882 | 551 | const int Matcher::max_vector_size(const BasicType bt) { |
kvn@3882 | 552 | return vector_width_in_bytes(bt)/type2aelembytes(bt); |
kvn@3882 | 553 | } |
kvn@3882 | 554 | const int Matcher::min_vector_size(const BasicType bt) { |
kvn@3882 | 555 | int max_size = max_vector_size(bt); |
kvn@3882 | 556 | // Min size which can be loaded into vector is 4 bytes. |
kvn@3882 | 557 | int size = (type2aelembytes(bt) == 1) ? 4 : 2; |
kvn@3882 | 558 | return MIN2(size,max_size); |
kvn@3882 | 559 | } |
kvn@3882 | 560 | |
kvn@3882 | 561 | // Vector ideal reg corresponding to specidied size in bytes |
kvn@3882 | 562 | const int Matcher::vector_ideal_reg(int size) { |
kvn@3882 | 563 | assert(MaxVectorSize >= size, ""); |
kvn@3882 | 564 | switch(size) { |
kvn@3882 | 565 | case 4: return Op_VecS; |
kvn@3882 | 566 | case 8: return Op_VecD; |
kvn@3882 | 567 | case 16: return Op_VecX; |
kvn@3882 | 568 | case 32: return Op_VecY; |
kvn@3882 | 569 | } |
kvn@3882 | 570 | ShouldNotReachHere(); |
kvn@3882 | 571 | return 0; |
kvn@3882 | 572 | } |
kvn@3882 | 573 | |
// Only lowest bits of xmm reg are used for vector shift count.
// Hence a 32-bit (VecS) register always suffices to hold the shift
// count, no matter how wide the vector being shifted is.
const int Matcher::vector_shift_count_ideal_reg(int size) {
  return Op_VecS;
}
kvn@4134 | 578 | |
// x86 supports misaligned vectors store/load.
// Can be disabled (forcing aligned accesses) with -XX:+AlignVector.
const bool Matcher::misaligned_vectors_ok() {
  return !AlignVector; // can be changed by flag
}
kvn@3882 | 583 | |
// Helper methods for MachSpillCopyNode::implementation().

// Emit (when cbuf != NULL), pretty-print (non-PRODUCT, cbuf == NULL and
// !do_size) or size a vector register-to-register move.
// Returns the instruction size in bytes.
static int vec_mov_helper(CodeBuffer *cbuf, bool do_size, int src_lo, int dst_lo,
                            int src_hi, int dst_hi, uint ireg, outputStream* st) {
  // In 64-bit VM size calculation is very complex. Emitting instructions
  // into scratch buffer is used to get size in 64-bit VM.
  LP64_ONLY( assert(!do_size, "this method calculates size only for 32-bit VM"); )
  assert(ireg == Op_VecS || // 32bit vector
         (src_lo & 1) == 0 && (src_lo + 1) == src_hi &&
         (dst_lo & 1) == 0 && (dst_lo + 1) == dst_hi,
         "no non-adjacent vector moves" );
  if (cbuf) {
    MacroAssembler _masm(cbuf);
    int offset = __ offset();
    switch (ireg) {
    case Op_VecS: // copy whole register
    case Op_VecD:
    case Op_VecX:
      __ movdqu(as_XMMRegister(Matcher::_regEncode[dst_lo]), as_XMMRegister(Matcher::_regEncode[src_lo]));
      break;
    case Op_VecY:
      // 256-bit move requires the AVX (VEX-encoded) form.
      __ vmovdqu(as_XMMRegister(Matcher::_regEncode[dst_lo]), as_XMMRegister(Matcher::_regEncode[src_lo]));
      break;
    default:
      ShouldNotReachHere();
    }
    int size = __ offset() - offset;
#ifdef ASSERT
    // VEX_2bytes prefix is used if UseAVX > 0, so it takes the same 2 bytes as SIMD prefix.
    assert(!do_size || size == 4, "incorrect size calculattion");
#endif
    return size;
#ifndef PRODUCT
  } else if (!do_size) {
    // No code buffer: just print the instruction for debugging output.
    switch (ireg) {
    case Op_VecS:
    case Op_VecD:
    case Op_VecX:
      st->print("movdqu %s,%s\t# spill",Matcher::regName[dst_lo],Matcher::regName[src_lo]);
      break;
    case Op_VecY:
      st->print("vmovdqu %s,%s\t# spill",Matcher::regName[dst_lo],Matcher::regName[src_lo]);
      break;
    default:
      ShouldNotReachHere();
    }
#endif
  }
  // VEX_2bytes prefix is used if UseAVX > 0, and it takes the same 2 bytes as SIMD prefix.
  return 4;
}
kvn@3882 | 634 | |
// Emit (when cbuf != NULL), pretty-print (non-PRODUCT, cbuf == NULL and
// !do_size) or size a vector spill (store) or fill (load) between an
// XMM/YMM register and a stack slot at [rsp + stack_offset].
// Returns the instruction size in bytes.
static int vec_spill_helper(CodeBuffer *cbuf, bool do_size, bool is_load,
                            int stack_offset, int reg, uint ireg, outputStream* st) {
  // In 64-bit VM size calculation is very complex. Emitting instructions
  // into scratch buffer is used to get size in 64-bit VM.
  LP64_ONLY( assert(!do_size, "this method calculates size only for 32-bit VM"); )
  if (cbuf) {
    MacroAssembler _masm(cbuf);
    int offset = __ offset();
    if (is_load) {
      // Fill: stack slot -> register, move width chosen by ideal reg type.
      switch (ireg) {
      case Op_VecS:
        __ movdl(as_XMMRegister(Matcher::_regEncode[reg]), Address(rsp, stack_offset));
        break;
      case Op_VecD:
        __ movq(as_XMMRegister(Matcher::_regEncode[reg]), Address(rsp, stack_offset));
        break;
      case Op_VecX:
        __ movdqu(as_XMMRegister(Matcher::_regEncode[reg]), Address(rsp, stack_offset));
        break;
      case Op_VecY:
        __ vmovdqu(as_XMMRegister(Matcher::_regEncode[reg]), Address(rsp, stack_offset));
        break;
      default:
        ShouldNotReachHere();
      }
    } else { // store
      // Spill: register -> stack slot.
      switch (ireg) {
      case Op_VecS:
        __ movdl(Address(rsp, stack_offset), as_XMMRegister(Matcher::_regEncode[reg]));
        break;
      case Op_VecD:
        __ movq(Address(rsp, stack_offset), as_XMMRegister(Matcher::_regEncode[reg]));
        break;
      case Op_VecX:
        __ movdqu(Address(rsp, stack_offset), as_XMMRegister(Matcher::_regEncode[reg]));
        break;
      case Op_VecY:
        __ vmovdqu(Address(rsp, stack_offset), as_XMMRegister(Matcher::_regEncode[reg]));
        break;
      default:
        ShouldNotReachHere();
      }
    }
    int size = __ offset() - offset;
#ifdef ASSERT
    // The displacement encodes in 0, 1 or 4 bytes depending on its value.
    int offset_size = (stack_offset == 0) ? 0 : ((stack_offset < 0x80) ? 1 : 4);
    // VEX_2bytes prefix is used if UseAVX > 0, so it takes the same 2 bytes as SIMD prefix.
    assert(!do_size || size == (5+offset_size), "incorrect size calculattion");
#endif
    return size;
#ifndef PRODUCT
  } else if (!do_size) {
    // No code buffer: just print the instruction for debugging output.
    if (is_load) {
      switch (ireg) {
      case Op_VecS:
        st->print("movd %s,[rsp + %d]\t# spill", Matcher::regName[reg], stack_offset);
        break;
      case Op_VecD:
        st->print("movq %s,[rsp + %d]\t# spill", Matcher::regName[reg], stack_offset);
        break;
      case Op_VecX:
        st->print("movdqu %s,[rsp + %d]\t# spill", Matcher::regName[reg], stack_offset);
        break;
      case Op_VecY:
        st->print("vmovdqu %s,[rsp + %d]\t# spill", Matcher::regName[reg], stack_offset);
        break;
      default:
        ShouldNotReachHere();
      }
    } else { // store
      switch (ireg) {
      case Op_VecS:
        st->print("movd [rsp + %d],%s\t# spill", stack_offset, Matcher::regName[reg]);
        break;
      case Op_VecD:
        st->print("movq [rsp + %d],%s\t# spill", stack_offset, Matcher::regName[reg]);
        break;
      case Op_VecX:
        st->print("movdqu [rsp + %d],%s\t# spill", stack_offset, Matcher::regName[reg]);
        break;
      case Op_VecY:
        st->print("vmovdqu [rsp + %d],%s\t# spill", stack_offset, Matcher::regName[reg]);
        break;
      default:
        ShouldNotReachHere();
      }
    }
#endif
  }
  int offset_size = (stack_offset == 0) ? 0 : ((stack_offset < 0x80) ? 1 : 4);
  // VEX_2bytes prefix is used if UseAVX > 0, so it takes the same 2 bytes as SIMD prefix.
  return 5+offset_size;
}
kvn@3882 | 728 | |
kvn@3882 | 729 | static inline jfloat replicate4_imm(int con, int width) { |
kvn@3882 | 730 | // Load a constant of "width" (in bytes) and replicate it to fill 32bit. |
kvn@3882 | 731 | assert(width == 1 || width == 2, "only byte or short types here"); |
kvn@3882 | 732 | int bit_width = width * 8; |
kvn@3882 | 733 | jint val = con; |
kvn@3882 | 734 | val &= (1 << bit_width) - 1; // mask off sign bits |
kvn@3882 | 735 | while(bit_width < 32) { |
kvn@3882 | 736 | val |= (val << bit_width); |
kvn@3882 | 737 | bit_width <<= 1; |
kvn@3882 | 738 | } |
kvn@3882 | 739 | jfloat fval = *((jfloat*) &val); // coerce to float type |
kvn@3882 | 740 | return fval; |
kvn@3882 | 741 | } |
kvn@3882 | 742 | |
kvn@3882 | 743 | static inline jdouble replicate8_imm(int con, int width) { |
kvn@3882 | 744 | // Load a constant of "width" (in bytes) and replicate it to fill 64bit. |
kvn@3882 | 745 | assert(width == 1 || width == 2 || width == 4, "only byte, short or int types here"); |
kvn@3882 | 746 | int bit_width = width * 8; |
kvn@3882 | 747 | jlong val = con; |
kvn@3882 | 748 | val &= (((jlong) 1) << bit_width) - 1; // mask off sign bits |
kvn@3882 | 749 | while(bit_width < 64) { |
kvn@3882 | 750 | val |= (val << bit_width); |
kvn@3882 | 751 | bit_width <<= 1; |
kvn@3882 | 752 | } |
kvn@3882 | 753 | jdouble dval = *((jdouble*) &val); // coerce to double type |
kvn@3882 | 754 | return dval; |
kvn@3882 | 755 | } |
kvn@3882 | 756 | |
#ifndef PRODUCT
// Debug printout for a nop padding node: shows the pad byte count.
void MachNopNode::format(PhaseRegAlloc*, outputStream* st) const {
  st->print("nop \t# %d bytes pad for loops and calls", _count);
}
#endif
kvn@3577 | 762 | |
// Emit _count bytes of nop padding (used to align loops and call sites).
void MachNopNode::emit(CodeBuffer &cbuf, PhaseRegAlloc*) const {
  MacroAssembler _masm(&cbuf);
  __ nop(_count);
}
kvn@3577 | 767 | |
// The emitted padding occupies exactly _count bytes.
uint MachNopNode::size(PhaseRegAlloc*) const {
  return _count;
}
kvn@3577 | 771 | |
#ifndef PRODUCT
// Debug printout for a breakpoint node.
void MachBreakpointNode::format(PhaseRegAlloc*, outputStream* st) const {
  st->print("# breakpoint");
}
#endif
kvn@3577 | 777 | |
// Emit a hardware breakpoint (int3) instruction.
void MachBreakpointNode::emit(CodeBuffer &cbuf, PhaseRegAlloc* ra_) const {
  MacroAssembler _masm(&cbuf);
  __ int3();
}
kvn@3577 | 782 | |
// Size is computed generically from the node's emitted code.
uint MachBreakpointNode::size(PhaseRegAlloc* ra_) const {
  return MachNode::size(ra_);
}
kvn@3577 | 786 | |
kvn@3577 | 787 | %} |
kvn@3577 | 788 | |
encode %{

  // Save the current stack pointer into RBP before a call that may
  // change RSP.  RBP is preserved across all calls, even compiled calls,
  // so it can carry the saved RSP safely.
  enc_class preserve_SP %{
    debug_only(int off0 = cbuf.insts_size());
    MacroAssembler _masm(&cbuf);
    // RBP is preserved across all calls, even compiled calls.
    // Use it to preserve RSP in places where the callee might change the SP.
    __ movptr(rbp_mh_SP_save, rsp);
    debug_only(int off1 = cbuf.insts_size());
    assert(off1 - off0 == preserve_SP_size(), "correct size prediction");
  %}

  // Restore the stack pointer previously saved by preserve_SP.
  enc_class restore_SP %{
    MacroAssembler _masm(&cbuf);
    __ movptr(rsp, rbp_mh_SP_save);
  %}

  // Post-call check (active only with -XX:+VerifyStackAtCalls): verify
  // the callee left the stack depth unchanged by looking for the magic
  // cookie stored in the frame; stop with int3 on mismatch.
  enc_class call_epilog %{
    if (VerifyStackAtCalls) {
      // Check that stack depth is unchanged: find majik cookie on stack
      int framesize = ra_->reg2offset_unchecked(OptoReg::add(ra_->_matcher._old_SP, -3*VMRegImpl::slots_per_word));
      MacroAssembler _masm(&cbuf);
      Label L;
      __ cmpptr(Address(rsp, framesize), (int32_t)0xbadb100d);
      __ jccb(Assembler::equal, L);
      // Die if stack mismatch
      __ int3();
      __ bind(L);
    }
  %}

%}
kvn@3390 | 821 | |
kvn@3882 | 822 | |
kvn@3882 | 823 | //----------OPERANDS----------------------------------------------------------- |
kvn@3882 | 824 | // Operand definitions must precede instruction definitions for correct parsing |
kvn@3882 | 825 | // in the ADLC because operands constitute user defined types which are used in |
kvn@3882 | 826 | // instruction definitions. |
kvn@3882 | 827 | |
kvn@3882 | 828 | // Vectors |
// 32-bit vector operand (e.g. 4 bytes or 2 shorts).
operand vecS() %{
  constraint(ALLOC_IN_RC(vectors_reg));
  match(VecS);

  format %{ %}
  interface(REG_INTER);
%}

// 64-bit vector operand.
operand vecD() %{
  constraint(ALLOC_IN_RC(vectord_reg));
  match(VecD);

  format %{ %}
  interface(REG_INTER);
%}

// 128-bit vector operand (full XMM register).
operand vecX() %{
  constraint(ALLOC_IN_RC(vectorx_reg));
  match(VecX);

  format %{ %}
  interface(REG_INTER);
%}

// 256-bit vector operand (full YMM register, requires AVX).
operand vecY() %{
  constraint(ALLOC_IN_RC(vectory_reg));
  match(VecY);

  format %{ %}
  interface(REG_INTER);
%}
kvn@3882 | 860 | |
kvn@3882 | 861 | |
kvn@3390 | 862 | // INSTRUCTIONS -- Platform independent definitions (same for 32- and 64-bit) |
kvn@3390 | 863 | |
kvn@3577 | 864 | // ============================================================================ |
kvn@3577 | 865 | |
// Matches the ideal Halt node: emits an int3 breakpoint to stop the VM
// on control paths that must never execute.
instruct ShouldNotReachHere() %{
  match(Halt);
  format %{ "int3\t# ShouldNotReachHere" %}
  ins_encode %{
    __ int3();
  %}
  ins_pipe(pipe_slow);
%}
kvn@3577 | 874 | |
kvn@3577 | 875 | // ============================================================================ |
kvn@3577 | 876 | |
// Scalar float add, SSE form: dst = dst + src.
instruct addF_reg(regF dst, regF src) %{
  predicate((UseSSE>=1) && (UseAVX == 0));
  match(Set dst (AddF dst src));

  format %{ "addss $dst, $src" %}
  ins_cost(150);
  ins_encode %{
    __ addss($dst$$XMMRegister, $src$$XMMRegister);
  %}
  ins_pipe(pipe_slow);
%}

// Scalar float add with a memory operand: dst = dst + [src].
instruct addF_mem(regF dst, memory src) %{
  predicate((UseSSE>=1) && (UseAVX == 0));
  match(Set dst (AddF dst (LoadF src)));

  format %{ "addss $dst, $src" %}
  ins_cost(150);
  ins_encode %{
    __ addss($dst$$XMMRegister, $src$$Address);
  %}
  ins_pipe(pipe_slow);
%}

// Scalar float add with a constant loaded from the constant table.
instruct addF_imm(regF dst, immF con) %{
  predicate((UseSSE>=1) && (UseAVX == 0));
  match(Set dst (AddF dst con));
  format %{ "addss $dst, [$constantaddress]\t# load from constant table: float=$con" %}
  ins_cost(150);
  ins_encode %{
    __ addss($dst$$XMMRegister, $constantaddress($con));
  %}
  ins_pipe(pipe_slow);
%}

// AVX three-operand scalar float add: dst = src1 + src2.
instruct addF_reg_reg(regF dst, regF src1, regF src2) %{
  predicate(UseAVX > 0);
  match(Set dst (AddF src1 src2));

  format %{ "vaddss $dst, $src1, $src2" %}
  ins_cost(150);
  ins_encode %{
    __ vaddss($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister);
  %}
  ins_pipe(pipe_slow);
%}

// AVX scalar float add with a memory operand: dst = src1 + [src2].
instruct addF_reg_mem(regF dst, regF src1, memory src2) %{
  predicate(UseAVX > 0);
  match(Set dst (AddF src1 (LoadF src2)));

  format %{ "vaddss $dst, $src1, $src2" %}
  ins_cost(150);
  ins_encode %{
    __ vaddss($dst$$XMMRegister, $src1$$XMMRegister, $src2$$Address);
  %}
  ins_pipe(pipe_slow);
%}

// AVX scalar float add with a constant-table operand.
instruct addF_reg_imm(regF dst, regF src, immF con) %{
  predicate(UseAVX > 0);
  match(Set dst (AddF src con));

  format %{ "vaddss $dst, $src, [$constantaddress]\t# load from constant table: float=$con" %}
  ins_cost(150);
  ins_encode %{
    __ vaddss($dst$$XMMRegister, $src$$XMMRegister, $constantaddress($con));
  %}
  ins_pipe(pipe_slow);
%}
kvn@3390 | 947 | |
// Scalar double add, SSE2 form: dst = dst + src.
instruct addD_reg(regD dst, regD src) %{
  predicate((UseSSE>=2) && (UseAVX == 0));
  match(Set dst (AddD dst src));

  format %{ "addsd $dst, $src" %}
  ins_cost(150);
  ins_encode %{
    __ addsd($dst$$XMMRegister, $src$$XMMRegister);
  %}
  ins_pipe(pipe_slow);
%}

// Scalar double add with a memory operand: dst = dst + [src].
instruct addD_mem(regD dst, memory src) %{
  predicate((UseSSE>=2) && (UseAVX == 0));
  match(Set dst (AddD dst (LoadD src)));

  format %{ "addsd $dst, $src" %}
  ins_cost(150);
  ins_encode %{
    __ addsd($dst$$XMMRegister, $src$$Address);
  %}
  ins_pipe(pipe_slow);
%}

// Scalar double add with a constant loaded from the constant table.
instruct addD_imm(regD dst, immD con) %{
  predicate((UseSSE>=2) && (UseAVX == 0));
  match(Set dst (AddD dst con));
  format %{ "addsd $dst, [$constantaddress]\t# load from constant table: double=$con" %}
  ins_cost(150);
  ins_encode %{
    __ addsd($dst$$XMMRegister, $constantaddress($con));
  %}
  ins_pipe(pipe_slow);
%}

// AVX three-operand scalar double add: dst = src1 + src2.
instruct addD_reg_reg(regD dst, regD src1, regD src2) %{
  predicate(UseAVX > 0);
  match(Set dst (AddD src1 src2));

  format %{ "vaddsd $dst, $src1, $src2" %}
  ins_cost(150);
  ins_encode %{
    __ vaddsd($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister);
  %}
  ins_pipe(pipe_slow);
%}

// AVX scalar double add with a memory operand: dst = src1 + [src2].
instruct addD_reg_mem(regD dst, regD src1, memory src2) %{
  predicate(UseAVX > 0);
  match(Set dst (AddD src1 (LoadD src2)));

  format %{ "vaddsd $dst, $src1, $src2" %}
  ins_cost(150);
  ins_encode %{
    __ vaddsd($dst$$XMMRegister, $src1$$XMMRegister, $src2$$Address);
  %}
  ins_pipe(pipe_slow);
%}

// AVX scalar double add with a constant-table operand.
instruct addD_reg_imm(regD dst, regD src, immD con) %{
  predicate(UseAVX > 0);
  match(Set dst (AddD src con));

  format %{ "vaddsd $dst, $src, [$constantaddress]\t# load from constant table: double=$con" %}
  ins_cost(150);
  ins_encode %{
    __ vaddsd($dst$$XMMRegister, $src$$XMMRegister, $constantaddress($con));
  %}
  ins_pipe(pipe_slow);
%}
kvn@3390 | 1018 | |
// Scalar float subtract, SSE form: dst = dst - src.
instruct subF_reg(regF dst, regF src) %{
  predicate((UseSSE>=1) && (UseAVX == 0));
  match(Set dst (SubF dst src));

  format %{ "subss $dst, $src" %}
  ins_cost(150);
  ins_encode %{
    __ subss($dst$$XMMRegister, $src$$XMMRegister);
  %}
  ins_pipe(pipe_slow);
%}

// Scalar float subtract with a memory operand: dst = dst - [src].
instruct subF_mem(regF dst, memory src) %{
  predicate((UseSSE>=1) && (UseAVX == 0));
  match(Set dst (SubF dst (LoadF src)));

  format %{ "subss $dst, $src" %}
  ins_cost(150);
  ins_encode %{
    __ subss($dst$$XMMRegister, $src$$Address);
  %}
  ins_pipe(pipe_slow);
%}

// Scalar float subtract with a constant loaded from the constant table.
instruct subF_imm(regF dst, immF con) %{
  predicate((UseSSE>=1) && (UseAVX == 0));
  match(Set dst (SubF dst con));
  format %{ "subss $dst, [$constantaddress]\t# load from constant table: float=$con" %}
  ins_cost(150);
  ins_encode %{
    __ subss($dst$$XMMRegister, $constantaddress($con));
  %}
  ins_pipe(pipe_slow);
%}

// AVX three-operand scalar float subtract: dst = src1 - src2.
instruct subF_reg_reg(regF dst, regF src1, regF src2) %{
  predicate(UseAVX > 0);
  match(Set dst (SubF src1 src2));

  format %{ "vsubss $dst, $src1, $src2" %}
  ins_cost(150);
  ins_encode %{
    __ vsubss($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister);
  %}
  ins_pipe(pipe_slow);
%}

// AVX scalar float subtract with a memory operand: dst = src1 - [src2].
instruct subF_reg_mem(regF dst, regF src1, memory src2) %{
  predicate(UseAVX > 0);
  match(Set dst (SubF src1 (LoadF src2)));

  format %{ "vsubss $dst, $src1, $src2" %}
  ins_cost(150);
  ins_encode %{
    __ vsubss($dst$$XMMRegister, $src1$$XMMRegister, $src2$$Address);
  %}
  ins_pipe(pipe_slow);
%}

// AVX scalar float subtract with a constant-table operand.
instruct subF_reg_imm(regF dst, regF src, immF con) %{
  predicate(UseAVX > 0);
  match(Set dst (SubF src con));

  format %{ "vsubss $dst, $src, [$constantaddress]\t# load from constant table: float=$con" %}
  ins_cost(150);
  ins_encode %{
    __ vsubss($dst$$XMMRegister, $src$$XMMRegister, $constantaddress($con));
  %}
  ins_pipe(pipe_slow);
%}
kvn@3390 | 1089 | |
// Scalar double subtract, SSE2 form: dst = dst - src.
instruct subD_reg(regD dst, regD src) %{
  predicate((UseSSE>=2) && (UseAVX == 0));
  match(Set dst (SubD dst src));

  format %{ "subsd $dst, $src" %}
  ins_cost(150);
  ins_encode %{
    __ subsd($dst$$XMMRegister, $src$$XMMRegister);
  %}
  ins_pipe(pipe_slow);
%}

// Scalar double subtract with a memory operand: dst = dst - [src].
instruct subD_mem(regD dst, memory src) %{
  predicate((UseSSE>=2) && (UseAVX == 0));
  match(Set dst (SubD dst (LoadD src)));

  format %{ "subsd $dst, $src" %}
  ins_cost(150);
  ins_encode %{
    __ subsd($dst$$XMMRegister, $src$$Address);
  %}
  ins_pipe(pipe_slow);
%}

// Scalar double subtract with a constant loaded from the constant table.
instruct subD_imm(regD dst, immD con) %{
  predicate((UseSSE>=2) && (UseAVX == 0));
  match(Set dst (SubD dst con));
  format %{ "subsd $dst, [$constantaddress]\t# load from constant table: double=$con" %}
  ins_cost(150);
  ins_encode %{
    __ subsd($dst$$XMMRegister, $constantaddress($con));
  %}
  ins_pipe(pipe_slow);
%}

// AVX three-operand scalar double subtract: dst = src1 - src2.
instruct subD_reg_reg(regD dst, regD src1, regD src2) %{
  predicate(UseAVX > 0);
  match(Set dst (SubD src1 src2));

  format %{ "vsubsd $dst, $src1, $src2" %}
  ins_cost(150);
  ins_encode %{
    __ vsubsd($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister);
  %}
  ins_pipe(pipe_slow);
%}

// AVX scalar double subtract with a memory operand: dst = src1 - [src2].
instruct subD_reg_mem(regD dst, regD src1, memory src2) %{
  predicate(UseAVX > 0);
  match(Set dst (SubD src1 (LoadD src2)));

  format %{ "vsubsd $dst, $src1, $src2" %}
  ins_cost(150);
  ins_encode %{
    __ vsubsd($dst$$XMMRegister, $src1$$XMMRegister, $src2$$Address);
  %}
  ins_pipe(pipe_slow);
%}

// AVX scalar double subtract with a constant-table operand.
instruct subD_reg_imm(regD dst, regD src, immD con) %{
  predicate(UseAVX > 0);
  match(Set dst (SubD src con));

  format %{ "vsubsd $dst, $src, [$constantaddress]\t# load from constant table: double=$con" %}
  ins_cost(150);
  ins_encode %{
    __ vsubsd($dst$$XMMRegister, $src$$XMMRegister, $constantaddress($con));
  %}
  ins_pipe(pipe_slow);
%}
kvn@3390 | 1160 | |
// Scalar float multiply, SSE form: dst = dst * src.
instruct mulF_reg(regF dst, regF src) %{
  predicate((UseSSE>=1) && (UseAVX == 0));
  match(Set dst (MulF dst src));

  format %{ "mulss $dst, $src" %}
  ins_cost(150);
  ins_encode %{
    __ mulss($dst$$XMMRegister, $src$$XMMRegister);
  %}
  ins_pipe(pipe_slow);
%}

// Scalar float multiply with a memory operand: dst = dst * [src].
instruct mulF_mem(regF dst, memory src) %{
  predicate((UseSSE>=1) && (UseAVX == 0));
  match(Set dst (MulF dst (LoadF src)));

  format %{ "mulss $dst, $src" %}
  ins_cost(150);
  ins_encode %{
    __ mulss($dst$$XMMRegister, $src$$Address);
  %}
  ins_pipe(pipe_slow);
%}

// Scalar float multiply with a constant loaded from the constant table.
instruct mulF_imm(regF dst, immF con) %{
  predicate((UseSSE>=1) && (UseAVX == 0));
  match(Set dst (MulF dst con));
  format %{ "mulss $dst, [$constantaddress]\t# load from constant table: float=$con" %}
  ins_cost(150);
  ins_encode %{
    __ mulss($dst$$XMMRegister, $constantaddress($con));
  %}
  ins_pipe(pipe_slow);
%}

// AVX three-operand scalar float multiply: dst = src1 * src2.
instruct mulF_reg_reg(regF dst, regF src1, regF src2) %{
  predicate(UseAVX > 0);
  match(Set dst (MulF src1 src2));

  format %{ "vmulss $dst, $src1, $src2" %}
  ins_cost(150);
  ins_encode %{
    __ vmulss($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister);
  %}
  ins_pipe(pipe_slow);
%}

// AVX scalar float multiply with a memory operand: dst = src1 * [src2].
instruct mulF_reg_mem(regF dst, regF src1, memory src2) %{
  predicate(UseAVX > 0);
  match(Set dst (MulF src1 (LoadF src2)));

  format %{ "vmulss $dst, $src1, $src2" %}
  ins_cost(150);
  ins_encode %{
    __ vmulss($dst$$XMMRegister, $src1$$XMMRegister, $src2$$Address);
  %}
  ins_pipe(pipe_slow);
%}

// AVX scalar float multiply with a constant-table operand.
instruct mulF_reg_imm(regF dst, regF src, immF con) %{
  predicate(UseAVX > 0);
  match(Set dst (MulF src con));

  format %{ "vmulss $dst, $src, [$constantaddress]\t# load from constant table: float=$con" %}
  ins_cost(150);
  ins_encode %{
    __ vmulss($dst$$XMMRegister, $src$$XMMRegister, $constantaddress($con));
  %}
  ins_pipe(pipe_slow);
%}
kvn@3390 | 1231 | |
kvn@3390 | 1232 | instruct mulD_reg(regD dst, regD src) %{ |
kvn@3390 | 1233 | predicate((UseSSE>=2) && (UseAVX == 0)); |
kvn@3390 | 1234 | match(Set dst (MulD dst src)); |
kvn@3390 | 1235 | |
kvn@3390 | 1236 | format %{ "mulsd $dst, $src" %} |
kvn@3390 | 1237 | ins_cost(150); |
kvn@3390 | 1238 | ins_encode %{ |
kvn@3390 | 1239 | __ mulsd($dst$$XMMRegister, $src$$XMMRegister); |
kvn@3390 | 1240 | %} |
kvn@3390 | 1241 | ins_pipe(pipe_slow); |
kvn@3390 | 1242 | %} |
kvn@3390 | 1243 | |
kvn@3390 | 1244 | instruct mulD_mem(regD dst, memory src) %{ |
kvn@3390 | 1245 | predicate((UseSSE>=2) && (UseAVX == 0)); |
kvn@3390 | 1246 | match(Set dst (MulD dst (LoadD src))); |
kvn@3390 | 1247 | |
kvn@3390 | 1248 | format %{ "mulsd $dst, $src" %} |
kvn@3390 | 1249 | ins_cost(150); |
kvn@3390 | 1250 | ins_encode %{ |
kvn@3390 | 1251 | __ mulsd($dst$$XMMRegister, $src$$Address); |
kvn@3390 | 1252 | %} |
kvn@3390 | 1253 | ins_pipe(pipe_slow); |
kvn@3390 | 1254 | %} |
kvn@3390 | 1255 | |
kvn@3390 | 1256 | instruct mulD_imm(regD dst, immD con) %{ |
kvn@3390 | 1257 | predicate((UseSSE>=2) && (UseAVX == 0)); |
kvn@3390 | 1258 | match(Set dst (MulD dst con)); |
kvn@3390 | 1259 | format %{ "mulsd $dst, [$constantaddress]\t# load from constant table: double=$con" %} |
kvn@3390 | 1260 | ins_cost(150); |
kvn@3390 | 1261 | ins_encode %{ |
kvn@3390 | 1262 | __ mulsd($dst$$XMMRegister, $constantaddress($con)); |
kvn@3390 | 1263 | %} |
kvn@3390 | 1264 | ins_pipe(pipe_slow); |
kvn@3390 | 1265 | %} |
kvn@3390 | 1266 | |
kvn@3929 | 1267 | instruct mulD_reg_reg(regD dst, regD src1, regD src2) %{ |
kvn@3390 | 1268 | predicate(UseAVX > 0); |
kvn@3390 | 1269 | match(Set dst (MulD src1 src2)); |
kvn@3390 | 1270 | |
kvn@3390 | 1271 | format %{ "vmulsd $dst, $src1, $src2" %} |
kvn@3390 | 1272 | ins_cost(150); |
kvn@3390 | 1273 | ins_encode %{ |
kvn@3390 | 1274 | __ vmulsd($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister); |
kvn@3390 | 1275 | %} |
kvn@3390 | 1276 | ins_pipe(pipe_slow); |
kvn@3390 | 1277 | %} |
kvn@3390 | 1278 | |
kvn@3929 | 1279 | instruct mulD_reg_mem(regD dst, regD src1, memory src2) %{ |
kvn@3390 | 1280 | predicate(UseAVX > 0); |
kvn@3390 | 1281 | match(Set dst (MulD src1 (LoadD src2))); |
kvn@3390 | 1282 | |
kvn@3390 | 1283 | format %{ "vmulsd $dst, $src1, $src2" %} |
kvn@3390 | 1284 | ins_cost(150); |
kvn@3390 | 1285 | ins_encode %{ |
kvn@3390 | 1286 | __ vmulsd($dst$$XMMRegister, $src1$$XMMRegister, $src2$$Address); |
kvn@3390 | 1287 | %} |
kvn@3390 | 1288 | ins_pipe(pipe_slow); |
kvn@3390 | 1289 | %} |
kvn@3390 | 1290 | |
kvn@3929 | 1291 | instruct mulD_reg_imm(regD dst, regD src, immD con) %{ |
kvn@3390 | 1292 | predicate(UseAVX > 0); |
kvn@3390 | 1293 | match(Set dst (MulD src con)); |
kvn@3390 | 1294 | |
kvn@3390 | 1295 | format %{ "vmulsd $dst, $src, [$constantaddress]\t# load from constant table: double=$con" %} |
kvn@3390 | 1296 | ins_cost(150); |
kvn@3390 | 1297 | ins_encode %{ |
kvn@3390 | 1298 | __ vmulsd($dst$$XMMRegister, $src$$XMMRegister, $constantaddress($con)); |
kvn@3390 | 1299 | %} |
kvn@3390 | 1300 | ins_pipe(pipe_slow); |
kvn@3390 | 1301 | %} |
kvn@3390 | 1302 | |
kvn@3390 | 1303 | instruct divF_reg(regF dst, regF src) %{ |
kvn@3390 | 1304 | predicate((UseSSE>=1) && (UseAVX == 0)); |
kvn@3390 | 1305 | match(Set dst (DivF dst src)); |
kvn@3390 | 1306 | |
kvn@3390 | 1307 | format %{ "divss $dst, $src" %} |
kvn@3390 | 1308 | ins_cost(150); |
kvn@3390 | 1309 | ins_encode %{ |
kvn@3390 | 1310 | __ divss($dst$$XMMRegister, $src$$XMMRegister); |
kvn@3390 | 1311 | %} |
kvn@3390 | 1312 | ins_pipe(pipe_slow); |
kvn@3390 | 1313 | %} |
kvn@3390 | 1314 | |
kvn@3390 | 1315 | instruct divF_mem(regF dst, memory src) %{ |
kvn@3390 | 1316 | predicate((UseSSE>=1) && (UseAVX == 0)); |
kvn@3390 | 1317 | match(Set dst (DivF dst (LoadF src))); |
kvn@3390 | 1318 | |
kvn@3390 | 1319 | format %{ "divss $dst, $src" %} |
kvn@3390 | 1320 | ins_cost(150); |
kvn@3390 | 1321 | ins_encode %{ |
kvn@3390 | 1322 | __ divss($dst$$XMMRegister, $src$$Address); |
kvn@3390 | 1323 | %} |
kvn@3390 | 1324 | ins_pipe(pipe_slow); |
kvn@3390 | 1325 | %} |
kvn@3390 | 1326 | |
kvn@3390 | 1327 | instruct divF_imm(regF dst, immF con) %{ |
kvn@3390 | 1328 | predicate((UseSSE>=1) && (UseAVX == 0)); |
kvn@3390 | 1329 | match(Set dst (DivF dst con)); |
kvn@3390 | 1330 | format %{ "divss $dst, [$constantaddress]\t# load from constant table: float=$con" %} |
kvn@3390 | 1331 | ins_cost(150); |
kvn@3390 | 1332 | ins_encode %{ |
kvn@3390 | 1333 | __ divss($dst$$XMMRegister, $constantaddress($con)); |
kvn@3390 | 1334 | %} |
kvn@3390 | 1335 | ins_pipe(pipe_slow); |
kvn@3390 | 1336 | %} |
kvn@3390 | 1337 | |
kvn@3929 | 1338 | instruct divF_reg_reg(regF dst, regF src1, regF src2) %{ |
kvn@3390 | 1339 | predicate(UseAVX > 0); |
kvn@3390 | 1340 | match(Set dst (DivF src1 src2)); |
kvn@3390 | 1341 | |
kvn@3390 | 1342 | format %{ "vdivss $dst, $src1, $src2" %} |
kvn@3390 | 1343 | ins_cost(150); |
kvn@3390 | 1344 | ins_encode %{ |
kvn@3390 | 1345 | __ vdivss($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister); |
kvn@3390 | 1346 | %} |
kvn@3390 | 1347 | ins_pipe(pipe_slow); |
kvn@3390 | 1348 | %} |
kvn@3390 | 1349 | |
kvn@3929 | 1350 | instruct divF_reg_mem(regF dst, regF src1, memory src2) %{ |
kvn@3390 | 1351 | predicate(UseAVX > 0); |
kvn@3390 | 1352 | match(Set dst (DivF src1 (LoadF src2))); |
kvn@3390 | 1353 | |
kvn@3390 | 1354 | format %{ "vdivss $dst, $src1, $src2" %} |
kvn@3390 | 1355 | ins_cost(150); |
kvn@3390 | 1356 | ins_encode %{ |
kvn@3390 | 1357 | __ vdivss($dst$$XMMRegister, $src1$$XMMRegister, $src2$$Address); |
kvn@3390 | 1358 | %} |
kvn@3390 | 1359 | ins_pipe(pipe_slow); |
kvn@3390 | 1360 | %} |
kvn@3390 | 1361 | |
kvn@3929 | 1362 | instruct divF_reg_imm(regF dst, regF src, immF con) %{ |
kvn@3390 | 1363 | predicate(UseAVX > 0); |
kvn@3390 | 1364 | match(Set dst (DivF src con)); |
kvn@3390 | 1365 | |
kvn@3390 | 1366 | format %{ "vdivss $dst, $src, [$constantaddress]\t# load from constant table: float=$con" %} |
kvn@3390 | 1367 | ins_cost(150); |
kvn@3390 | 1368 | ins_encode %{ |
kvn@3390 | 1369 | __ vdivss($dst$$XMMRegister, $src$$XMMRegister, $constantaddress($con)); |
kvn@3390 | 1370 | %} |
kvn@3390 | 1371 | ins_pipe(pipe_slow); |
kvn@3390 | 1372 | %} |
kvn@3390 | 1373 | |
kvn@3390 | 1374 | instruct divD_reg(regD dst, regD src) %{ |
kvn@3390 | 1375 | predicate((UseSSE>=2) && (UseAVX == 0)); |
kvn@3390 | 1376 | match(Set dst (DivD dst src)); |
kvn@3390 | 1377 | |
kvn@3390 | 1378 | format %{ "divsd $dst, $src" %} |
kvn@3390 | 1379 | ins_cost(150); |
kvn@3390 | 1380 | ins_encode %{ |
kvn@3390 | 1381 | __ divsd($dst$$XMMRegister, $src$$XMMRegister); |
kvn@3390 | 1382 | %} |
kvn@3390 | 1383 | ins_pipe(pipe_slow); |
kvn@3390 | 1384 | %} |
kvn@3390 | 1385 | |
kvn@3390 | 1386 | instruct divD_mem(regD dst, memory src) %{ |
kvn@3390 | 1387 | predicate((UseSSE>=2) && (UseAVX == 0)); |
kvn@3390 | 1388 | match(Set dst (DivD dst (LoadD src))); |
kvn@3390 | 1389 | |
kvn@3390 | 1390 | format %{ "divsd $dst, $src" %} |
kvn@3390 | 1391 | ins_cost(150); |
kvn@3390 | 1392 | ins_encode %{ |
kvn@3390 | 1393 | __ divsd($dst$$XMMRegister, $src$$Address); |
kvn@3390 | 1394 | %} |
kvn@3390 | 1395 | ins_pipe(pipe_slow); |
kvn@3390 | 1396 | %} |
kvn@3390 | 1397 | |
kvn@3390 | 1398 | instruct divD_imm(regD dst, immD con) %{ |
kvn@3390 | 1399 | predicate((UseSSE>=2) && (UseAVX == 0)); |
kvn@3390 | 1400 | match(Set dst (DivD dst con)); |
kvn@3390 | 1401 | format %{ "divsd $dst, [$constantaddress]\t# load from constant table: double=$con" %} |
kvn@3390 | 1402 | ins_cost(150); |
kvn@3390 | 1403 | ins_encode %{ |
kvn@3390 | 1404 | __ divsd($dst$$XMMRegister, $constantaddress($con)); |
kvn@3390 | 1405 | %} |
kvn@3390 | 1406 | ins_pipe(pipe_slow); |
kvn@3390 | 1407 | %} |
kvn@3390 | 1408 | |
kvn@3929 | 1409 | instruct divD_reg_reg(regD dst, regD src1, regD src2) %{ |
kvn@3390 | 1410 | predicate(UseAVX > 0); |
kvn@3390 | 1411 | match(Set dst (DivD src1 src2)); |
kvn@3390 | 1412 | |
kvn@3390 | 1413 | format %{ "vdivsd $dst, $src1, $src2" %} |
kvn@3390 | 1414 | ins_cost(150); |
kvn@3390 | 1415 | ins_encode %{ |
kvn@3390 | 1416 | __ vdivsd($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister); |
kvn@3390 | 1417 | %} |
kvn@3390 | 1418 | ins_pipe(pipe_slow); |
kvn@3390 | 1419 | %} |
kvn@3390 | 1420 | |
kvn@3929 | 1421 | instruct divD_reg_mem(regD dst, regD src1, memory src2) %{ |
kvn@3390 | 1422 | predicate(UseAVX > 0); |
kvn@3390 | 1423 | match(Set dst (DivD src1 (LoadD src2))); |
kvn@3390 | 1424 | |
kvn@3390 | 1425 | format %{ "vdivsd $dst, $src1, $src2" %} |
kvn@3390 | 1426 | ins_cost(150); |
kvn@3390 | 1427 | ins_encode %{ |
kvn@3390 | 1428 | __ vdivsd($dst$$XMMRegister, $src1$$XMMRegister, $src2$$Address); |
kvn@3390 | 1429 | %} |
kvn@3390 | 1430 | ins_pipe(pipe_slow); |
kvn@3390 | 1431 | %} |
kvn@3390 | 1432 | |
kvn@3929 | 1433 | instruct divD_reg_imm(regD dst, regD src, immD con) %{ |
kvn@3390 | 1434 | predicate(UseAVX > 0); |
kvn@3390 | 1435 | match(Set dst (DivD src con)); |
kvn@3390 | 1436 | |
kvn@3390 | 1437 | format %{ "vdivsd $dst, $src, [$constantaddress]\t# load from constant table: double=$con" %} |
kvn@3390 | 1438 | ins_cost(150); |
kvn@3390 | 1439 | ins_encode %{ |
kvn@3390 | 1440 | __ vdivsd($dst$$XMMRegister, $src$$XMMRegister, $constantaddress($con)); |
kvn@3390 | 1441 | %} |
kvn@3390 | 1442 | ins_pipe(pipe_slow); |
kvn@3390 | 1443 | %} |
kvn@3390 | 1444 | |
kvn@3390 | 1445 | instruct absF_reg(regF dst) %{ |
kvn@3390 | 1446 | predicate((UseSSE>=1) && (UseAVX == 0)); |
kvn@3390 | 1447 | match(Set dst (AbsF dst)); |
kvn@3390 | 1448 | ins_cost(150); |
kvn@3390 | 1449 | format %{ "andps $dst, [0x7fffffff]\t# abs float by sign masking" %} |
kvn@3390 | 1450 | ins_encode %{ |
kvn@3390 | 1451 | __ andps($dst$$XMMRegister, ExternalAddress(float_signmask())); |
kvn@3390 | 1452 | %} |
kvn@3390 | 1453 | ins_pipe(pipe_slow); |
kvn@3390 | 1454 | %} |
kvn@3390 | 1455 | |
kvn@3929 | 1456 | instruct absF_reg_reg(regF dst, regF src) %{ |
kvn@3390 | 1457 | predicate(UseAVX > 0); |
kvn@3390 | 1458 | match(Set dst (AbsF src)); |
kvn@3390 | 1459 | ins_cost(150); |
kvn@3390 | 1460 | format %{ "vandps $dst, $src, [0x7fffffff]\t# abs float by sign masking" %} |
kvn@3390 | 1461 | ins_encode %{ |
kvn@4001 | 1462 | bool vector256 = false; |
kvn@3390 | 1463 | __ vandps($dst$$XMMRegister, $src$$XMMRegister, |
kvn@4001 | 1464 | ExternalAddress(float_signmask()), vector256); |
kvn@3390 | 1465 | %} |
kvn@3390 | 1466 | ins_pipe(pipe_slow); |
kvn@3390 | 1467 | %} |
kvn@3390 | 1468 | |
kvn@3390 | 1469 | instruct absD_reg(regD dst) %{ |
kvn@3390 | 1470 | predicate((UseSSE>=2) && (UseAVX == 0)); |
kvn@3390 | 1471 | match(Set dst (AbsD dst)); |
kvn@3390 | 1472 | ins_cost(150); |
kvn@3390 | 1473 | format %{ "andpd $dst, [0x7fffffffffffffff]\t" |
kvn@3390 | 1474 | "# abs double by sign masking" %} |
kvn@3390 | 1475 | ins_encode %{ |
kvn@3390 | 1476 | __ andpd($dst$$XMMRegister, ExternalAddress(double_signmask())); |
kvn@3390 | 1477 | %} |
kvn@3390 | 1478 | ins_pipe(pipe_slow); |
kvn@3390 | 1479 | %} |
kvn@3390 | 1480 | |
kvn@3929 | 1481 | instruct absD_reg_reg(regD dst, regD src) %{ |
kvn@3390 | 1482 | predicate(UseAVX > 0); |
kvn@3390 | 1483 | match(Set dst (AbsD src)); |
kvn@3390 | 1484 | ins_cost(150); |
kvn@3390 | 1485 | format %{ "vandpd $dst, $src, [0x7fffffffffffffff]\t" |
kvn@3390 | 1486 | "# abs double by sign masking" %} |
kvn@3390 | 1487 | ins_encode %{ |
kvn@4001 | 1488 | bool vector256 = false; |
kvn@3390 | 1489 | __ vandpd($dst$$XMMRegister, $src$$XMMRegister, |
kvn@4001 | 1490 | ExternalAddress(double_signmask()), vector256); |
kvn@3390 | 1491 | %} |
kvn@3390 | 1492 | ins_pipe(pipe_slow); |
kvn@3390 | 1493 | %} |
kvn@3390 | 1494 | |
kvn@3390 | 1495 | instruct negF_reg(regF dst) %{ |
kvn@3390 | 1496 | predicate((UseSSE>=1) && (UseAVX == 0)); |
kvn@3390 | 1497 | match(Set dst (NegF dst)); |
kvn@3390 | 1498 | ins_cost(150); |
kvn@3390 | 1499 | format %{ "xorps $dst, [0x80000000]\t# neg float by sign flipping" %} |
kvn@3390 | 1500 | ins_encode %{ |
kvn@3390 | 1501 | __ xorps($dst$$XMMRegister, ExternalAddress(float_signflip())); |
kvn@3390 | 1502 | %} |
kvn@3390 | 1503 | ins_pipe(pipe_slow); |
kvn@3390 | 1504 | %} |
kvn@3390 | 1505 | |
kvn@3929 | 1506 | instruct negF_reg_reg(regF dst, regF src) %{ |
kvn@3390 | 1507 | predicate(UseAVX > 0); |
kvn@3390 | 1508 | match(Set dst (NegF src)); |
kvn@3390 | 1509 | ins_cost(150); |
kvn@3390 | 1510 | format %{ "vxorps $dst, $src, [0x80000000]\t# neg float by sign flipping" %} |
kvn@3390 | 1511 | ins_encode %{ |
kvn@4001 | 1512 | bool vector256 = false; |
kvn@3390 | 1513 | __ vxorps($dst$$XMMRegister, $src$$XMMRegister, |
kvn@4001 | 1514 | ExternalAddress(float_signflip()), vector256); |
kvn@3390 | 1515 | %} |
kvn@3390 | 1516 | ins_pipe(pipe_slow); |
kvn@3390 | 1517 | %} |
kvn@3390 | 1518 | |
kvn@3390 | 1519 | instruct negD_reg(regD dst) %{ |
kvn@3390 | 1520 | predicate((UseSSE>=2) && (UseAVX == 0)); |
kvn@3390 | 1521 | match(Set dst (NegD dst)); |
kvn@3390 | 1522 | ins_cost(150); |
kvn@3390 | 1523 | format %{ "xorpd $dst, [0x8000000000000000]\t" |
kvn@3390 | 1524 | "# neg double by sign flipping" %} |
kvn@3390 | 1525 | ins_encode %{ |
kvn@3390 | 1526 | __ xorpd($dst$$XMMRegister, ExternalAddress(double_signflip())); |
kvn@3390 | 1527 | %} |
kvn@3390 | 1528 | ins_pipe(pipe_slow); |
kvn@3390 | 1529 | %} |
kvn@3390 | 1530 | |
kvn@3929 | 1531 | instruct negD_reg_reg(regD dst, regD src) %{ |
kvn@3390 | 1532 | predicate(UseAVX > 0); |
kvn@3390 | 1533 | match(Set dst (NegD src)); |
kvn@3390 | 1534 | ins_cost(150); |
kvn@3390 | 1535 | format %{ "vxorpd $dst, $src, [0x8000000000000000]\t" |
kvn@3390 | 1536 | "# neg double by sign flipping" %} |
kvn@3390 | 1537 | ins_encode %{ |
kvn@4001 | 1538 | bool vector256 = false; |
kvn@3390 | 1539 | __ vxorpd($dst$$XMMRegister, $src$$XMMRegister, |
kvn@4001 | 1540 | ExternalAddress(double_signflip()), vector256); |
kvn@3390 | 1541 | %} |
kvn@3390 | 1542 | ins_pipe(pipe_slow); |
kvn@3390 | 1543 | %} |
kvn@3390 | 1544 | |
kvn@3390 | 1545 | instruct sqrtF_reg(regF dst, regF src) %{ |
kvn@3390 | 1546 | predicate(UseSSE>=1); |
kvn@3390 | 1547 | match(Set dst (ConvD2F (SqrtD (ConvF2D src)))); |
kvn@3390 | 1548 | |
kvn@3390 | 1549 | format %{ "sqrtss $dst, $src" %} |
kvn@3390 | 1550 | ins_cost(150); |
kvn@3390 | 1551 | ins_encode %{ |
kvn@3390 | 1552 | __ sqrtss($dst$$XMMRegister, $src$$XMMRegister); |
kvn@3390 | 1553 | %} |
kvn@3390 | 1554 | ins_pipe(pipe_slow); |
kvn@3390 | 1555 | %} |
kvn@3390 | 1556 | |
kvn@3390 | 1557 | instruct sqrtF_mem(regF dst, memory src) %{ |
kvn@3390 | 1558 | predicate(UseSSE>=1); |
kvn@3390 | 1559 | match(Set dst (ConvD2F (SqrtD (ConvF2D (LoadF src))))); |
kvn@3390 | 1560 | |
kvn@3390 | 1561 | format %{ "sqrtss $dst, $src" %} |
kvn@3390 | 1562 | ins_cost(150); |
kvn@3390 | 1563 | ins_encode %{ |
kvn@3390 | 1564 | __ sqrtss($dst$$XMMRegister, $src$$Address); |
kvn@3390 | 1565 | %} |
kvn@3390 | 1566 | ins_pipe(pipe_slow); |
kvn@3390 | 1567 | %} |
kvn@3390 | 1568 | |
kvn@3390 | 1569 | instruct sqrtF_imm(regF dst, immF con) %{ |
kvn@3390 | 1570 | predicate(UseSSE>=1); |
kvn@3390 | 1571 | match(Set dst (ConvD2F (SqrtD (ConvF2D con)))); |
kvn@3390 | 1572 | format %{ "sqrtss $dst, [$constantaddress]\t# load from constant table: float=$con" %} |
kvn@3390 | 1573 | ins_cost(150); |
kvn@3390 | 1574 | ins_encode %{ |
kvn@3390 | 1575 | __ sqrtss($dst$$XMMRegister, $constantaddress($con)); |
kvn@3390 | 1576 | %} |
kvn@3390 | 1577 | ins_pipe(pipe_slow); |
kvn@3390 | 1578 | %} |
kvn@3390 | 1579 | |
kvn@3390 | 1580 | instruct sqrtD_reg(regD dst, regD src) %{ |
kvn@3390 | 1581 | predicate(UseSSE>=2); |
kvn@3390 | 1582 | match(Set dst (SqrtD src)); |
kvn@3390 | 1583 | |
kvn@3390 | 1584 | format %{ "sqrtsd $dst, $src" %} |
kvn@3390 | 1585 | ins_cost(150); |
kvn@3390 | 1586 | ins_encode %{ |
kvn@3390 | 1587 | __ sqrtsd($dst$$XMMRegister, $src$$XMMRegister); |
kvn@3390 | 1588 | %} |
kvn@3390 | 1589 | ins_pipe(pipe_slow); |
kvn@3390 | 1590 | %} |
kvn@3390 | 1591 | |
kvn@3390 | 1592 | instruct sqrtD_mem(regD dst, memory src) %{ |
kvn@3390 | 1593 | predicate(UseSSE>=2); |
kvn@3390 | 1594 | match(Set dst (SqrtD (LoadD src))); |
kvn@3390 | 1595 | |
kvn@3390 | 1596 | format %{ "sqrtsd $dst, $src" %} |
kvn@3390 | 1597 | ins_cost(150); |
kvn@3390 | 1598 | ins_encode %{ |
kvn@3390 | 1599 | __ sqrtsd($dst$$XMMRegister, $src$$Address); |
kvn@3390 | 1600 | %} |
kvn@3390 | 1601 | ins_pipe(pipe_slow); |
kvn@3390 | 1602 | %} |
kvn@3390 | 1603 | |
kvn@3390 | 1604 | instruct sqrtD_imm(regD dst, immD con) %{ |
kvn@3390 | 1605 | predicate(UseSSE>=2); |
kvn@3390 | 1606 | match(Set dst (SqrtD con)); |
kvn@3390 | 1607 | format %{ "sqrtsd $dst, [$constantaddress]\t# load from constant table: double=$con" %} |
kvn@3390 | 1608 | ins_cost(150); |
kvn@3390 | 1609 | ins_encode %{ |
kvn@3390 | 1610 | __ sqrtsd($dst$$XMMRegister, $constantaddress($con)); |
kvn@3390 | 1611 | %} |
kvn@3390 | 1612 | ins_pipe(pipe_slow); |
kvn@3390 | 1613 | %} |
kvn@3390 | 1614 | |
kvn@3882 | 1615 | |
kvn@3882 | 1616 | // ====================VECTOR INSTRUCTIONS===================================== |
kvn@3882 | 1617 | |
kvn@3882 | 1618 | // Load vectors (4 bytes long) |
kvn@3882 | 1619 | instruct loadV4(vecS dst, memory mem) %{ |
kvn@3882 | 1620 | predicate(n->as_LoadVector()->memory_size() == 4); |
kvn@3882 | 1621 | match(Set dst (LoadVector mem)); |
kvn@3882 | 1622 | ins_cost(125); |
kvn@3882 | 1623 | format %{ "movd $dst,$mem\t! load vector (4 bytes)" %} |
kvn@3882 | 1624 | ins_encode %{ |
kvn@3882 | 1625 | __ movdl($dst$$XMMRegister, $mem$$Address); |
kvn@3882 | 1626 | %} |
kvn@3882 | 1627 | ins_pipe( pipe_slow ); |
kvn@3882 | 1628 | %} |
kvn@3882 | 1629 | |
kvn@3882 | 1630 | // Load vectors (8 bytes long) |
kvn@3882 | 1631 | instruct loadV8(vecD dst, memory mem) %{ |
kvn@3882 | 1632 | predicate(n->as_LoadVector()->memory_size() == 8); |
kvn@3882 | 1633 | match(Set dst (LoadVector mem)); |
kvn@3882 | 1634 | ins_cost(125); |
kvn@3882 | 1635 | format %{ "movq $dst,$mem\t! load vector (8 bytes)" %} |
kvn@3882 | 1636 | ins_encode %{ |
kvn@3882 | 1637 | __ movq($dst$$XMMRegister, $mem$$Address); |
kvn@3882 | 1638 | %} |
kvn@3882 | 1639 | ins_pipe( pipe_slow ); |
kvn@3882 | 1640 | %} |
kvn@3882 | 1641 | |
kvn@3882 | 1642 | // Load vectors (16 bytes long) |
kvn@3882 | 1643 | instruct loadV16(vecX dst, memory mem) %{ |
kvn@3882 | 1644 | predicate(n->as_LoadVector()->memory_size() == 16); |
kvn@3882 | 1645 | match(Set dst (LoadVector mem)); |
kvn@3882 | 1646 | ins_cost(125); |
kvn@3882 | 1647 | format %{ "movdqu $dst,$mem\t! load vector (16 bytes)" %} |
kvn@3882 | 1648 | ins_encode %{ |
kvn@3882 | 1649 | __ movdqu($dst$$XMMRegister, $mem$$Address); |
kvn@3882 | 1650 | %} |
kvn@3882 | 1651 | ins_pipe( pipe_slow ); |
kvn@3882 | 1652 | %} |
kvn@3882 | 1653 | |
kvn@3882 | 1654 | // Load vectors (32 bytes long) |
kvn@3882 | 1655 | instruct loadV32(vecY dst, memory mem) %{ |
kvn@3882 | 1656 | predicate(n->as_LoadVector()->memory_size() == 32); |
kvn@3882 | 1657 | match(Set dst (LoadVector mem)); |
kvn@3882 | 1658 | ins_cost(125); |
kvn@3882 | 1659 | format %{ "vmovdqu $dst,$mem\t! load vector (32 bytes)" %} |
kvn@3882 | 1660 | ins_encode %{ |
kvn@3882 | 1661 | __ vmovdqu($dst$$XMMRegister, $mem$$Address); |
kvn@3882 | 1662 | %} |
kvn@3882 | 1663 | ins_pipe( pipe_slow ); |
kvn@3882 | 1664 | %} |
kvn@3882 | 1665 | |
kvn@3882 | 1666 | // Store vectors |
kvn@3882 | 1667 | instruct storeV4(memory mem, vecS src) %{ |
kvn@3882 | 1668 | predicate(n->as_StoreVector()->memory_size() == 4); |
kvn@3882 | 1669 | match(Set mem (StoreVector mem src)); |
kvn@3882 | 1670 | ins_cost(145); |
kvn@3882 | 1671 | format %{ "movd $mem,$src\t! store vector (4 bytes)" %} |
kvn@3882 | 1672 | ins_encode %{ |
kvn@3882 | 1673 | __ movdl($mem$$Address, $src$$XMMRegister); |
kvn@3882 | 1674 | %} |
kvn@3882 | 1675 | ins_pipe( pipe_slow ); |
kvn@3882 | 1676 | %} |
kvn@3882 | 1677 | |
kvn@3882 | 1678 | instruct storeV8(memory mem, vecD src) %{ |
kvn@3882 | 1679 | predicate(n->as_StoreVector()->memory_size() == 8); |
kvn@3882 | 1680 | match(Set mem (StoreVector mem src)); |
kvn@3882 | 1681 | ins_cost(145); |
kvn@3882 | 1682 | format %{ "movq $mem,$src\t! store vector (8 bytes)" %} |
kvn@3882 | 1683 | ins_encode %{ |
kvn@3882 | 1684 | __ movq($mem$$Address, $src$$XMMRegister); |
kvn@3882 | 1685 | %} |
kvn@3882 | 1686 | ins_pipe( pipe_slow ); |
kvn@3882 | 1687 | %} |
kvn@3882 | 1688 | |
kvn@3882 | 1689 | instruct storeV16(memory mem, vecX src) %{ |
kvn@3882 | 1690 | predicate(n->as_StoreVector()->memory_size() == 16); |
kvn@3882 | 1691 | match(Set mem (StoreVector mem src)); |
kvn@3882 | 1692 | ins_cost(145); |
kvn@3882 | 1693 | format %{ "movdqu $mem,$src\t! store vector (16 bytes)" %} |
kvn@3882 | 1694 | ins_encode %{ |
kvn@3882 | 1695 | __ movdqu($mem$$Address, $src$$XMMRegister); |
kvn@3882 | 1696 | %} |
kvn@3882 | 1697 | ins_pipe( pipe_slow ); |
kvn@3882 | 1698 | %} |
kvn@3882 | 1699 | |
kvn@3882 | 1700 | instruct storeV32(memory mem, vecY src) %{ |
kvn@3882 | 1701 | predicate(n->as_StoreVector()->memory_size() == 32); |
kvn@3882 | 1702 | match(Set mem (StoreVector mem src)); |
kvn@3882 | 1703 | ins_cost(145); |
kvn@3882 | 1704 | format %{ "vmovdqu $mem,$src\t! store vector (32 bytes)" %} |
kvn@3882 | 1705 | ins_encode %{ |
kvn@3882 | 1706 | __ vmovdqu($mem$$Address, $src$$XMMRegister); |
kvn@3882 | 1707 | %} |
kvn@3882 | 1708 | ins_pipe( pipe_slow ); |
kvn@3882 | 1709 | %} |
kvn@3882 | 1710 | |
kvn@3882 | 1711 | // Replicate byte scalar to be vector |
kvn@3882 | 1712 | instruct Repl4B(vecS dst, rRegI src) %{ |
kvn@3882 | 1713 | predicate(n->as_Vector()->length() == 4); |
kvn@3882 | 1714 | match(Set dst (ReplicateB src)); |
kvn@3882 | 1715 | format %{ "movd $dst,$src\n\t" |
kvn@3882 | 1716 | "punpcklbw $dst,$dst\n\t" |
kvn@3882 | 1717 | "pshuflw $dst,$dst,0x00\t! replicate4B" %} |
kvn@3882 | 1718 | ins_encode %{ |
kvn@3882 | 1719 | __ movdl($dst$$XMMRegister, $src$$Register); |
kvn@3882 | 1720 | __ punpcklbw($dst$$XMMRegister, $dst$$XMMRegister); |
kvn@3882 | 1721 | __ pshuflw($dst$$XMMRegister, $dst$$XMMRegister, 0x00); |
kvn@3882 | 1722 | %} |
kvn@3882 | 1723 | ins_pipe( pipe_slow ); |
kvn@3882 | 1724 | %} |
kvn@3882 | 1725 | |
kvn@3882 | 1726 | instruct Repl8B(vecD dst, rRegI src) %{ |
kvn@3882 | 1727 | predicate(n->as_Vector()->length() == 8); |
kvn@3882 | 1728 | match(Set dst (ReplicateB src)); |
kvn@3882 | 1729 | format %{ "movd $dst,$src\n\t" |
kvn@3882 | 1730 | "punpcklbw $dst,$dst\n\t" |
kvn@3882 | 1731 | "pshuflw $dst,$dst,0x00\t! replicate8B" %} |
kvn@3882 | 1732 | ins_encode %{ |
kvn@3882 | 1733 | __ movdl($dst$$XMMRegister, $src$$Register); |
kvn@3882 | 1734 | __ punpcklbw($dst$$XMMRegister, $dst$$XMMRegister); |
kvn@3882 | 1735 | __ pshuflw($dst$$XMMRegister, $dst$$XMMRegister, 0x00); |
kvn@3882 | 1736 | %} |
kvn@3882 | 1737 | ins_pipe( pipe_slow ); |
kvn@3882 | 1738 | %} |
kvn@3882 | 1739 | |
kvn@3882 | 1740 | instruct Repl16B(vecX dst, rRegI src) %{ |
kvn@3882 | 1741 | predicate(n->as_Vector()->length() == 16); |
kvn@3882 | 1742 | match(Set dst (ReplicateB src)); |
kvn@3882 | 1743 | format %{ "movd $dst,$src\n\t" |
kvn@3882 | 1744 | "punpcklbw $dst,$dst\n\t" |
kvn@3882 | 1745 | "pshuflw $dst,$dst,0x00\n\t" |
kvn@3929 | 1746 | "punpcklqdq $dst,$dst\t! replicate16B" %} |
kvn@3882 | 1747 | ins_encode %{ |
kvn@3882 | 1748 | __ movdl($dst$$XMMRegister, $src$$Register); |
kvn@3882 | 1749 | __ punpcklbw($dst$$XMMRegister, $dst$$XMMRegister); |
kvn@3882 | 1750 | __ pshuflw($dst$$XMMRegister, $dst$$XMMRegister, 0x00); |
kvn@3929 | 1751 | __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister); |
kvn@3882 | 1752 | %} |
kvn@3882 | 1753 | ins_pipe( pipe_slow ); |
kvn@3882 | 1754 | %} |
kvn@3882 | 1755 | |
kvn@3882 | 1756 | instruct Repl32B(vecY dst, rRegI src) %{ |
kvn@3882 | 1757 | predicate(n->as_Vector()->length() == 32); |
kvn@3882 | 1758 | match(Set dst (ReplicateB src)); |
kvn@3882 | 1759 | format %{ "movd $dst,$src\n\t" |
kvn@3882 | 1760 | "punpcklbw $dst,$dst\n\t" |
kvn@3882 | 1761 | "pshuflw $dst,$dst,0x00\n\t" |
kvn@3929 | 1762 | "punpcklqdq $dst,$dst\n\t" |
kvn@3929 | 1763 | "vinserti128h $dst,$dst,$dst\t! replicate32B" %} |
kvn@3882 | 1764 | ins_encode %{ |
kvn@3882 | 1765 | __ movdl($dst$$XMMRegister, $src$$Register); |
kvn@3882 | 1766 | __ punpcklbw($dst$$XMMRegister, $dst$$XMMRegister); |
kvn@3882 | 1767 | __ pshuflw($dst$$XMMRegister, $dst$$XMMRegister, 0x00); |
kvn@3929 | 1768 | __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister); |
kvn@3929 | 1769 | __ vinserti128h($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister); |
kvn@3882 | 1770 | %} |
kvn@3882 | 1771 | ins_pipe( pipe_slow ); |
kvn@3882 | 1772 | %} |
kvn@3882 | 1773 | |
kvn@3882 | 1774 | // Replicate byte scalar immediate to be vector by loading from const table. |
kvn@3882 | 1775 | instruct Repl4B_imm(vecS dst, immI con) %{ |
kvn@3882 | 1776 | predicate(n->as_Vector()->length() == 4); |
kvn@3882 | 1777 | match(Set dst (ReplicateB con)); |
kvn@3929 | 1778 | format %{ "movdl $dst,[$constantaddress]\t! replicate4B($con)" %} |
kvn@3882 | 1779 | ins_encode %{ |
kvn@3929 | 1780 | __ movdl($dst$$XMMRegister, $constantaddress(replicate4_imm($con$$constant, 1))); |
kvn@3882 | 1781 | %} |
kvn@3882 | 1782 | ins_pipe( pipe_slow ); |
kvn@3882 | 1783 | %} |
kvn@3882 | 1784 | |
kvn@3882 | 1785 | instruct Repl8B_imm(vecD dst, immI con) %{ |
kvn@3882 | 1786 | predicate(n->as_Vector()->length() == 8); |
kvn@3882 | 1787 | match(Set dst (ReplicateB con)); |
kvn@3929 | 1788 | format %{ "movq $dst,[$constantaddress]\t! replicate8B($con)" %} |
kvn@3882 | 1789 | ins_encode %{ |
kvn@3929 | 1790 | __ movq($dst$$XMMRegister, $constantaddress(replicate8_imm($con$$constant, 1))); |
kvn@3882 | 1791 | %} |
kvn@3882 | 1792 | ins_pipe( pipe_slow ); |
kvn@3882 | 1793 | %} |
kvn@3882 | 1794 | |
kvn@3882 | 1795 | instruct Repl16B_imm(vecX dst, immI con) %{ |
kvn@3882 | 1796 | predicate(n->as_Vector()->length() == 16); |
kvn@3882 | 1797 | match(Set dst (ReplicateB con)); |
kvn@3929 | 1798 | format %{ "movq $dst,[$constantaddress]\n\t" |
kvn@3929 | 1799 | "punpcklqdq $dst,$dst\t! replicate16B($con)" %} |
kvn@3882 | 1800 | ins_encode %{ |
kvn@3929 | 1801 | __ movq($dst$$XMMRegister, $constantaddress(replicate8_imm($con$$constant, 1))); |
kvn@3929 | 1802 | __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister); |
kvn@3882 | 1803 | %} |
kvn@3882 | 1804 | ins_pipe( pipe_slow ); |
kvn@3882 | 1805 | %} |
kvn@3882 | 1806 | |
kvn@3882 | 1807 | instruct Repl32B_imm(vecY dst, immI con) %{ |
kvn@3882 | 1808 | predicate(n->as_Vector()->length() == 32); |
kvn@3882 | 1809 | match(Set dst (ReplicateB con)); |
kvn@3929 | 1810 | format %{ "movq $dst,[$constantaddress]\n\t" |
kvn@3929 | 1811 | "punpcklqdq $dst,$dst\n\t" |
kvn@3929 | 1812 | "vinserti128h $dst,$dst,$dst\t! lreplicate32B($con)" %} |
kvn@3882 | 1813 | ins_encode %{ |
kvn@3929 | 1814 | __ movq($dst$$XMMRegister, $constantaddress(replicate8_imm($con$$constant, 1))); |
kvn@3929 | 1815 | __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister); |
kvn@3929 | 1816 | __ vinserti128h($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister); |
kvn@3882 | 1817 | %} |
kvn@3882 | 1818 | ins_pipe( pipe_slow ); |
kvn@3882 | 1819 | %} |
kvn@3882 | 1820 | |
kvn@3882 | 1821 | // Replicate byte scalar zero to be vector |
kvn@3882 | 1822 | instruct Repl4B_zero(vecS dst, immI0 zero) %{ |
kvn@3882 | 1823 | predicate(n->as_Vector()->length() == 4); |
kvn@3882 | 1824 | match(Set dst (ReplicateB zero)); |
kvn@3882 | 1825 | format %{ "pxor $dst,$dst\t! replicate4B zero" %} |
kvn@3882 | 1826 | ins_encode %{ |
kvn@3882 | 1827 | __ pxor($dst$$XMMRegister, $dst$$XMMRegister); |
kvn@3882 | 1828 | %} |
kvn@3882 | 1829 | ins_pipe( fpu_reg_reg ); |
kvn@3882 | 1830 | %} |
kvn@3882 | 1831 | |
kvn@3882 | 1832 | instruct Repl8B_zero(vecD dst, immI0 zero) %{ |
kvn@3882 | 1833 | predicate(n->as_Vector()->length() == 8); |
kvn@3882 | 1834 | match(Set dst (ReplicateB zero)); |
kvn@3882 | 1835 | format %{ "pxor $dst,$dst\t! replicate8B zero" %} |
kvn@3882 | 1836 | ins_encode %{ |
kvn@3882 | 1837 | __ pxor($dst$$XMMRegister, $dst$$XMMRegister); |
kvn@3882 | 1838 | %} |
kvn@3882 | 1839 | ins_pipe( fpu_reg_reg ); |
kvn@3882 | 1840 | %} |
kvn@3882 | 1841 | |
kvn@3882 | 1842 | instruct Repl16B_zero(vecX dst, immI0 zero) %{ |
kvn@3882 | 1843 | predicate(n->as_Vector()->length() == 16); |
kvn@3882 | 1844 | match(Set dst (ReplicateB zero)); |
kvn@3882 | 1845 | format %{ "pxor $dst,$dst\t! replicate16B zero" %} |
kvn@3882 | 1846 | ins_encode %{ |
kvn@3882 | 1847 | __ pxor($dst$$XMMRegister, $dst$$XMMRegister); |
kvn@3882 | 1848 | %} |
kvn@3882 | 1849 | ins_pipe( fpu_reg_reg ); |
kvn@3882 | 1850 | %} |
kvn@3882 | 1851 | |
kvn@3882 | 1852 | instruct Repl32B_zero(vecY dst, immI0 zero) %{ |
kvn@3882 | 1853 | predicate(n->as_Vector()->length() == 32); |
kvn@3882 | 1854 | match(Set dst (ReplicateB zero)); |
kvn@3929 | 1855 | format %{ "vpxor $dst,$dst,$dst\t! replicate32B zero" %} |
kvn@3882 | 1856 | ins_encode %{ |
kvn@3882 | 1857 | // Use vxorpd since AVX does not have vpxor for 256-bit (AVX2 will have it). |
kvn@3882 | 1858 | bool vector256 = true; |
kvn@3929 | 1859 | __ vpxor($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister, vector256); |
kvn@3882 | 1860 | %} |
kvn@3882 | 1861 | ins_pipe( fpu_reg_reg ); |
kvn@3882 | 1862 | %} |
kvn@3882 | 1863 | |
kvn@3882 | 1864 | // Replicate char/short (2 byte) scalar to be vector |
kvn@3882 | 1865 | instruct Repl2S(vecS dst, rRegI src) %{ |
kvn@3882 | 1866 | predicate(n->as_Vector()->length() == 2); |
kvn@3882 | 1867 | match(Set dst (ReplicateS src)); |
kvn@3882 | 1868 | format %{ "movd $dst,$src\n\t" |
kvn@3882 | 1869 | "pshuflw $dst,$dst,0x00\t! replicate2S" %} |
kvn@3882 | 1870 | ins_encode %{ |
kvn@3882 | 1871 | __ movdl($dst$$XMMRegister, $src$$Register); |
kvn@3882 | 1872 | __ pshuflw($dst$$XMMRegister, $dst$$XMMRegister, 0x00); |
kvn@3882 | 1873 | %} |
kvn@3882 | 1874 | ins_pipe( fpu_reg_reg ); |
kvn@3882 | 1875 | %} |
kvn@3882 | 1876 | |
kvn@3882 | 1877 | instruct Repl4S(vecD dst, rRegI src) %{ |
kvn@3882 | 1878 | predicate(n->as_Vector()->length() == 4); |
kvn@3882 | 1879 | match(Set dst (ReplicateS src)); |
kvn@3882 | 1880 | format %{ "movd $dst,$src\n\t" |
kvn@3882 | 1881 | "pshuflw $dst,$dst,0x00\t! replicate4S" %} |
kvn@3882 | 1882 | ins_encode %{ |
kvn@3882 | 1883 | __ movdl($dst$$XMMRegister, $src$$Register); |
kvn@3882 | 1884 | __ pshuflw($dst$$XMMRegister, $dst$$XMMRegister, 0x00); |
kvn@3882 | 1885 | %} |
kvn@3882 | 1886 | ins_pipe( fpu_reg_reg ); |
kvn@3882 | 1887 | %} |
kvn@3882 | 1888 | |
kvn@3882 | 1889 | instruct Repl8S(vecX dst, rRegI src) %{ |
kvn@3882 | 1890 | predicate(n->as_Vector()->length() == 8); |
kvn@3882 | 1891 | match(Set dst (ReplicateS src)); |
kvn@3882 | 1892 | format %{ "movd $dst,$src\n\t" |
kvn@3882 | 1893 | "pshuflw $dst,$dst,0x00\n\t" |
kvn@3929 | 1894 | "punpcklqdq $dst,$dst\t! replicate8S" %} |
kvn@3882 | 1895 | ins_encode %{ |
kvn@3882 | 1896 | __ movdl($dst$$XMMRegister, $src$$Register); |
kvn@3882 | 1897 | __ pshuflw($dst$$XMMRegister, $dst$$XMMRegister, 0x00); |
kvn@3929 | 1898 | __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister); |
kvn@3882 | 1899 | %} |
kvn@3882 | 1900 | ins_pipe( pipe_slow ); |
kvn@3882 | 1901 | %} |
kvn@3882 | 1902 | |
kvn@3882 | 1903 | instruct Repl16S(vecY dst, rRegI src) %{ |
kvn@3882 | 1904 | predicate(n->as_Vector()->length() == 16); |
kvn@3882 | 1905 | match(Set dst (ReplicateS src)); |
kvn@3882 | 1906 | format %{ "movd $dst,$src\n\t" |
kvn@3882 | 1907 | "pshuflw $dst,$dst,0x00\n\t" |
kvn@3929 | 1908 | "punpcklqdq $dst,$dst\n\t" |
kvn@3929 | 1909 | "vinserti128h $dst,$dst,$dst\t! replicate16S" %} |
kvn@3882 | 1910 | ins_encode %{ |
kvn@3882 | 1911 | __ movdl($dst$$XMMRegister, $src$$Register); |
kvn@3882 | 1912 | __ pshuflw($dst$$XMMRegister, $dst$$XMMRegister, 0x00); |
kvn@3929 | 1913 | __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister); |
kvn@3929 | 1914 | __ vinserti128h($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister); |
kvn@3882 | 1915 | %} |
kvn@3882 | 1916 | ins_pipe( pipe_slow ); |
kvn@3882 | 1917 | %} |
kvn@3882 | 1918 | |
// Replicate char/short (2 byte) scalar immediate to be vector by loading from const table.
// replicate4_imm/replicate8_imm build a 4- or 8-byte constant with the 2-byte
// immediate repeated, which is placed in the constant table and loaded with a
// single movdl/movq; wider vectors then widen it with punpcklqdq/vinserti128h.
instruct Repl2S_imm(vecS dst, immI con) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateS con));
  format %{ "movdl $dst,[$constantaddress]\t! replicate2S($con)" %}
  ins_encode %{
    // 4-byte constant = immediate repeated in 2-byte units.
    __ movdl($dst$$XMMRegister, $constantaddress(replicate4_imm($con$$constant, 2)));
  %}
  ins_pipe( fpu_reg_reg );
%}

instruct Repl4S_imm(vecD dst, immI con) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateS con));
  format %{ "movq $dst,[$constantaddress]\t! replicate4S($con)" %}
  ins_encode %{
    // 8-byte constant = immediate repeated in 2-byte units.
    __ movq($dst$$XMMRegister, $constantaddress(replicate8_imm($con$$constant, 2)));
  %}
  ins_pipe( fpu_reg_reg );
%}

instruct Repl8S_imm(vecX dst, immI con) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateS con));
  format %{ "movq $dst,[$constantaddress]\n\t"
            "punpcklqdq $dst,$dst\t! replicate8S($con)" %}
  ins_encode %{
    __ movq($dst$$XMMRegister, $constantaddress(replicate8_imm($con$$constant, 2)));
    // Duplicate the low qword into the high qword of the XMM register.
    __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}

instruct Repl16S_imm(vecY dst, immI con) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (ReplicateS con));
  format %{ "movq $dst,[$constantaddress]\n\t"
            "punpcklqdq $dst,$dst\n\t"
            "vinserti128h $dst,$dst,$dst\t! replicate16S($con)" %}
  ins_encode %{
    __ movq($dst$$XMMRegister, $constantaddress(replicate8_imm($con$$constant, 2)));
    __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister);
    // Mirror the low 128 bits into the upper half of the 256-bit register.
    __ vinserti128h($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}
kvn@3882 | 1965 | |
// Replicate char/short (2 byte) scalar zero to be vector.
// A register xor'ed with itself is zero in every lane, so vector width beyond
// the xor's own width needs no further splatting.
instruct Repl2S_zero(vecS dst, immI0 zero) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateS zero));
  format %{ "pxor $dst,$dst\t! replicate2S zero" %}
  ins_encode %{
    __ pxor($dst$$XMMRegister, $dst$$XMMRegister);
  %}
  ins_pipe( fpu_reg_reg );
%}

instruct Repl4S_zero(vecD dst, immI0 zero) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateS zero));
  format %{ "pxor $dst,$dst\t! replicate4S zero" %}
  ins_encode %{
    __ pxor($dst$$XMMRegister, $dst$$XMMRegister);
  %}
  ins_pipe( fpu_reg_reg );
%}

instruct Repl8S_zero(vecX dst, immI0 zero) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateS zero));
  format %{ "pxor $dst,$dst\t! replicate8S zero" %}
  ins_encode %{
    __ pxor($dst$$XMMRegister, $dst$$XMMRegister);
  %}
  ins_pipe( fpu_reg_reg );
%}

instruct Repl16S_zero(vecY dst, immI0 zero) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (ReplicateS zero));
  format %{ "vpxor $dst,$dst,$dst\t! replicate16S zero" %}
  ins_encode %{
    // 256-bit vpxor clears the full vecY register (the old note about having
    // to use vxorpd predates AVX2's integer vpxor and no longer applies).
    bool vector256 = true;
    __ vpxor($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister, vector256);
  %}
  ins_pipe( fpu_reg_reg );
%}
kvn@3882 | 2008 | |
// Replicate integer (4 byte) scalar to be vector:
// movd loads the GP register into the low dword, pshufd 0x00 splats that
// dword across all four lanes of the XMM register.
instruct Repl2I(vecD dst, rRegI src) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI src));
  format %{ "movd $dst,$src\n\t"
            "pshufd $dst,$dst,0x00\t! replicate2I" %}
  ins_encode %{
    __ movdl($dst$$XMMRegister, $src$$Register);
    __ pshufd($dst$$XMMRegister, $dst$$XMMRegister, 0x00);
  %}
  ins_pipe( fpu_reg_reg );
%}

instruct Repl4I(vecX dst, rRegI src) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateI src));
  format %{ "movd $dst,$src\n\t"
            "pshufd $dst,$dst,0x00\t! replicate4I" %}
  ins_encode %{
    __ movdl($dst$$XMMRegister, $src$$Register);
    __ pshufd($dst$$XMMRegister, $dst$$XMMRegister, 0x00);
  %}
  ins_pipe( pipe_slow );
%}

// 8 ints span a 256-bit register: splat into the low 128 bits, then
// vinserti128h (AVX2) copies them into the upper half.
instruct Repl8I(vecY dst, rRegI src) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateI src));
  format %{ "movd $dst,$src\n\t"
            "pshufd $dst,$dst,0x00\n\t"
            "vinserti128h $dst,$dst,$dst\t! replicate8I" %}
  ins_encode %{
    __ movdl($dst$$XMMRegister, $src$$Register);
    __ pshufd($dst$$XMMRegister, $dst$$XMMRegister, 0x00);
    __ vinserti128h($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}
kvn@3882 | 2047 | |
// Replicate integer (4 byte) scalar immediate to be vector by loading from const table.
// replicate8_imm builds an 8-byte constant with the immediate repeated in
// 4-byte units; one movq covers two lanes, punpcklqdq doubles to four, and
// vinserti128h doubles again to eight.
instruct Repl2I_imm(vecD dst, immI con) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI con));
  format %{ "movq $dst,[$constantaddress]\t! replicate2I($con)" %}
  ins_encode %{
    __ movq($dst$$XMMRegister, $constantaddress(replicate8_imm($con$$constant, 4)));
  %}
  ins_pipe( fpu_reg_reg );
%}

instruct Repl4I_imm(vecX dst, immI con) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateI con));
  format %{ "movq $dst,[$constantaddress]\t! replicate4I($con)\n\t"
            "punpcklqdq $dst,$dst" %}
  ins_encode %{
    __ movq($dst$$XMMRegister, $constantaddress(replicate8_imm($con$$constant, 4)));
    // Duplicate the low qword (two replicated ints) into the high qword.
    __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}

instruct Repl8I_imm(vecY dst, immI con) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateI con));
  format %{ "movq $dst,[$constantaddress]\t! replicate8I($con)\n\t"
            "punpcklqdq $dst,$dst\n\t"
            "vinserti128h $dst,$dst,$dst" %}
  ins_encode %{
    __ movq($dst$$XMMRegister, $constantaddress(replicate8_imm($con$$constant, 4)));
    __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister);
    // Mirror the low 128 bits into the upper half of the 256-bit register.
    __ vinserti128h($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}
kvn@3882 | 2084 | |
// Integer could be loaded into xmm register directly from memory:
// these rules fold the LoadI into the replicate, saving the GP-to-XMM move.
instruct Repl2I_mem(vecD dst, memory mem) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI (LoadI mem)));
  format %{ "movd $dst,$mem\n\t"
            "pshufd $dst,$dst,0x00\t! replicate2I" %}
  ins_encode %{
    __ movdl($dst$$XMMRegister, $mem$$Address);
    __ pshufd($dst$$XMMRegister, $dst$$XMMRegister, 0x00);
  %}
  ins_pipe( fpu_reg_reg );
%}

instruct Repl4I_mem(vecX dst, memory mem) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateI (LoadI mem)));
  format %{ "movd $dst,$mem\n\t"
            "pshufd $dst,$dst,0x00\t! replicate4I" %}
  ins_encode %{
    __ movdl($dst$$XMMRegister, $mem$$Address);
    __ pshufd($dst$$XMMRegister, $dst$$XMMRegister, 0x00);
  %}
  ins_pipe( pipe_slow );
%}

instruct Repl8I_mem(vecY dst, memory mem) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateI (LoadI mem)));
  format %{ "movd $dst,$mem\n\t"
            "pshufd $dst,$dst,0x00\n\t"
            "vinserti128h $dst,$dst,$dst\t! replicate8I" %}
  ins_encode %{
    __ movdl($dst$$XMMRegister, $mem$$Address);
    __ pshufd($dst$$XMMRegister, $dst$$XMMRegister, 0x00);
    // Mirror the low 128 bits into the upper half of the 256-bit register.
    __ vinserti128h($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}
kvn@3882 | 2123 | |
// Replicate integer (4 byte) scalar zero to be vector:
// xor-with-self zeroes every lane in one instruction.
instruct Repl2I_zero(vecD dst, immI0 zero) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI zero));
  format %{ "pxor $dst,$dst\t! replicate2I" %}
  ins_encode %{
    __ pxor($dst$$XMMRegister, $dst$$XMMRegister);
  %}
  ins_pipe( fpu_reg_reg );
%}
kvn@3882 | 2134 | |
// Replicate integer (4 byte) scalar zero into all four lanes of a vecX.
instruct Repl4I_zero(vecX dst, immI0 zero) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateI zero));
  // Fixed debug format: dropped the stray ')' after "zero" so the printed
  // form matches the sibling Repl*_zero rules ("replicate4I zero").
  format %{ "pxor $dst,$dst\t! replicate4I zero" %}
  ins_encode %{
    __ pxor($dst$$XMMRegister, $dst$$XMMRegister);
  %}
  ins_pipe( fpu_reg_reg );
%}
kvn@3882 | 2144 | |
// Replicate integer zero across a 256-bit (vecY) register.
instruct Repl8I_zero(vecY dst, immI0 zero) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateI zero));
  format %{ "vpxor $dst,$dst,$dst\t! replicate8I zero" %}
  ins_encode %{
    // 256-bit vpxor clears the full vecY register (the old note about having
    // to use vxorpd predates AVX2's integer vpxor and no longer applies).
    bool vector256 = true;
    __ vpxor($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister, vector256);
  %}
  ins_pipe( fpu_reg_reg );
%}
kvn@3882 | 2156 | |
// Replicate long (8 byte) scalar to be vector.
// LP64: the whole long moves in one movdq.  32-bit: the long lives in a GP
// register pair, so lo/hi halves are moved separately and joined with
// punpckldq before the qword is duplicated.
#ifdef _LP64
instruct Repl2L(vecX dst, rRegL src) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateL src));
  format %{ "movdq $dst,$src\n\t"
            "punpcklqdq $dst,$dst\t! replicate2L" %}
  ins_encode %{
    __ movdq($dst$$XMMRegister, $src$$Register);
    // Duplicate the low qword into the high qword.
    __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}

instruct Repl4L(vecY dst, rRegL src) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateL src));
  format %{ "movdq $dst,$src\n\t"
            "punpcklqdq $dst,$dst\n\t"
            "vinserti128h $dst,$dst,$dst\t! replicate4L" %}
  ins_encode %{
    __ movdq($dst$$XMMRegister, $src$$Register);
    __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister);
    // Mirror the low 128 bits into the upper half of the 256-bit register.
    __ vinserti128h($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}
#else // _LP64
instruct Repl2L(vecX dst, eRegL src, regD tmp) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateL src));
  effect(TEMP dst, USE src, TEMP tmp);
  format %{ "movdl $dst,$src.lo\n\t"
            "movdl $tmp,$src.hi\n\t"
            "punpckldq $dst,$tmp\n\t"
            "punpcklqdq $dst,$dst\t! replicate2L"%}
  ins_encode %{
    __ movdl($dst$$XMMRegister, $src$$Register);
    __ movdl($tmp$$XMMRegister, HIGH_FROM_LOW($src$$Register));
    // Interleave lo/hi dwords to reassemble the 64-bit value in the low qword.
    __ punpckldq($dst$$XMMRegister, $tmp$$XMMRegister);
    __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}

instruct Repl4L(vecY dst, eRegL src, regD tmp) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateL src));
  effect(TEMP dst, USE src, TEMP tmp);
  format %{ "movdl $dst,$src.lo\n\t"
            "movdl $tmp,$src.hi\n\t"
            "punpckldq $dst,$tmp\n\t"
            "punpcklqdq $dst,$dst\n\t"
            "vinserti128h $dst,$dst,$dst\t! replicate4L" %}
  ins_encode %{
    __ movdl($dst$$XMMRegister, $src$$Register);
    __ movdl($tmp$$XMMRegister, HIGH_FROM_LOW($src$$Register));
    __ punpckldq($dst$$XMMRegister, $tmp$$XMMRegister);
    __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister);
    __ vinserti128h($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}
#endif // _LP64
kvn@3882 | 2221 | |
// Replicate long (8 byte) scalar immediate to be vector by loading from const table.
// The 8-byte immediate needs no pre-replication: one movq fills a lane,
// punpcklqdq/vinserti128h widen it.
instruct Repl2L_imm(vecX dst, immL con) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateL con));
  format %{ "movq $dst,[$constantaddress]\n\t"
            "punpcklqdq $dst,$dst\t! replicate2L($con)" %}
  ins_encode %{
    __ movq($dst$$XMMRegister, $constantaddress($con));
    __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}

instruct Repl4L_imm(vecY dst, immL con) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateL con));
  format %{ "movq $dst,[$constantaddress]\n\t"
            "punpcklqdq $dst,$dst\n\t"
            "vinserti128h $dst,$dst,$dst\t! replicate4L($con)" %}
  ins_encode %{
    __ movq($dst$$XMMRegister, $constantaddress($con));
    __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister);
    // Mirror the low 128 bits into the upper half of the 256-bit register.
    __ vinserti128h($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}
kvn@3882 | 2248 | |
// Long could be loaded into xmm register directly from memory:
// these rules fold the LoadL into the replicate.
instruct Repl2L_mem(vecX dst, memory mem) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateL (LoadL mem)));
  format %{ "movq $dst,$mem\n\t"
            "punpcklqdq $dst,$dst\t! replicate2L" %}
  ins_encode %{
    __ movq($dst$$XMMRegister, $mem$$Address);
    __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}

instruct Repl4L_mem(vecY dst, memory mem) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateL (LoadL mem)));
  format %{ "movq $dst,$mem\n\t"
            "punpcklqdq $dst,$dst\n\t"
            "vinserti128h $dst,$dst,$dst\t! replicate4L" %}
  ins_encode %{
    __ movq($dst$$XMMRegister, $mem$$Address);
    __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister);
    // Mirror the low 128 bits into the upper half of the 256-bit register.
    __ vinserti128h($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}
kvn@3882 | 2275 | |
// Replicate long (8 byte) scalar zero to be vector.
instruct Repl2L_zero(vecX dst, immL0 zero) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateL zero));
  format %{ "pxor $dst,$dst\t! replicate2L zero" %}
  ins_encode %{
    __ pxor($dst$$XMMRegister, $dst$$XMMRegister);
  %}
  ins_pipe( fpu_reg_reg );
%}

instruct Repl4L_zero(vecY dst, immL0 zero) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateL zero));
  format %{ "vpxor $dst,$dst,$dst\t! replicate4L zero" %}
  ins_encode %{
    // 256-bit vpxor clears the full vecY register (the old note about having
    // to use vxorpd predates AVX2's integer vpxor and no longer applies).
    bool vector256 = true;
    __ vpxor($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister, vector256);
  %}
  ins_pipe( fpu_reg_reg );
%}
kvn@3882 | 2298 | |
// Replicate float (4 byte) scalar to be vector:
// pshufd 0x00 splats the low dword of $src across all lanes of $dst.
instruct Repl2F(vecD dst, regF src) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateF src));
  // Fixed debug format: the source operand is $src, not $dst, matching the
  // encoding below and the Repl8F format.
  format %{ "pshufd $dst,$src,0x00\t! replicate2F" %}
  ins_encode %{
    __ pshufd($dst$$XMMRegister, $src$$XMMRegister, 0x00);
  %}
  ins_pipe( fpu_reg_reg );
%}
kvn@3882 | 2309 | |
// Replicate float scalar into all four lanes of a vecX via pshufd 0x00.
instruct Repl4F(vecX dst, regF src) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateF src));
  // Fixed debug format: the source operand is $src, not $dst, matching the
  // encoding below and the Repl8F format.
  format %{ "pshufd $dst,$src,0x00\t! replicate4F" %}
  ins_encode %{
    __ pshufd($dst$$XMMRegister, $src$$XMMRegister, 0x00);
  %}
  ins_pipe( pipe_slow );
%}
kvn@3882 | 2319 | |
// Replicate float scalar across a 256-bit register: splat into the low
// 128 bits, then vinsertf128h (AVX) copies them into the upper half.
instruct Repl8F(vecY dst, regF src) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateF src));
  format %{ "pshufd $dst,$src,0x00\n\t"
            "vinsertf128h $dst,$dst,$dst\t! replicate8F" %}
  ins_encode %{
    __ pshufd($dst$$XMMRegister, $src$$XMMRegister, 0x00);
    __ vinsertf128h($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}
kvn@3882 | 2331 | |
// Replicate float (4 byte) scalar zero to be vector:
// xorps/vxorps with self zeroes every lane (FP-domain xor to stay in the
// floating-point execution domain).
instruct Repl2F_zero(vecD dst, immF0 zero) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateF zero));
  format %{ "xorps $dst,$dst\t! replicate2F zero" %}
  ins_encode %{
    __ xorps($dst$$XMMRegister, $dst$$XMMRegister);
  %}
  ins_pipe( fpu_reg_reg );
%}

instruct Repl4F_zero(vecX dst, immF0 zero) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateF zero));
  format %{ "xorps $dst,$dst\t! replicate4F zero" %}
  ins_encode %{
    __ xorps($dst$$XMMRegister, $dst$$XMMRegister);
  %}
  ins_pipe( fpu_reg_reg );
%}

instruct Repl8F_zero(vecY dst, immF0 zero) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateF zero));
  format %{ "vxorps $dst,$dst,$dst\t! replicate8F zero" %}
  ins_encode %{
    // 256-bit vxorps (AVX) clears the full vecY register.
    bool vector256 = true;
    __ vxorps($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister, vector256);
  %}
  ins_pipe( fpu_reg_reg );
%}
kvn@3882 | 2363 | |
// Replicate double (8 bytes) scalar to be vector:
// pshufd 0x44 selects dwords {0,1,0,1}, i.e. duplicates the low qword.
instruct Repl2D(vecX dst, regD src) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateD src));
  format %{ "pshufd $dst,$src,0x44\t! replicate2D" %}
  ins_encode %{
    __ pshufd($dst$$XMMRegister, $src$$XMMRegister, 0x44);
  %}
  ins_pipe( pipe_slow );
%}

instruct Repl4D(vecY dst, regD src) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateD src));
  format %{ "pshufd $dst,$src,0x44\n\t"
            "vinsertf128h $dst,$dst,$dst\t! replicate4D" %}
  ins_encode %{
    __ pshufd($dst$$XMMRegister, $src$$XMMRegister, 0x44);
    // Mirror the low 128 bits into the upper half of the 256-bit register.
    __ vinsertf128h($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}
kvn@3882 | 2386 | |
// Replicate double (8 byte) scalar zero to be vector.
instruct Repl2D_zero(vecX dst, immD0 zero) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateD zero));
  format %{ "xorpd $dst,$dst\t! replicate2D zero" %}
  ins_encode %{
    __ xorpd($dst$$XMMRegister, $dst$$XMMRegister);
  %}
  ins_pipe( fpu_reg_reg );
%}

instruct Repl4D_zero(vecY dst, immD0 zero) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateD zero));
  format %{ "vxorpd $dst,$dst,$dst,vect256\t! replicate4D zero" %}
  ins_encode %{
    // 256-bit vxorpd (AVX) clears the full vecY register.
    bool vector256 = true;
    __ vxorpd($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister, vector256);
  %}
  ins_pipe( fpu_reg_reg );
%}
kvn@3882 | 2408 | |
// ====================VECTOR ARITHMETIC=======================================

// --------------------------------- ADD --------------------------------------

// Bytes vector add.
// Pattern per size: a two-operand SSE form (dst += src), an AVX three-operand
// register form, and (for 128/256-bit) an AVX form with the second operand
// folded from memory.  256-bit integer adds require AVX2 (UseAVX > 1).
instruct vadd4B(vecS dst, vecS src) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (AddVB dst src));
  format %{ "paddb $dst,$src\t! add packed4B" %}
  ins_encode %{
    __ paddb($dst$$XMMRegister, $src$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}

instruct vadd4B_reg(vecS dst, vecS src1, vecS src2) %{
  predicate(UseAVX > 0 && n->as_Vector()->length() == 4);
  match(Set dst (AddVB src1 src2));
  format %{ "vpaddb $dst,$src1,$src2\t! add packed4B" %}
  ins_encode %{
    bool vector256 = false;
    __ vpaddb($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256);
  %}
  ins_pipe( pipe_slow );
%}

instruct vadd8B(vecD dst, vecD src) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (AddVB dst src));
  format %{ "paddb $dst,$src\t! add packed8B" %}
  ins_encode %{
    __ paddb($dst$$XMMRegister, $src$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}

instruct vadd8B_reg(vecD dst, vecD src1, vecD src2) %{
  predicate(UseAVX > 0 && n->as_Vector()->length() == 8);
  match(Set dst (AddVB src1 src2));
  format %{ "vpaddb $dst,$src1,$src2\t! add packed8B" %}
  ins_encode %{
    bool vector256 = false;
    __ vpaddb($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256);
  %}
  ins_pipe( pipe_slow );
%}

instruct vadd16B(vecX dst, vecX src) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (AddVB dst src));
  format %{ "paddb $dst,$src\t! add packed16B" %}
  ins_encode %{
    __ paddb($dst$$XMMRegister, $src$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}

instruct vadd16B_reg(vecX dst, vecX src1, vecX src2) %{
  predicate(UseAVX > 0 && n->as_Vector()->length() == 16);
  match(Set dst (AddVB src1 src2));
  format %{ "vpaddb $dst,$src1,$src2\t! add packed16B" %}
  ins_encode %{
    bool vector256 = false;
    __ vpaddb($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256);
  %}
  ins_pipe( pipe_slow );
%}

// Memory-operand form: folds the LoadVector into the add.
instruct vadd16B_mem(vecX dst, vecX src, memory mem) %{
  predicate(UseAVX > 0 && n->as_Vector()->length() == 16);
  match(Set dst (AddVB src (LoadVector mem)));
  format %{ "vpaddb $dst,$src,$mem\t! add packed16B" %}
  ins_encode %{
    bool vector256 = false;
    __ vpaddb($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector256);
  %}
  ins_pipe( pipe_slow );
%}

instruct vadd32B_reg(vecY dst, vecY src1, vecY src2) %{
  predicate(UseAVX > 1 && n->as_Vector()->length() == 32);
  match(Set dst (AddVB src1 src2));
  format %{ "vpaddb $dst,$src1,$src2\t! add packed32B" %}
  ins_encode %{
    bool vector256 = true;
    __ vpaddb($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256);
  %}
  ins_pipe( pipe_slow );
%}

instruct vadd32B_mem(vecY dst, vecY src, memory mem) %{
  predicate(UseAVX > 1 && n->as_Vector()->length() == 32);
  match(Set dst (AddVB src (LoadVector mem)));
  format %{ "vpaddb $dst,$src,$mem\t! add packed32B" %}
  ins_encode %{
    bool vector256 = true;
    __ vpaddb($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector256);
  %}
  ins_pipe( pipe_slow );
%}
kvn@4001 | 2509 | |
kvn@4001 | 2510 | // Shorts/Chars vector add |
kvn@4001 | 2511 | instruct vadd2S(vecS dst, vecS src) %{ |
kvn@4001 | 2512 | predicate(n->as_Vector()->length() == 2); |
kvn@4001 | 2513 | match(Set dst (AddVS dst src)); |
kvn@4001 | 2514 | format %{ "paddw $dst,$src\t! add packed2S" %} |
kvn@4001 | 2515 | ins_encode %{ |
kvn@4001 | 2516 | __ paddw($dst$$XMMRegister, $src$$XMMRegister); |
kvn@4001 | 2517 | %} |
kvn@4001 | 2518 | ins_pipe( pipe_slow ); |
kvn@4001 | 2519 | %} |
kvn@4001 | 2520 | |
kvn@4001 | 2521 | instruct vadd2S_reg(vecS dst, vecS src1, vecS src2) %{ |
kvn@4001 | 2522 | predicate(UseAVX > 0 && n->as_Vector()->length() == 2); |
kvn@4001 | 2523 | match(Set dst (AddVS src1 src2)); |
kvn@4001 | 2524 | format %{ "vpaddw $dst,$src1,$src2\t! add packed2S" %} |
kvn@4001 | 2525 | ins_encode %{ |
kvn@4001 | 2526 | bool vector256 = false; |
kvn@4001 | 2527 | __ vpaddw($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256); |
kvn@4001 | 2528 | %} |
kvn@4001 | 2529 | ins_pipe( pipe_slow ); |
kvn@4001 | 2530 | %} |
kvn@4001 | 2531 | |
kvn@4001 | 2532 | instruct vadd4S(vecD dst, vecD src) %{ |
kvn@4001 | 2533 | predicate(n->as_Vector()->length() == 4); |
kvn@4001 | 2534 | match(Set dst (AddVS dst src)); |
kvn@4001 | 2535 | format %{ "paddw $dst,$src\t! add packed4S" %} |
kvn@4001 | 2536 | ins_encode %{ |
kvn@4001 | 2537 | __ paddw($dst$$XMMRegister, $src$$XMMRegister); |
kvn@4001 | 2538 | %} |
kvn@4001 | 2539 | ins_pipe( pipe_slow ); |
kvn@4001 | 2540 | %} |
kvn@4001 | 2541 | |
kvn@4001 | 2542 | instruct vadd4S_reg(vecD dst, vecD src1, vecD src2) %{ |
kvn@4001 | 2543 | predicate(UseAVX > 0 && n->as_Vector()->length() == 4); |
kvn@4001 | 2544 | match(Set dst (AddVS src1 src2)); |
kvn@4001 | 2545 | format %{ "vpaddw $dst,$src1,$src2\t! add packed4S" %} |
kvn@4001 | 2546 | ins_encode %{ |
kvn@4001 | 2547 | bool vector256 = false; |
kvn@4001 | 2548 | __ vpaddw($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256); |
kvn@4001 | 2549 | %} |
kvn@4001 | 2550 | ins_pipe( pipe_slow ); |
kvn@4001 | 2551 | %} |
kvn@4001 | 2552 | |
kvn@4001 | 2553 | instruct vadd8S(vecX dst, vecX src) %{ |
kvn@4001 | 2554 | predicate(n->as_Vector()->length() == 8); |
kvn@4001 | 2555 | match(Set dst (AddVS dst src)); |
kvn@4001 | 2556 | format %{ "paddw $dst,$src\t! add packed8S" %} |
kvn@4001 | 2557 | ins_encode %{ |
kvn@4001 | 2558 | __ paddw($dst$$XMMRegister, $src$$XMMRegister); |
kvn@4001 | 2559 | %} |
kvn@4001 | 2560 | ins_pipe( pipe_slow ); |
kvn@4001 | 2561 | %} |
kvn@4001 | 2562 | |
kvn@4001 | 2563 | instruct vadd8S_reg(vecX dst, vecX src1, vecX src2) %{ |
kvn@4001 | 2564 | predicate(UseAVX > 0 && n->as_Vector()->length() == 8); |
kvn@4001 | 2565 | match(Set dst (AddVS src1 src2)); |
kvn@4001 | 2566 | format %{ "vpaddw $dst,$src1,$src2\t! add packed8S" %} |
kvn@4001 | 2567 | ins_encode %{ |
kvn@4001 | 2568 | bool vector256 = false; |
kvn@4001 | 2569 | __ vpaddw($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256); |
kvn@4001 | 2570 | %} |
kvn@4001 | 2571 | ins_pipe( pipe_slow ); |
kvn@4001 | 2572 | %} |
kvn@4001 | 2573 | |
kvn@4001 | 2574 | instruct vadd8S_mem(vecX dst, vecX src, memory mem) %{ |
kvn@4001 | 2575 | predicate(UseAVX > 0 && n->as_Vector()->length() == 8); |
kvn@4001 | 2576 | match(Set dst (AddVS src (LoadVector mem))); |
kvn@4001 | 2577 | format %{ "vpaddw $dst,$src,$mem\t! add packed8S" %} |
kvn@4001 | 2578 | ins_encode %{ |
kvn@4001 | 2579 | bool vector256 = false; |
kvn@4001 | 2580 | __ vpaddw($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector256); |
kvn@4001 | 2581 | %} |
kvn@4001 | 2582 | ins_pipe( pipe_slow ); |
kvn@4001 | 2583 | %} |
kvn@4001 | 2584 | |
kvn@4001 | 2585 | instruct vadd16S_reg(vecY dst, vecY src1, vecY src2) %{ |
kvn@4001 | 2586 | predicate(UseAVX > 1 && n->as_Vector()->length() == 16); |
kvn@4001 | 2587 | match(Set dst (AddVS src1 src2)); |
kvn@4001 | 2588 | format %{ "vpaddw $dst,$src1,$src2\t! add packed16S" %} |
kvn@4001 | 2589 | ins_encode %{ |
kvn@4001 | 2590 | bool vector256 = true; |
kvn@4001 | 2591 | __ vpaddw($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256); |
kvn@4001 | 2592 | %} |
kvn@4001 | 2593 | ins_pipe( pipe_slow ); |
kvn@4001 | 2594 | %} |
kvn@4001 | 2595 | |
kvn@4001 | 2596 | instruct vadd16S_mem(vecY dst, vecY src, memory mem) %{ |
kvn@4001 | 2597 | predicate(UseAVX > 1 && n->as_Vector()->length() == 16); |
kvn@4001 | 2598 | match(Set dst (AddVS src (LoadVector mem))); |
kvn@4001 | 2599 | format %{ "vpaddw $dst,$src,$mem\t! add packed16S" %} |
kvn@4001 | 2600 | ins_encode %{ |
kvn@4001 | 2601 | bool vector256 = true; |
kvn@4001 | 2602 | __ vpaddw($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector256); |
kvn@4001 | 2603 | %} |
kvn@4001 | 2604 | ins_pipe( pipe_slow ); |
kvn@4001 | 2605 | %} |
kvn@4001 | 2606 | |
kvn@4001 | 2607 | // Integers vector add |
kvn@4001 | 2608 | instruct vadd2I(vecD dst, vecD src) %{ |
kvn@4001 | 2609 | predicate(n->as_Vector()->length() == 2); |
kvn@4001 | 2610 | match(Set dst (AddVI dst src)); |
kvn@4001 | 2611 | format %{ "paddd $dst,$src\t! add packed2I" %} |
kvn@4001 | 2612 | ins_encode %{ |
kvn@4001 | 2613 | __ paddd($dst$$XMMRegister, $src$$XMMRegister); |
kvn@4001 | 2614 | %} |
kvn@4001 | 2615 | ins_pipe( pipe_slow ); |
kvn@4001 | 2616 | %} |
kvn@4001 | 2617 | |
kvn@4001 | 2618 | instruct vadd2I_reg(vecD dst, vecD src1, vecD src2) %{ |
kvn@4001 | 2619 | predicate(UseAVX > 0 && n->as_Vector()->length() == 2); |
kvn@4001 | 2620 | match(Set dst (AddVI src1 src2)); |
kvn@4001 | 2621 | format %{ "vpaddd $dst,$src1,$src2\t! add packed2I" %} |
kvn@4001 | 2622 | ins_encode %{ |
kvn@4001 | 2623 | bool vector256 = false; |
kvn@4001 | 2624 | __ vpaddd($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256); |
kvn@4001 | 2625 | %} |
kvn@4001 | 2626 | ins_pipe( pipe_slow ); |
kvn@4001 | 2627 | %} |
kvn@4001 | 2628 | |
kvn@4001 | 2629 | instruct vadd4I(vecX dst, vecX src) %{ |
kvn@4001 | 2630 | predicate(n->as_Vector()->length() == 4); |
kvn@4001 | 2631 | match(Set dst (AddVI dst src)); |
kvn@4001 | 2632 | format %{ "paddd $dst,$src\t! add packed4I" %} |
kvn@4001 | 2633 | ins_encode %{ |
kvn@4001 | 2634 | __ paddd($dst$$XMMRegister, $src$$XMMRegister); |
kvn@4001 | 2635 | %} |
kvn@4001 | 2636 | ins_pipe( pipe_slow ); |
kvn@4001 | 2637 | %} |
kvn@4001 | 2638 | |
kvn@4001 | 2639 | instruct vadd4I_reg(vecX dst, vecX src1, vecX src2) %{ |
kvn@4001 | 2640 | predicate(UseAVX > 0 && n->as_Vector()->length() == 4); |
kvn@4001 | 2641 | match(Set dst (AddVI src1 src2)); |
kvn@4001 | 2642 | format %{ "vpaddd $dst,$src1,$src2\t! add packed4I" %} |
kvn@4001 | 2643 | ins_encode %{ |
kvn@4001 | 2644 | bool vector256 = false; |
kvn@4001 | 2645 | __ vpaddd($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256); |
kvn@4001 | 2646 | %} |
kvn@4001 | 2647 | ins_pipe( pipe_slow ); |
kvn@4001 | 2648 | %} |
kvn@4001 | 2649 | |
kvn@4001 | 2650 | instruct vadd4I_mem(vecX dst, vecX src, memory mem) %{ |
kvn@4001 | 2651 | predicate(UseAVX > 0 && n->as_Vector()->length() == 4); |
kvn@4001 | 2652 | match(Set dst (AddVI src (LoadVector mem))); |
kvn@4001 | 2653 | format %{ "vpaddd $dst,$src,$mem\t! add packed4I" %} |
kvn@4001 | 2654 | ins_encode %{ |
kvn@4001 | 2655 | bool vector256 = false; |
kvn@4001 | 2656 | __ vpaddd($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector256); |
kvn@4001 | 2657 | %} |
kvn@4001 | 2658 | ins_pipe( pipe_slow ); |
kvn@4001 | 2659 | %} |
kvn@4001 | 2660 | |
kvn@4001 | 2661 | instruct vadd8I_reg(vecY dst, vecY src1, vecY src2) %{ |
kvn@4001 | 2662 | predicate(UseAVX > 1 && n->as_Vector()->length() == 8); |
kvn@4001 | 2663 | match(Set dst (AddVI src1 src2)); |
kvn@4001 | 2664 | format %{ "vpaddd $dst,$src1,$src2\t! add packed8I" %} |
kvn@4001 | 2665 | ins_encode %{ |
kvn@4001 | 2666 | bool vector256 = true; |
kvn@4001 | 2667 | __ vpaddd($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256); |
kvn@4001 | 2668 | %} |
kvn@4001 | 2669 | ins_pipe( pipe_slow ); |
kvn@4001 | 2670 | %} |
kvn@4001 | 2671 | |
kvn@4001 | 2672 | instruct vadd8I_mem(vecY dst, vecY src, memory mem) %{ |
kvn@4001 | 2673 | predicate(UseAVX > 1 && n->as_Vector()->length() == 8); |
kvn@4001 | 2674 | match(Set dst (AddVI src (LoadVector mem))); |
kvn@4001 | 2675 | format %{ "vpaddd $dst,$src,$mem\t! add packed8I" %} |
kvn@4001 | 2676 | ins_encode %{ |
kvn@4001 | 2677 | bool vector256 = true; |
kvn@4001 | 2678 | __ vpaddd($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector256); |
kvn@4001 | 2679 | %} |
kvn@4001 | 2680 | ins_pipe( pipe_slow ); |
kvn@4001 | 2681 | %} |
kvn@4001 | 2682 | |
kvn@4001 | 2683 | // Longs vector add |
kvn@4001 | 2684 | instruct vadd2L(vecX dst, vecX src) %{ |
kvn@4001 | 2685 | predicate(n->as_Vector()->length() == 2); |
kvn@4001 | 2686 | match(Set dst (AddVL dst src)); |
kvn@4001 | 2687 | format %{ "paddq $dst,$src\t! add packed2L" %} |
kvn@4001 | 2688 | ins_encode %{ |
kvn@4001 | 2689 | __ paddq($dst$$XMMRegister, $src$$XMMRegister); |
kvn@4001 | 2690 | %} |
kvn@4001 | 2691 | ins_pipe( pipe_slow ); |
kvn@4001 | 2692 | %} |
kvn@4001 | 2693 | |
kvn@4001 | 2694 | instruct vadd2L_reg(vecX dst, vecX src1, vecX src2) %{ |
kvn@4001 | 2695 | predicate(UseAVX > 0 && n->as_Vector()->length() == 2); |
kvn@4001 | 2696 | match(Set dst (AddVL src1 src2)); |
kvn@4001 | 2697 | format %{ "vpaddq $dst,$src1,$src2\t! add packed2L" %} |
kvn@4001 | 2698 | ins_encode %{ |
kvn@4001 | 2699 | bool vector256 = false; |
kvn@4001 | 2700 | __ vpaddq($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256); |
kvn@4001 | 2701 | %} |
kvn@4001 | 2702 | ins_pipe( pipe_slow ); |
kvn@4001 | 2703 | %} |
kvn@4001 | 2704 | |
kvn@4001 | 2705 | instruct vadd2L_mem(vecX dst, vecX src, memory mem) %{ |
kvn@4001 | 2706 | predicate(UseAVX > 0 && n->as_Vector()->length() == 2); |
kvn@4001 | 2707 | match(Set dst (AddVL src (LoadVector mem))); |
kvn@4001 | 2708 | format %{ "vpaddq $dst,$src,$mem\t! add packed2L" %} |
kvn@4001 | 2709 | ins_encode %{ |
kvn@4001 | 2710 | bool vector256 = false; |
kvn@4001 | 2711 | __ vpaddq($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector256); |
kvn@4001 | 2712 | %} |
kvn@4001 | 2713 | ins_pipe( pipe_slow ); |
kvn@4001 | 2714 | %} |
kvn@4001 | 2715 | |
kvn@4001 | 2716 | instruct vadd4L_reg(vecY dst, vecY src1, vecY src2) %{ |
kvn@4001 | 2717 | predicate(UseAVX > 1 && n->as_Vector()->length() == 4); |
kvn@4001 | 2718 | match(Set dst (AddVL src1 src2)); |
kvn@4001 | 2719 | format %{ "vpaddq $dst,$src1,$src2\t! add packed4L" %} |
kvn@4001 | 2720 | ins_encode %{ |
kvn@4001 | 2721 | bool vector256 = true; |
kvn@4001 | 2722 | __ vpaddq($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256); |
kvn@4001 | 2723 | %} |
kvn@4001 | 2724 | ins_pipe( pipe_slow ); |
kvn@4001 | 2725 | %} |
kvn@4001 | 2726 | |
kvn@4001 | 2727 | instruct vadd4L_mem(vecY dst, vecY src, memory mem) %{ |
kvn@4001 | 2728 | predicate(UseAVX > 1 && n->as_Vector()->length() == 4); |
kvn@4001 | 2729 | match(Set dst (AddVL src (LoadVector mem))); |
kvn@4001 | 2730 | format %{ "vpaddq $dst,$src,$mem\t! add packed4L" %} |
kvn@4001 | 2731 | ins_encode %{ |
kvn@4001 | 2732 | bool vector256 = true; |
kvn@4001 | 2733 | __ vpaddq($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector256); |
kvn@4001 | 2734 | %} |
kvn@4001 | 2735 | ins_pipe( pipe_slow ); |
kvn@4001 | 2736 | %} |
kvn@4001 | 2737 | |
kvn@4001 | 2738 | // Floats vector add |
kvn@4001 | 2739 | instruct vadd2F(vecD dst, vecD src) %{ |
kvn@4001 | 2740 | predicate(n->as_Vector()->length() == 2); |
kvn@4001 | 2741 | match(Set dst (AddVF dst src)); |
kvn@4001 | 2742 | format %{ "addps $dst,$src\t! add packed2F" %} |
kvn@4001 | 2743 | ins_encode %{ |
kvn@4001 | 2744 | __ addps($dst$$XMMRegister, $src$$XMMRegister); |
kvn@4001 | 2745 | %} |
kvn@4001 | 2746 | ins_pipe( pipe_slow ); |
kvn@4001 | 2747 | %} |
kvn@4001 | 2748 | |
kvn@4001 | 2749 | instruct vadd2F_reg(vecD dst, vecD src1, vecD src2) %{ |
kvn@4001 | 2750 | predicate(UseAVX > 0 && n->as_Vector()->length() == 2); |
kvn@4001 | 2751 | match(Set dst (AddVF src1 src2)); |
kvn@4001 | 2752 | format %{ "vaddps $dst,$src1,$src2\t! add packed2F" %} |
kvn@4001 | 2753 | ins_encode %{ |
kvn@4001 | 2754 | bool vector256 = false; |
kvn@4001 | 2755 | __ vaddps($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256); |
kvn@4001 | 2756 | %} |
kvn@4001 | 2757 | ins_pipe( pipe_slow ); |
kvn@4001 | 2758 | %} |
kvn@4001 | 2759 | |
kvn@4001 | 2760 | instruct vadd4F(vecX dst, vecX src) %{ |
kvn@4001 | 2761 | predicate(n->as_Vector()->length() == 4); |
kvn@4001 | 2762 | match(Set dst (AddVF dst src)); |
kvn@4001 | 2763 | format %{ "addps $dst,$src\t! add packed4F" %} |
kvn@4001 | 2764 | ins_encode %{ |
kvn@4001 | 2765 | __ addps($dst$$XMMRegister, $src$$XMMRegister); |
kvn@4001 | 2766 | %} |
kvn@4001 | 2767 | ins_pipe( pipe_slow ); |
kvn@4001 | 2768 | %} |
kvn@4001 | 2769 | |
kvn@4001 | 2770 | instruct vadd4F_reg(vecX dst, vecX src1, vecX src2) %{ |
kvn@4001 | 2771 | predicate(UseAVX > 0 && n->as_Vector()->length() == 4); |
kvn@4001 | 2772 | match(Set dst (AddVF src1 src2)); |
kvn@4001 | 2773 | format %{ "vaddps $dst,$src1,$src2\t! add packed4F" %} |
kvn@4001 | 2774 | ins_encode %{ |
kvn@4001 | 2775 | bool vector256 = false; |
kvn@4001 | 2776 | __ vaddps($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256); |
kvn@4001 | 2777 | %} |
kvn@4001 | 2778 | ins_pipe( pipe_slow ); |
kvn@4001 | 2779 | %} |
kvn@4001 | 2780 | |
kvn@4001 | 2781 | instruct vadd4F_mem(vecX dst, vecX src, memory mem) %{ |
kvn@4001 | 2782 | predicate(UseAVX > 0 && n->as_Vector()->length() == 4); |
kvn@4001 | 2783 | match(Set dst (AddVF src (LoadVector mem))); |
kvn@4001 | 2784 | format %{ "vaddps $dst,$src,$mem\t! add packed4F" %} |
kvn@4001 | 2785 | ins_encode %{ |
kvn@4001 | 2786 | bool vector256 = false; |
kvn@4001 | 2787 | __ vaddps($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector256); |
kvn@4001 | 2788 | %} |
kvn@4001 | 2789 | ins_pipe( pipe_slow ); |
kvn@4001 | 2790 | %} |
kvn@4001 | 2791 | |
kvn@4001 | 2792 | instruct vadd8F_reg(vecY dst, vecY src1, vecY src2) %{ |
kvn@4001 | 2793 | predicate(UseAVX > 0 && n->as_Vector()->length() == 8); |
kvn@4001 | 2794 | match(Set dst (AddVF src1 src2)); |
kvn@4001 | 2795 | format %{ "vaddps $dst,$src1,$src2\t! add packed8F" %} |
kvn@4001 | 2796 | ins_encode %{ |
kvn@4001 | 2797 | bool vector256 = true; |
kvn@4001 | 2798 | __ vaddps($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256); |
kvn@4001 | 2799 | %} |
kvn@4001 | 2800 | ins_pipe( pipe_slow ); |
kvn@4001 | 2801 | %} |
kvn@4001 | 2802 | |
kvn@4001 | 2803 | instruct vadd8F_mem(vecY dst, vecY src, memory mem) %{ |
kvn@4001 | 2804 | predicate(UseAVX > 0 && n->as_Vector()->length() == 8); |
kvn@4001 | 2805 | match(Set dst (AddVF src (LoadVector mem))); |
kvn@4001 | 2806 | format %{ "vaddps $dst,$src,$mem\t! add packed8F" %} |
kvn@4001 | 2807 | ins_encode %{ |
kvn@4001 | 2808 | bool vector256 = true; |
kvn@4001 | 2809 | __ vaddps($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector256); |
kvn@4001 | 2810 | %} |
kvn@4001 | 2811 | ins_pipe( pipe_slow ); |
kvn@4001 | 2812 | %} |
kvn@4001 | 2813 | |
kvn@4001 | 2814 | // Doubles vector add |
kvn@4001 | 2815 | instruct vadd2D(vecX dst, vecX src) %{ |
kvn@4001 | 2816 | predicate(n->as_Vector()->length() == 2); |
kvn@4001 | 2817 | match(Set dst (AddVD dst src)); |
kvn@4001 | 2818 | format %{ "addpd $dst,$src\t! add packed2D" %} |
kvn@4001 | 2819 | ins_encode %{ |
kvn@4001 | 2820 | __ addpd($dst$$XMMRegister, $src$$XMMRegister); |
kvn@4001 | 2821 | %} |
kvn@4001 | 2822 | ins_pipe( pipe_slow ); |
kvn@4001 | 2823 | %} |
kvn@4001 | 2824 | |
kvn@4001 | 2825 | instruct vadd2D_reg(vecX dst, vecX src1, vecX src2) %{ |
kvn@4001 | 2826 | predicate(UseAVX > 0 && n->as_Vector()->length() == 2); |
kvn@4001 | 2827 | match(Set dst (AddVD src1 src2)); |
kvn@4001 | 2828 | format %{ "vaddpd $dst,$src1,$src2\t! add packed2D" %} |
kvn@4001 | 2829 | ins_encode %{ |
kvn@4001 | 2830 | bool vector256 = false; |
kvn@4001 | 2831 | __ vaddpd($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256); |
kvn@4001 | 2832 | %} |
kvn@4001 | 2833 | ins_pipe( pipe_slow ); |
kvn@4001 | 2834 | %} |
kvn@4001 | 2835 | |
kvn@4001 | 2836 | instruct vadd2D_mem(vecX dst, vecX src, memory mem) %{ |
kvn@4001 | 2837 | predicate(UseAVX > 0 && n->as_Vector()->length() == 2); |
kvn@4001 | 2838 | match(Set dst (AddVD src (LoadVector mem))); |
kvn@4001 | 2839 | format %{ "vaddpd $dst,$src,$mem\t! add packed2D" %} |
kvn@4001 | 2840 | ins_encode %{ |
kvn@4001 | 2841 | bool vector256 = false; |
kvn@4001 | 2842 | __ vaddpd($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector256); |
kvn@4001 | 2843 | %} |
kvn@4001 | 2844 | ins_pipe( pipe_slow ); |
kvn@4001 | 2845 | %} |
kvn@4001 | 2846 | |
kvn@4001 | 2847 | instruct vadd4D_reg(vecY dst, vecY src1, vecY src2) %{ |
kvn@4001 | 2848 | predicate(UseAVX > 0 && n->as_Vector()->length() == 4); |
kvn@4001 | 2849 | match(Set dst (AddVD src1 src2)); |
kvn@4001 | 2850 | format %{ "vaddpd $dst,$src1,$src2\t! add packed4D" %} |
kvn@4001 | 2851 | ins_encode %{ |
kvn@4001 | 2852 | bool vector256 = true; |
kvn@4001 | 2853 | __ vaddpd($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256); |
kvn@4001 | 2854 | %} |
kvn@4001 | 2855 | ins_pipe( pipe_slow ); |
kvn@4001 | 2856 | %} |
kvn@4001 | 2857 | |
kvn@4001 | 2858 | instruct vadd4D_mem(vecY dst, vecY src, memory mem) %{ |
kvn@4001 | 2859 | predicate(UseAVX > 0 && n->as_Vector()->length() == 4); |
kvn@4001 | 2860 | match(Set dst (AddVD src (LoadVector mem))); |
kvn@4001 | 2861 | format %{ "vaddpd $dst,$src,$mem\t! add packed4D" %} |
kvn@4001 | 2862 | ins_encode %{ |
kvn@4001 | 2863 | bool vector256 = true; |
kvn@4001 | 2864 | __ vaddpd($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector256); |
kvn@4001 | 2865 | %} |
kvn@4001 | 2866 | ins_pipe( pipe_slow ); |
kvn@4001 | 2867 | %} |
kvn@4001 | 2868 | |
kvn@4001 | 2869 | // --------------------------------- SUB -------------------------------------- |
kvn@4001 | 2870 | |
kvn@4001 | 2871 | // Bytes vector sub |
kvn@4001 | 2872 | instruct vsub4B(vecS dst, vecS src) %{ |
kvn@4001 | 2873 | predicate(n->as_Vector()->length() == 4); |
kvn@4001 | 2874 | match(Set dst (SubVB dst src)); |
kvn@4001 | 2875 | format %{ "psubb $dst,$src\t! sub packed4B" %} |
kvn@4001 | 2876 | ins_encode %{ |
kvn@4001 | 2877 | __ psubb($dst$$XMMRegister, $src$$XMMRegister); |
kvn@4001 | 2878 | %} |
kvn@4001 | 2879 | ins_pipe( pipe_slow ); |
kvn@4001 | 2880 | %} |
kvn@4001 | 2881 | |
kvn@4001 | 2882 | instruct vsub4B_reg(vecS dst, vecS src1, vecS src2) %{ |
kvn@4001 | 2883 | predicate(UseAVX > 0 && n->as_Vector()->length() == 4); |
kvn@4001 | 2884 | match(Set dst (SubVB src1 src2)); |
kvn@4001 | 2885 | format %{ "vpsubb $dst,$src1,$src2\t! sub packed4B" %} |
kvn@4001 | 2886 | ins_encode %{ |
kvn@4001 | 2887 | bool vector256 = false; |
kvn@4001 | 2888 | __ vpsubb($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256); |
kvn@4001 | 2889 | %} |
kvn@4001 | 2890 | ins_pipe( pipe_slow ); |
kvn@4001 | 2891 | %} |
kvn@4001 | 2892 | |
kvn@4001 | 2893 | instruct vsub8B(vecD dst, vecD src) %{ |
kvn@4001 | 2894 | predicate(n->as_Vector()->length() == 8); |
kvn@4001 | 2895 | match(Set dst (SubVB dst src)); |
kvn@4001 | 2896 | format %{ "psubb $dst,$src\t! sub packed8B" %} |
kvn@4001 | 2897 | ins_encode %{ |
kvn@4001 | 2898 | __ psubb($dst$$XMMRegister, $src$$XMMRegister); |
kvn@4001 | 2899 | %} |
kvn@4001 | 2900 | ins_pipe( pipe_slow ); |
kvn@4001 | 2901 | %} |
kvn@4001 | 2902 | |
kvn@4001 | 2903 | instruct vsub8B_reg(vecD dst, vecD src1, vecD src2) %{ |
kvn@4001 | 2904 | predicate(UseAVX > 0 && n->as_Vector()->length() == 8); |
kvn@4001 | 2905 | match(Set dst (SubVB src1 src2)); |
kvn@4001 | 2906 | format %{ "vpsubb $dst,$src1,$src2\t! sub packed8B" %} |
kvn@4001 | 2907 | ins_encode %{ |
kvn@4001 | 2908 | bool vector256 = false; |
kvn@4001 | 2909 | __ vpsubb($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256); |
kvn@4001 | 2910 | %} |
kvn@4001 | 2911 | ins_pipe( pipe_slow ); |
kvn@4001 | 2912 | %} |
kvn@4001 | 2913 | |
kvn@4001 | 2914 | instruct vsub16B(vecX dst, vecX src) %{ |
kvn@4001 | 2915 | predicate(n->as_Vector()->length() == 16); |
kvn@4001 | 2916 | match(Set dst (SubVB dst src)); |
kvn@4001 | 2917 | format %{ "psubb $dst,$src\t! sub packed16B" %} |
kvn@4001 | 2918 | ins_encode %{ |
kvn@4001 | 2919 | __ psubb($dst$$XMMRegister, $src$$XMMRegister); |
kvn@4001 | 2920 | %} |
kvn@4001 | 2921 | ins_pipe( pipe_slow ); |
kvn@4001 | 2922 | %} |
kvn@4001 | 2923 | |
kvn@4001 | 2924 | instruct vsub16B_reg(vecX dst, vecX src1, vecX src2) %{ |
kvn@4001 | 2925 | predicate(UseAVX > 0 && n->as_Vector()->length() == 16); |
kvn@4001 | 2926 | match(Set dst (SubVB src1 src2)); |
kvn@4001 | 2927 | format %{ "vpsubb $dst,$src1,$src2\t! sub packed16B" %} |
kvn@4001 | 2928 | ins_encode %{ |
kvn@4001 | 2929 | bool vector256 = false; |
kvn@4001 | 2930 | __ vpsubb($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256); |
kvn@4001 | 2931 | %} |
kvn@4001 | 2932 | ins_pipe( pipe_slow ); |
kvn@4001 | 2933 | %} |
kvn@4001 | 2934 | |
kvn@4001 | 2935 | instruct vsub16B_mem(vecX dst, vecX src, memory mem) %{ |
kvn@4001 | 2936 | predicate(UseAVX > 0 && n->as_Vector()->length() == 16); |
kvn@4001 | 2937 | match(Set dst (SubVB src (LoadVector mem))); |
kvn@4001 | 2938 | format %{ "vpsubb $dst,$src,$mem\t! sub packed16B" %} |
kvn@4001 | 2939 | ins_encode %{ |
kvn@4001 | 2940 | bool vector256 = false; |
kvn@4001 | 2941 | __ vpsubb($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector256); |
kvn@4001 | 2942 | %} |
kvn@4001 | 2943 | ins_pipe( pipe_slow ); |
kvn@4001 | 2944 | %} |
kvn@4001 | 2945 | |
kvn@4001 | 2946 | instruct vsub32B_reg(vecY dst, vecY src1, vecY src2) %{ |
kvn@4001 | 2947 | predicate(UseAVX > 1 && n->as_Vector()->length() == 32); |
kvn@4001 | 2948 | match(Set dst (SubVB src1 src2)); |
kvn@4001 | 2949 | format %{ "vpsubb $dst,$src1,$src2\t! sub packed32B" %} |
kvn@4001 | 2950 | ins_encode %{ |
kvn@4001 | 2951 | bool vector256 = true; |
kvn@4001 | 2952 | __ vpsubb($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256); |
kvn@4001 | 2953 | %} |
kvn@4001 | 2954 | ins_pipe( pipe_slow ); |
kvn@4001 | 2955 | %} |
kvn@4001 | 2956 | |
kvn@4001 | 2957 | instruct vsub32B_mem(vecY dst, vecY src, memory mem) %{ |
kvn@4001 | 2958 | predicate(UseAVX > 1 && n->as_Vector()->length() == 32); |
kvn@4001 | 2959 | match(Set dst (SubVB src (LoadVector mem))); |
kvn@4001 | 2960 | format %{ "vpsubb $dst,$src,$mem\t! sub packed32B" %} |
kvn@4001 | 2961 | ins_encode %{ |
kvn@4001 | 2962 | bool vector256 = true; |
kvn@4001 | 2963 | __ vpsubb($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector256); |
kvn@4001 | 2964 | %} |
kvn@4001 | 2965 | ins_pipe( pipe_slow ); |
kvn@4001 | 2966 | %} |
kvn@4001 | 2967 | |
kvn@4001 | 2968 | // Shorts/Chars vector sub |
kvn@4001 | 2969 | instruct vsub2S(vecS dst, vecS src) %{ |
kvn@4001 | 2970 | predicate(n->as_Vector()->length() == 2); |
kvn@4001 | 2971 | match(Set dst (SubVS dst src)); |
kvn@4001 | 2972 | format %{ "psubw $dst,$src\t! sub packed2S" %} |
kvn@4001 | 2973 | ins_encode %{ |
kvn@4001 | 2974 | __ psubw($dst$$XMMRegister, $src$$XMMRegister); |
kvn@4001 | 2975 | %} |
kvn@4001 | 2976 | ins_pipe( pipe_slow ); |
kvn@4001 | 2977 | %} |
kvn@4001 | 2978 | |
kvn@4001 | 2979 | instruct vsub2S_reg(vecS dst, vecS src1, vecS src2) %{ |
kvn@4001 | 2980 | predicate(UseAVX > 0 && n->as_Vector()->length() == 2); |
kvn@4001 | 2981 | match(Set dst (SubVS src1 src2)); |
kvn@4001 | 2982 | format %{ "vpsubw $dst,$src1,$src2\t! sub packed2S" %} |
kvn@4001 | 2983 | ins_encode %{ |
kvn@4001 | 2984 | bool vector256 = false; |
kvn@4001 | 2985 | __ vpsubw($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256); |
kvn@4001 | 2986 | %} |
kvn@4001 | 2987 | ins_pipe( pipe_slow ); |
kvn@4001 | 2988 | %} |
kvn@4001 | 2989 | |
kvn@4001 | 2990 | instruct vsub4S(vecD dst, vecD src) %{ |
kvn@4001 | 2991 | predicate(n->as_Vector()->length() == 4); |
kvn@4001 | 2992 | match(Set dst (SubVS dst src)); |
kvn@4001 | 2993 | format %{ "psubw $dst,$src\t! sub packed4S" %} |
kvn@4001 | 2994 | ins_encode %{ |
kvn@4001 | 2995 | __ psubw($dst$$XMMRegister, $src$$XMMRegister); |
kvn@4001 | 2996 | %} |
kvn@4001 | 2997 | ins_pipe( pipe_slow ); |
kvn@4001 | 2998 | %} |
kvn@4001 | 2999 | |
kvn@4001 | 3000 | instruct vsub4S_reg(vecD dst, vecD src1, vecD src2) %{ |
kvn@4001 | 3001 | predicate(UseAVX > 0 && n->as_Vector()->length() == 4); |
kvn@4001 | 3002 | match(Set dst (SubVS src1 src2)); |
kvn@4001 | 3003 | format %{ "vpsubw $dst,$src1,$src2\t! sub packed4S" %} |
kvn@4001 | 3004 | ins_encode %{ |
kvn@4001 | 3005 | bool vector256 = false; |
kvn@4001 | 3006 | __ vpsubw($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256); |
kvn@4001 | 3007 | %} |
kvn@4001 | 3008 | ins_pipe( pipe_slow ); |
kvn@4001 | 3009 | %} |
kvn@4001 | 3010 | |
kvn@4001 | 3011 | instruct vsub8S(vecX dst, vecX src) %{ |
kvn@4001 | 3012 | predicate(n->as_Vector()->length() == 8); |
kvn@4001 | 3013 | match(Set dst (SubVS dst src)); |
kvn@4001 | 3014 | format %{ "psubw $dst,$src\t! sub packed8S" %} |
kvn@4001 | 3015 | ins_encode %{ |
kvn@4001 | 3016 | __ psubw($dst$$XMMRegister, $src$$XMMRegister); |
kvn@4001 | 3017 | %} |
kvn@4001 | 3018 | ins_pipe( pipe_slow ); |
kvn@4001 | 3019 | %} |
kvn@4001 | 3020 | |
kvn@4001 | 3021 | instruct vsub8S_reg(vecX dst, vecX src1, vecX src2) %{ |
kvn@4001 | 3022 | predicate(UseAVX > 0 && n->as_Vector()->length() == 8); |
kvn@4001 | 3023 | match(Set dst (SubVS src1 src2)); |
kvn@4001 | 3024 | format %{ "vpsubw $dst,$src1,$src2\t! sub packed8S" %} |
kvn@4001 | 3025 | ins_encode %{ |
kvn@4001 | 3026 | bool vector256 = false; |
kvn@4001 | 3027 | __ vpsubw($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256); |
kvn@4001 | 3028 | %} |
kvn@4001 | 3029 | ins_pipe( pipe_slow ); |
kvn@4001 | 3030 | %} |
kvn@4001 | 3031 | |
kvn@4001 | 3032 | instruct vsub8S_mem(vecX dst, vecX src, memory mem) %{ |
kvn@4001 | 3033 | predicate(UseAVX > 0 && n->as_Vector()->length() == 8); |
kvn@4001 | 3034 | match(Set dst (SubVS src (LoadVector mem))); |
kvn@4001 | 3035 | format %{ "vpsubw $dst,$src,$mem\t! sub packed8S" %} |
kvn@4001 | 3036 | ins_encode %{ |
kvn@4001 | 3037 | bool vector256 = false; |
kvn@4001 | 3038 | __ vpsubw($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector256); |
kvn@4001 | 3039 | %} |
kvn@4001 | 3040 | ins_pipe( pipe_slow ); |
kvn@4001 | 3041 | %} |
kvn@4001 | 3042 | |
kvn@4001 | 3043 | instruct vsub16S_reg(vecY dst, vecY src1, vecY src2) %{ |
kvn@4001 | 3044 | predicate(UseAVX > 1 && n->as_Vector()->length() == 16); |
kvn@4001 | 3045 | match(Set dst (SubVS src1 src2)); |
kvn@4001 | 3046 | format %{ "vpsubw $dst,$src1,$src2\t! sub packed16S" %} |
kvn@4001 | 3047 | ins_encode %{ |
kvn@4001 | 3048 | bool vector256 = true; |
kvn@4001 | 3049 | __ vpsubw($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256); |
kvn@4001 | 3050 | %} |
kvn@4001 | 3051 | ins_pipe( pipe_slow ); |
kvn@4001 | 3052 | %} |
kvn@4001 | 3053 | |
kvn@4001 | 3054 | instruct vsub16S_mem(vecY dst, vecY src, memory mem) %{ |
kvn@4001 | 3055 | predicate(UseAVX > 1 && n->as_Vector()->length() == 16); |
kvn@4001 | 3056 | match(Set dst (SubVS src (LoadVector mem))); |
kvn@4001 | 3057 | format %{ "vpsubw $dst,$src,$mem\t! sub packed16S" %} |
kvn@4001 | 3058 | ins_encode %{ |
kvn@4001 | 3059 | bool vector256 = true; |
kvn@4001 | 3060 | __ vpsubw($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector256); |
kvn@4001 | 3061 | %} |
kvn@4001 | 3062 | ins_pipe( pipe_slow ); |
kvn@4001 | 3063 | %} |
kvn@4001 | 3064 | |
kvn@4001 | 3065 | // Integers vector sub |
kvn@4001 | 3066 | instruct vsub2I(vecD dst, vecD src) %{ |
kvn@4001 | 3067 | predicate(n->as_Vector()->length() == 2); |
kvn@4001 | 3068 | match(Set dst (SubVI dst src)); |
kvn@4001 | 3069 | format %{ "psubd $dst,$src\t! sub packed2I" %} |
kvn@4001 | 3070 | ins_encode %{ |
kvn@4001 | 3071 | __ psubd($dst$$XMMRegister, $src$$XMMRegister); |
kvn@4001 | 3072 | %} |
kvn@4001 | 3073 | ins_pipe( pipe_slow ); |
kvn@4001 | 3074 | %} |
kvn@4001 | 3075 | |
kvn@4001 | 3076 | instruct vsub2I_reg(vecD dst, vecD src1, vecD src2) %{ |
kvn@4001 | 3077 | predicate(UseAVX > 0 && n->as_Vector()->length() == 2); |
kvn@4001 | 3078 | match(Set dst (SubVI src1 src2)); |
kvn@4001 | 3079 | format %{ "vpsubd $dst,$src1,$src2\t! sub packed2I" %} |
kvn@4001 | 3080 | ins_encode %{ |
kvn@4001 | 3081 | bool vector256 = false; |
kvn@4001 | 3082 | __ vpsubd($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256); |
kvn@4001 | 3083 | %} |
kvn@4001 | 3084 | ins_pipe( pipe_slow ); |
kvn@4001 | 3085 | %} |
kvn@4001 | 3086 | |
kvn@4001 | 3087 | instruct vsub4I(vecX dst, vecX src) %{ |
kvn@4001 | 3088 | predicate(n->as_Vector()->length() == 4); |
kvn@4001 | 3089 | match(Set dst (SubVI dst src)); |
kvn@4001 | 3090 | format %{ "psubd $dst,$src\t! sub packed4I" %} |
kvn@4001 | 3091 | ins_encode %{ |
kvn@4001 | 3092 | __ psubd($dst$$XMMRegister, $src$$XMMRegister); |
kvn@4001 | 3093 | %} |
kvn@4001 | 3094 | ins_pipe( pipe_slow ); |
kvn@4001 | 3095 | %} |
kvn@4001 | 3096 | |
kvn@4001 | 3097 | instruct vsub4I_reg(vecX dst, vecX src1, vecX src2) %{ |
kvn@4001 | 3098 | predicate(UseAVX > 0 && n->as_Vector()->length() == 4); |
kvn@4001 | 3099 | match(Set dst (SubVI src1 src2)); |
kvn@4001 | 3100 | format %{ "vpsubd $dst,$src1,$src2\t! sub packed4I" %} |
kvn@4001 | 3101 | ins_encode %{ |
kvn@4001 | 3102 | bool vector256 = false; |
kvn@4001 | 3103 | __ vpsubd($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256); |
kvn@4001 | 3104 | %} |
kvn@4001 | 3105 | ins_pipe( pipe_slow ); |
kvn@4001 | 3106 | %} |
kvn@4001 | 3107 | |
kvn@4001 | 3108 | instruct vsub4I_mem(vecX dst, vecX src, memory mem) %{ |
kvn@4001 | 3109 | predicate(UseAVX > 0 && n->as_Vector()->length() == 4); |
kvn@4001 | 3110 | match(Set dst (SubVI src (LoadVector mem))); |
kvn@4001 | 3111 | format %{ "vpsubd $dst,$src,$mem\t! sub packed4I" %} |
kvn@4001 | 3112 | ins_encode %{ |
kvn@4001 | 3113 | bool vector256 = false; |
kvn@4001 | 3114 | __ vpsubd($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector256); |
kvn@4001 | 3115 | %} |
kvn@4001 | 3116 | ins_pipe( pipe_slow ); |
kvn@4001 | 3117 | %} |
kvn@4001 | 3118 | |
kvn@4001 | 3119 | instruct vsub8I_reg(vecY dst, vecY src1, vecY src2) %{ |
kvn@4001 | 3120 | predicate(UseAVX > 1 && n->as_Vector()->length() == 8); |
kvn@4001 | 3121 | match(Set dst (SubVI src1 src2)); |
kvn@4001 | 3122 | format %{ "vpsubd $dst,$src1,$src2\t! sub packed8I" %} |
kvn@4001 | 3123 | ins_encode %{ |
kvn@4001 | 3124 | bool vector256 = true; |
kvn@4001 | 3125 | __ vpsubd($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256); |
kvn@4001 | 3126 | %} |
kvn@4001 | 3127 | ins_pipe( pipe_slow ); |
kvn@4001 | 3128 | %} |
kvn@4001 | 3129 | |
kvn@4001 | 3130 | instruct vsub8I_mem(vecY dst, vecY src, memory mem) %{ |
kvn@4001 | 3131 | predicate(UseAVX > 1 && n->as_Vector()->length() == 8); |
kvn@4001 | 3132 | match(Set dst (SubVI src (LoadVector mem))); |
kvn@4001 | 3133 | format %{ "vpsubd $dst,$src,$mem\t! sub packed8I" %} |
kvn@4001 | 3134 | ins_encode %{ |
kvn@4001 | 3135 | bool vector256 = true; |
kvn@4001 | 3136 | __ vpsubd($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector256); |
kvn@4001 | 3137 | %} |
kvn@4001 | 3138 | ins_pipe( pipe_slow ); |
kvn@4001 | 3139 | %} |
kvn@4001 | 3140 | |
kvn@4001 | 3141 | // Longs vector sub |
kvn@4001 | 3142 | instruct vsub2L(vecX dst, vecX src) %{ |
kvn@4001 | 3143 | predicate(n->as_Vector()->length() == 2); |
kvn@4001 | 3144 | match(Set dst (SubVL dst src)); |
kvn@4001 | 3145 | format %{ "psubq $dst,$src\t! sub packed2L" %} |
kvn@4001 | 3146 | ins_encode %{ |
kvn@4001 | 3147 | __ psubq($dst$$XMMRegister, $src$$XMMRegister); |
kvn@4001 | 3148 | %} |
kvn@4001 | 3149 | ins_pipe( pipe_slow ); |
kvn@4001 | 3150 | %} |
kvn@4001 | 3151 | |
kvn@4001 | 3152 | instruct vsub2L_reg(vecX dst, vecX src1, vecX src2) %{ |
kvn@4001 | 3153 | predicate(UseAVX > 0 && n->as_Vector()->length() == 2); |
kvn@4001 | 3154 | match(Set dst (SubVL src1 src2)); |
kvn@4001 | 3155 | format %{ "vpsubq $dst,$src1,$src2\t! sub packed2L" %} |
kvn@4001 | 3156 | ins_encode %{ |
kvn@4001 | 3157 | bool vector256 = false; |
kvn@4001 | 3158 | __ vpsubq($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256); |
kvn@4001 | 3159 | %} |
kvn@4001 | 3160 | ins_pipe( pipe_slow ); |
kvn@4001 | 3161 | %} |
kvn@4001 | 3162 | |
kvn@4001 | 3163 | instruct vsub2L_mem(vecX dst, vecX src, memory mem) %{ |
kvn@4001 | 3164 | predicate(UseAVX > 0 && n->as_Vector()->length() == 2); |
kvn@4001 | 3165 | match(Set dst (SubVL src (LoadVector mem))); |
kvn@4001 | 3166 | format %{ "vpsubq $dst,$src,$mem\t! sub packed2L" %} |
kvn@4001 | 3167 | ins_encode %{ |
kvn@4001 | 3168 | bool vector256 = false; |
kvn@4001 | 3169 | __ vpsubq($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector256); |
kvn@4001 | 3170 | %} |
kvn@4001 | 3171 | ins_pipe( pipe_slow ); |
kvn@4001 | 3172 | %} |
kvn@4001 | 3173 | |
kvn@4001 | 3174 | instruct vsub4L_reg(vecY dst, vecY src1, vecY src2) %{ |
kvn@4001 | 3175 | predicate(UseAVX > 1 && n->as_Vector()->length() == 4); |
kvn@4001 | 3176 | match(Set dst (SubVL src1 src2)); |
kvn@4001 | 3177 | format %{ "vpsubq $dst,$src1,$src2\t! sub packed4L" %} |
kvn@4001 | 3178 | ins_encode %{ |
kvn@4001 | 3179 | bool vector256 = true; |
kvn@4001 | 3180 | __ vpsubq($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256); |
kvn@4001 | 3181 | %} |
kvn@4001 | 3182 | ins_pipe( pipe_slow ); |
kvn@4001 | 3183 | %} |
kvn@4001 | 3184 | |
kvn@4001 | 3185 | instruct vsub4L_mem(vecY dst, vecY src, memory mem) %{ |
kvn@4001 | 3186 | predicate(UseAVX > 1 && n->as_Vector()->length() == 4); |
kvn@4001 | 3187 | match(Set dst (SubVL src (LoadVector mem))); |
kvn@4001 | 3188 | format %{ "vpsubq $dst,$src,$mem\t! sub packed4L" %} |
kvn@4001 | 3189 | ins_encode %{ |
kvn@4001 | 3190 | bool vector256 = true; |
kvn@4001 | 3191 | __ vpsubq($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector256); |
kvn@4001 | 3192 | %} |
kvn@4001 | 3193 | ins_pipe( pipe_slow ); |
kvn@4001 | 3194 | %} |
kvn@4001 | 3195 | |
kvn@4001 | 3196 | // Floats vector sub |
kvn@4001 | 3197 | instruct vsub2F(vecD dst, vecD src) %{ |
kvn@4001 | 3198 | predicate(n->as_Vector()->length() == 2); |
kvn@4001 | 3199 | match(Set dst (SubVF dst src)); |
kvn@4001 | 3200 | format %{ "subps $dst,$src\t! sub packed2F" %} |
kvn@4001 | 3201 | ins_encode %{ |
kvn@4001 | 3202 | __ subps($dst$$XMMRegister, $src$$XMMRegister); |
kvn@4001 | 3203 | %} |
kvn@4001 | 3204 | ins_pipe( pipe_slow ); |
kvn@4001 | 3205 | %} |
kvn@4001 | 3206 | |
kvn@4001 | 3207 | instruct vsub2F_reg(vecD dst, vecD src1, vecD src2) %{ |
kvn@4001 | 3208 | predicate(UseAVX > 0 && n->as_Vector()->length() == 2); |
kvn@4001 | 3209 | match(Set dst (SubVF src1 src2)); |
kvn@4001 | 3210 | format %{ "vsubps $dst,$src1,$src2\t! sub packed2F" %} |
kvn@4001 | 3211 | ins_encode %{ |
kvn@4001 | 3212 | bool vector256 = false; |
kvn@4001 | 3213 | __ vsubps($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256); |
kvn@4001 | 3214 | %} |
kvn@4001 | 3215 | ins_pipe( pipe_slow ); |
kvn@4001 | 3216 | %} |
kvn@4001 | 3217 | |
kvn@4001 | 3218 | instruct vsub4F(vecX dst, vecX src) %{ |
kvn@4001 | 3219 | predicate(n->as_Vector()->length() == 4); |
kvn@4001 | 3220 | match(Set dst (SubVF dst src)); |
kvn@4001 | 3221 | format %{ "subps $dst,$src\t! sub packed4F" %} |
kvn@4001 | 3222 | ins_encode %{ |
kvn@4001 | 3223 | __ subps($dst$$XMMRegister, $src$$XMMRegister); |
kvn@4001 | 3224 | %} |
kvn@4001 | 3225 | ins_pipe( pipe_slow ); |
kvn@4001 | 3226 | %} |
kvn@4001 | 3227 | |
kvn@4001 | 3228 | instruct vsub4F_reg(vecX dst, vecX src1, vecX src2) %{ |
kvn@4001 | 3229 | predicate(UseAVX > 0 && n->as_Vector()->length() == 4); |
kvn@4001 | 3230 | match(Set dst (SubVF src1 src2)); |
kvn@4001 | 3231 | format %{ "vsubps $dst,$src1,$src2\t! sub packed4F" %} |
kvn@4001 | 3232 | ins_encode %{ |
kvn@4001 | 3233 | bool vector256 = false; |
kvn@4001 | 3234 | __ vsubps($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256); |
kvn@4001 | 3235 | %} |
kvn@4001 | 3236 | ins_pipe( pipe_slow ); |
kvn@4001 | 3237 | %} |
kvn@4001 | 3238 | |
kvn@4001 | 3239 | instruct vsub4F_mem(vecX dst, vecX src, memory mem) %{ |
kvn@4001 | 3240 | predicate(UseAVX > 0 && n->as_Vector()->length() == 4); |
kvn@4001 | 3241 | match(Set dst (SubVF src (LoadVector mem))); |
kvn@4001 | 3242 | format %{ "vsubps $dst,$src,$mem\t! sub packed4F" %} |
kvn@4001 | 3243 | ins_encode %{ |
kvn@4001 | 3244 | bool vector256 = false; |
kvn@4001 | 3245 | __ vsubps($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector256); |
kvn@4001 | 3246 | %} |
kvn@4001 | 3247 | ins_pipe( pipe_slow ); |
kvn@4001 | 3248 | %} |
kvn@4001 | 3249 | |
kvn@4001 | 3250 | instruct vsub8F_reg(vecY dst, vecY src1, vecY src2) %{ |
kvn@4001 | 3251 | predicate(UseAVX > 0 && n->as_Vector()->length() == 8); |
kvn@4001 | 3252 | match(Set dst (SubVF src1 src2)); |
kvn@4001 | 3253 | format %{ "vsubps $dst,$src1,$src2\t! sub packed8F" %} |
kvn@4001 | 3254 | ins_encode %{ |
kvn@4001 | 3255 | bool vector256 = true; |
kvn@4001 | 3256 | __ vsubps($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256); |
kvn@4001 | 3257 | %} |
kvn@4001 | 3258 | ins_pipe( pipe_slow ); |
kvn@4001 | 3259 | %} |
kvn@4001 | 3260 | |
kvn@4001 | 3261 | instruct vsub8F_mem(vecY dst, vecY src, memory mem) %{ |
kvn@4001 | 3262 | predicate(UseAVX > 0 && n->as_Vector()->length() == 8); |
kvn@4001 | 3263 | match(Set dst (SubVF src (LoadVector mem))); |
kvn@4001 | 3264 | format %{ "vsubps $dst,$src,$mem\t! sub packed8F" %} |
kvn@4001 | 3265 | ins_encode %{ |
kvn@4001 | 3266 | bool vector256 = true; |
kvn@4001 | 3267 | __ vsubps($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector256); |
kvn@4001 | 3268 | %} |
kvn@4001 | 3269 | ins_pipe( pipe_slow ); |
kvn@4001 | 3270 | %} |
kvn@4001 | 3271 | |
kvn@4001 | 3272 | // Doubles vector sub |
kvn@4001 | 3273 | instruct vsub2D(vecX dst, vecX src) %{ |
kvn@4001 | 3274 | predicate(n->as_Vector()->length() == 2); |
kvn@4001 | 3275 | match(Set dst (SubVD dst src)); |
kvn@4001 | 3276 | format %{ "subpd $dst,$src\t! sub packed2D" %} |
kvn@4001 | 3277 | ins_encode %{ |
kvn@4001 | 3278 | __ subpd($dst$$XMMRegister, $src$$XMMRegister); |
kvn@4001 | 3279 | %} |
kvn@4001 | 3280 | ins_pipe( pipe_slow ); |
kvn@4001 | 3281 | %} |
kvn@4001 | 3282 | |
kvn@4001 | 3283 | instruct vsub2D_reg(vecX dst, vecX src1, vecX src2) %{ |
kvn@4001 | 3284 | predicate(UseAVX > 0 && n->as_Vector()->length() == 2); |
kvn@4001 | 3285 | match(Set dst (SubVD src1 src2)); |
kvn@4001 | 3286 | format %{ "vsubpd $dst,$src1,$src2\t! sub packed2D" %} |
kvn@4001 | 3287 | ins_encode %{ |
kvn@4001 | 3288 | bool vector256 = false; |
kvn@4001 | 3289 | __ vsubpd($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256); |
kvn@4001 | 3290 | %} |
kvn@4001 | 3291 | ins_pipe( pipe_slow ); |
kvn@4001 | 3292 | %} |
kvn@4001 | 3293 | |
kvn@4001 | 3294 | instruct vsub2D_mem(vecX dst, vecX src, memory mem) %{ |
kvn@4001 | 3295 | predicate(UseAVX > 0 && n->as_Vector()->length() == 2); |
kvn@4001 | 3296 | match(Set dst (SubVD src (LoadVector mem))); |
kvn@4001 | 3297 | format %{ "vsubpd $dst,$src,$mem\t! sub packed2D" %} |
kvn@4001 | 3298 | ins_encode %{ |
kvn@4001 | 3299 | bool vector256 = false; |
kvn@4001 | 3300 | __ vsubpd($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector256); |
kvn@4001 | 3301 | %} |
kvn@4001 | 3302 | ins_pipe( pipe_slow ); |
kvn@4001 | 3303 | %} |
kvn@4001 | 3304 | |
kvn@4001 | 3305 | instruct vsub4D_reg(vecY dst, vecY src1, vecY src2) %{ |
kvn@4001 | 3306 | predicate(UseAVX > 0 && n->as_Vector()->length() == 4); |
kvn@4001 | 3307 | match(Set dst (SubVD src1 src2)); |
kvn@4001 | 3308 | format %{ "vsubpd $dst,$src1,$src2\t! sub packed4D" %} |
kvn@4001 | 3309 | ins_encode %{ |
kvn@4001 | 3310 | bool vector256 = true; |
kvn@4001 | 3311 | __ vsubpd($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256); |
kvn@4001 | 3312 | %} |
kvn@4001 | 3313 | ins_pipe( pipe_slow ); |
kvn@4001 | 3314 | %} |
kvn@4001 | 3315 | |
kvn@4001 | 3316 | instruct vsub4D_mem(vecY dst, vecY src, memory mem) %{ |
kvn@4001 | 3317 | predicate(UseAVX > 0 && n->as_Vector()->length() == 4); |
kvn@4001 | 3318 | match(Set dst (SubVD src (LoadVector mem))); |
kvn@4001 | 3319 | format %{ "vsubpd $dst,$src,$mem\t! sub packed4D" %} |
kvn@4001 | 3320 | ins_encode %{ |
kvn@4001 | 3321 | bool vector256 = true; |
kvn@4001 | 3322 | __ vsubpd($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector256); |
kvn@4001 | 3323 | %} |
kvn@4001 | 3324 | ins_pipe( pipe_slow ); |
kvn@4001 | 3325 | %} |
kvn@4001 | 3326 | |
kvn@4001 | 3327 | // --------------------------------- MUL -------------------------------------- |
kvn@4001 | 3328 | |
kvn@4001 | 3329 | // Shorts/Chars vector mul |
kvn@4001 | 3330 | instruct vmul2S(vecS dst, vecS src) %{ |
kvn@4001 | 3331 | predicate(n->as_Vector()->length() == 2); |
kvn@4001 | 3332 | match(Set dst (MulVS dst src)); |
kvn@4001 | 3333 | format %{ "pmullw $dst,$src\t! mul packed2S" %} |
kvn@4001 | 3334 | ins_encode %{ |
kvn@4001 | 3335 | __ pmullw($dst$$XMMRegister, $src$$XMMRegister); |
kvn@4001 | 3336 | %} |
kvn@4001 | 3337 | ins_pipe( pipe_slow ); |
kvn@4001 | 3338 | %} |
kvn@4001 | 3339 | |
kvn@4001 | 3340 | instruct vmul2S_reg(vecS dst, vecS src1, vecS src2) %{ |
kvn@4001 | 3341 | predicate(UseAVX > 0 && n->as_Vector()->length() == 2); |
kvn@4001 | 3342 | match(Set dst (MulVS src1 src2)); |
kvn@4001 | 3343 | format %{ "vpmullw $dst,$src1,$src2\t! mul packed2S" %} |
kvn@4001 | 3344 | ins_encode %{ |
kvn@4001 | 3345 | bool vector256 = false; |
kvn@4001 | 3346 | __ vpmullw($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256); |
kvn@4001 | 3347 | %} |
kvn@4001 | 3348 | ins_pipe( pipe_slow ); |
kvn@4001 | 3349 | %} |
kvn@4001 | 3350 | |
kvn@4001 | 3351 | instruct vmul4S(vecD dst, vecD src) %{ |
kvn@4001 | 3352 | predicate(n->as_Vector()->length() == 4); |
kvn@4001 | 3353 | match(Set dst (MulVS dst src)); |
kvn@4001 | 3354 | format %{ "pmullw $dst,$src\t! mul packed4S" %} |
kvn@4001 | 3355 | ins_encode %{ |
kvn@4001 | 3356 | __ pmullw($dst$$XMMRegister, $src$$XMMRegister); |
kvn@4001 | 3357 | %} |
kvn@4001 | 3358 | ins_pipe( pipe_slow ); |
kvn@4001 | 3359 | %} |
kvn@4001 | 3360 | |
kvn@4001 | 3361 | instruct vmul4S_reg(vecD dst, vecD src1, vecD src2) %{ |
kvn@4001 | 3362 | predicate(UseAVX > 0 && n->as_Vector()->length() == 4); |
kvn@4001 | 3363 | match(Set dst (MulVS src1 src2)); |
kvn@4001 | 3364 | format %{ "vpmullw $dst,$src1,$src2\t! mul packed4S" %} |
kvn@4001 | 3365 | ins_encode %{ |
kvn@4001 | 3366 | bool vector256 = false; |
kvn@4001 | 3367 | __ vpmullw($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256); |
kvn@4001 | 3368 | %} |
kvn@4001 | 3369 | ins_pipe( pipe_slow ); |
kvn@4001 | 3370 | %} |
kvn@4001 | 3371 | |
kvn@4001 | 3372 | instruct vmul8S(vecX dst, vecX src) %{ |
kvn@4001 | 3373 | predicate(n->as_Vector()->length() == 8); |
kvn@4001 | 3374 | match(Set dst (MulVS dst src)); |
kvn@4001 | 3375 | format %{ "pmullw $dst,$src\t! mul packed8S" %} |
kvn@4001 | 3376 | ins_encode %{ |
kvn@4001 | 3377 | __ pmullw($dst$$XMMRegister, $src$$XMMRegister); |
kvn@4001 | 3378 | %} |
kvn@4001 | 3379 | ins_pipe( pipe_slow ); |
kvn@4001 | 3380 | %} |
kvn@4001 | 3381 | |
kvn@4001 | 3382 | instruct vmul8S_reg(vecX dst, vecX src1, vecX src2) %{ |
kvn@4001 | 3383 | predicate(UseAVX > 0 && n->as_Vector()->length() == 8); |
kvn@4001 | 3384 | match(Set dst (MulVS src1 src2)); |
kvn@4001 | 3385 | format %{ "vpmullw $dst,$src1,$src2\t! mul packed8S" %} |
kvn@4001 | 3386 | ins_encode %{ |
kvn@4001 | 3387 | bool vector256 = false; |
kvn@4001 | 3388 | __ vpmullw($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256); |
kvn@4001 | 3389 | %} |
kvn@4001 | 3390 | ins_pipe( pipe_slow ); |
kvn@4001 | 3391 | %} |
kvn@4001 | 3392 | |
kvn@4001 | 3393 | instruct vmul8S_mem(vecX dst, vecX src, memory mem) %{ |
kvn@4001 | 3394 | predicate(UseAVX > 0 && n->as_Vector()->length() == 8); |
kvn@4001 | 3395 | match(Set dst (MulVS src (LoadVector mem))); |
kvn@4001 | 3396 | format %{ "vpmullw $dst,$src,$mem\t! mul packed8S" %} |
kvn@4001 | 3397 | ins_encode %{ |
kvn@4001 | 3398 | bool vector256 = false; |
kvn@4001 | 3399 | __ vpmullw($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector256); |
kvn@4001 | 3400 | %} |
kvn@4001 | 3401 | ins_pipe( pipe_slow ); |
kvn@4001 | 3402 | %} |
kvn@4001 | 3403 | |
kvn@4001 | 3404 | instruct vmul16S_reg(vecY dst, vecY src1, vecY src2) %{ |
kvn@4001 | 3405 | predicate(UseAVX > 1 && n->as_Vector()->length() == 16); |
kvn@4001 | 3406 | match(Set dst (MulVS src1 src2)); |
kvn@4001 | 3407 | format %{ "vpmullw $dst,$src1,$src2\t! mul packed16S" %} |
kvn@4001 | 3408 | ins_encode %{ |
kvn@4001 | 3409 | bool vector256 = true; |
kvn@4001 | 3410 | __ vpmullw($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256); |
kvn@4001 | 3411 | %} |
kvn@4001 | 3412 | ins_pipe( pipe_slow ); |
kvn@4001 | 3413 | %} |
kvn@4001 | 3414 | |
kvn@4001 | 3415 | instruct vmul16S_mem(vecY dst, vecY src, memory mem) %{ |
kvn@4001 | 3416 | predicate(UseAVX > 1 && n->as_Vector()->length() == 16); |
kvn@4001 | 3417 | match(Set dst (MulVS src (LoadVector mem))); |
kvn@4001 | 3418 | format %{ "vpmullw $dst,$src,$mem\t! mul packed16S" %} |
kvn@4001 | 3419 | ins_encode %{ |
kvn@4001 | 3420 | bool vector256 = true; |
kvn@4001 | 3421 | __ vpmullw($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector256); |
kvn@4001 | 3422 | %} |
kvn@4001 | 3423 | ins_pipe( pipe_slow ); |
kvn@4001 | 3424 | %} |
kvn@4001 | 3425 | |
kvn@4001 | 3426 | // Integers vector mul (sse4_1) |
kvn@4001 | 3427 | instruct vmul2I(vecD dst, vecD src) %{ |
kvn@4001 | 3428 | predicate(UseSSE > 3 && n->as_Vector()->length() == 2); |
kvn@4001 | 3429 | match(Set dst (MulVI dst src)); |
kvn@4001 | 3430 | format %{ "pmulld $dst,$src\t! mul packed2I" %} |
kvn@4001 | 3431 | ins_encode %{ |
kvn@4001 | 3432 | __ pmulld($dst$$XMMRegister, $src$$XMMRegister); |
kvn@4001 | 3433 | %} |
kvn@4001 | 3434 | ins_pipe( pipe_slow ); |
kvn@4001 | 3435 | %} |
kvn@4001 | 3436 | |
kvn@4001 | 3437 | instruct vmul2I_reg(vecD dst, vecD src1, vecD src2) %{ |
kvn@4001 | 3438 | predicate(UseAVX > 0 && n->as_Vector()->length() == 2); |
kvn@4001 | 3439 | match(Set dst (MulVI src1 src2)); |
kvn@4001 | 3440 | format %{ "vpmulld $dst,$src1,$src2\t! mul packed2I" %} |
kvn@4001 | 3441 | ins_encode %{ |
kvn@4001 | 3442 | bool vector256 = false; |
kvn@4001 | 3443 | __ vpmulld($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256); |
kvn@4001 | 3444 | %} |
kvn@4001 | 3445 | ins_pipe( pipe_slow ); |
kvn@4001 | 3446 | %} |
kvn@4001 | 3447 | |
kvn@4001 | 3448 | instruct vmul4I(vecX dst, vecX src) %{ |
kvn@4001 | 3449 | predicate(UseSSE > 3 && n->as_Vector()->length() == 4); |
kvn@4001 | 3450 | match(Set dst (MulVI dst src)); |
kvn@4001 | 3451 | format %{ "pmulld $dst,$src\t! mul packed4I" %} |
kvn@4001 | 3452 | ins_encode %{ |
kvn@4001 | 3453 | __ pmulld($dst$$XMMRegister, $src$$XMMRegister); |
kvn@4001 | 3454 | %} |
kvn@4001 | 3455 | ins_pipe( pipe_slow ); |
kvn@4001 | 3456 | %} |
kvn@4001 | 3457 | |
kvn@4001 | 3458 | instruct vmul4I_reg(vecX dst, vecX src1, vecX src2) %{ |
kvn@4001 | 3459 | predicate(UseAVX > 0 && n->as_Vector()->length() == 4); |
kvn@4001 | 3460 | match(Set dst (MulVI src1 src2)); |
kvn@4001 | 3461 | format %{ "vpmulld $dst,$src1,$src2\t! mul packed4I" %} |
kvn@4001 | 3462 | ins_encode %{ |
kvn@4001 | 3463 | bool vector256 = false; |
kvn@4001 | 3464 | __ vpmulld($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256); |
kvn@4001 | 3465 | %} |
kvn@4001 | 3466 | ins_pipe( pipe_slow ); |
kvn@4001 | 3467 | %} |
kvn@4001 | 3468 | |
kvn@4001 | 3469 | instruct vmul4I_mem(vecX dst, vecX src, memory mem) %{ |
kvn@4001 | 3470 | predicate(UseAVX > 0 && n->as_Vector()->length() == 4); |
kvn@4001 | 3471 | match(Set dst (MulVI src (LoadVector mem))); |
kvn@4001 | 3472 | format %{ "vpmulld $dst,$src,$mem\t! mul packed4I" %} |
kvn@4001 | 3473 | ins_encode %{ |
kvn@4001 | 3474 | bool vector256 = false; |
kvn@4001 | 3475 | __ vpmulld($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector256); |
kvn@4001 | 3476 | %} |
kvn@4001 | 3477 | ins_pipe( pipe_slow ); |
kvn@4001 | 3478 | %} |
kvn@4001 | 3479 | |
kvn@4001 | 3480 | instruct vmul8I_reg(vecY dst, vecY src1, vecY src2) %{ |
kvn@4001 | 3481 | predicate(UseAVX > 1 && n->as_Vector()->length() == 8); |
kvn@4001 | 3482 | match(Set dst (MulVI src1 src2)); |
kvn@4001 | 3483 | format %{ "vpmulld $dst,$src1,$src2\t! mul packed8I" %} |
kvn@4001 | 3484 | ins_encode %{ |
kvn@4001 | 3485 | bool vector256 = true; |
kvn@4001 | 3486 | __ vpmulld($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256); |
kvn@4001 | 3487 | %} |
kvn@4001 | 3488 | ins_pipe( pipe_slow ); |
kvn@4001 | 3489 | %} |
kvn@4001 | 3490 | |
kvn@4001 | 3491 | instruct vmul8I_mem(vecY dst, vecY src, memory mem) %{ |
kvn@4001 | 3492 | predicate(UseAVX > 1 && n->as_Vector()->length() == 8); |
kvn@4001 | 3493 | match(Set dst (MulVI src (LoadVector mem))); |
kvn@4001 | 3494 | format %{ "vpmulld $dst,$src,$mem\t! mul packed8I" %} |
kvn@4001 | 3495 | ins_encode %{ |
kvn@4001 | 3496 | bool vector256 = true; |
kvn@4001 | 3497 | __ vpmulld($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector256); |
kvn@4001 | 3498 | %} |
kvn@4001 | 3499 | ins_pipe( pipe_slow ); |
kvn@4001 | 3500 | %} |
kvn@4001 | 3501 | |
kvn@4001 | 3502 | // Floats vector mul |
kvn@4001 | 3503 | instruct vmul2F(vecD dst, vecD src) %{ |
kvn@4001 | 3504 | predicate(n->as_Vector()->length() == 2); |
kvn@4001 | 3505 | match(Set dst (MulVF dst src)); |
kvn@4001 | 3506 | format %{ "mulps $dst,$src\t! mul packed2F" %} |
kvn@4001 | 3507 | ins_encode %{ |
kvn@4001 | 3508 | __ mulps($dst$$XMMRegister, $src$$XMMRegister); |
kvn@4001 | 3509 | %} |
kvn@4001 | 3510 | ins_pipe( pipe_slow ); |
kvn@4001 | 3511 | %} |
kvn@4001 | 3512 | |
kvn@4001 | 3513 | instruct vmul2F_reg(vecD dst, vecD src1, vecD src2) %{ |
kvn@4001 | 3514 | predicate(UseAVX > 0 && n->as_Vector()->length() == 2); |
kvn@4001 | 3515 | match(Set dst (MulVF src1 src2)); |
kvn@4001 | 3516 | format %{ "vmulps $dst,$src1,$src2\t! mul packed2F" %} |
kvn@4001 | 3517 | ins_encode %{ |
kvn@4001 | 3518 | bool vector256 = false; |
kvn@4001 | 3519 | __ vmulps($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256); |
kvn@4001 | 3520 | %} |
kvn@4001 | 3521 | ins_pipe( pipe_slow ); |
kvn@4001 | 3522 | %} |
kvn@4001 | 3523 | |
kvn@4001 | 3524 | instruct vmul4F(vecX dst, vecX src) %{ |
kvn@4001 | 3525 | predicate(n->as_Vector()->length() == 4); |
kvn@4001 | 3526 | match(Set dst (MulVF dst src)); |
kvn@4001 | 3527 | format %{ "mulps $dst,$src\t! mul packed4F" %} |
kvn@4001 | 3528 | ins_encode %{ |
kvn@4001 | 3529 | __ mulps($dst$$XMMRegister, $src$$XMMRegister); |
kvn@4001 | 3530 | %} |
kvn@4001 | 3531 | ins_pipe( pipe_slow ); |
kvn@4001 | 3532 | %} |
kvn@4001 | 3533 | |
kvn@4001 | 3534 | instruct vmul4F_reg(vecX dst, vecX src1, vecX src2) %{ |
kvn@4001 | 3535 | predicate(UseAVX > 0 && n->as_Vector()->length() == 4); |
kvn@4001 | 3536 | match(Set dst (MulVF src1 src2)); |
kvn@4001 | 3537 | format %{ "vmulps $dst,$src1,$src2\t! mul packed4F" %} |
kvn@4001 | 3538 | ins_encode %{ |
kvn@4001 | 3539 | bool vector256 = false; |
kvn@4001 | 3540 | __ vmulps($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256); |
kvn@4001 | 3541 | %} |
kvn@4001 | 3542 | ins_pipe( pipe_slow ); |
kvn@4001 | 3543 | %} |
kvn@4001 | 3544 | |
kvn@4001 | 3545 | instruct vmul4F_mem(vecX dst, vecX src, memory mem) %{ |
kvn@4001 | 3546 | predicate(UseAVX > 0 && n->as_Vector()->length() == 4); |
kvn@4001 | 3547 | match(Set dst (MulVF src (LoadVector mem))); |
kvn@4001 | 3548 | format %{ "vmulps $dst,$src,$mem\t! mul packed4F" %} |
kvn@4001 | 3549 | ins_encode %{ |
kvn@4001 | 3550 | bool vector256 = false; |
kvn@4001 | 3551 | __ vmulps($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector256); |
kvn@4001 | 3552 | %} |
kvn@4001 | 3553 | ins_pipe( pipe_slow ); |
kvn@4001 | 3554 | %} |
kvn@4001 | 3555 | |
kvn@4001 | 3556 | instruct vmul8F_reg(vecY dst, vecY src1, vecY src2) %{ |
kvn@4001 | 3557 | predicate(UseAVX > 0 && n->as_Vector()->length() == 8); |
kvn@4001 | 3558 | match(Set dst (MulVF src1 src2)); |
kvn@4001 | 3559 | format %{ "vmulps $dst,$src1,$src2\t! mul packed8F" %} |
kvn@4001 | 3560 | ins_encode %{ |
kvn@4001 | 3561 | bool vector256 = true; |
kvn@4001 | 3562 | __ vmulps($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256); |
kvn@4001 | 3563 | %} |
kvn@4001 | 3564 | ins_pipe( pipe_slow ); |
kvn@4001 | 3565 | %} |
kvn@4001 | 3566 | |
kvn@4001 | 3567 | instruct vmul8F_mem(vecY dst, vecY src, memory mem) %{ |
kvn@4001 | 3568 | predicate(UseAVX > 0 && n->as_Vector()->length() == 8); |
kvn@4001 | 3569 | match(Set dst (MulVF src (LoadVector mem))); |
kvn@4001 | 3570 | format %{ "vmulps $dst,$src,$mem\t! mul packed8F" %} |
kvn@4001 | 3571 | ins_encode %{ |
kvn@4001 | 3572 | bool vector256 = true; |
kvn@4001 | 3573 | __ vmulps($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector256); |
kvn@4001 | 3574 | %} |
kvn@4001 | 3575 | ins_pipe( pipe_slow ); |
kvn@4001 | 3576 | %} |
kvn@4001 | 3577 | |
kvn@4001 | 3578 | // Doubles vector mul |
kvn@4001 | 3579 | instruct vmul2D(vecX dst, vecX src) %{ |
kvn@4001 | 3580 | predicate(n->as_Vector()->length() == 2); |
kvn@4001 | 3581 | match(Set dst (MulVD dst src)); |
kvn@4001 | 3582 | format %{ "mulpd $dst,$src\t! mul packed2D" %} |
kvn@4001 | 3583 | ins_encode %{ |
kvn@4001 | 3584 | __ mulpd($dst$$XMMRegister, $src$$XMMRegister); |
kvn@4001 | 3585 | %} |
kvn@4001 | 3586 | ins_pipe( pipe_slow ); |
kvn@4001 | 3587 | %} |
kvn@4001 | 3588 | |
kvn@4001 | 3589 | instruct vmul2D_reg(vecX dst, vecX src1, vecX src2) %{ |
kvn@4001 | 3590 | predicate(UseAVX > 0 && n->as_Vector()->length() == 2); |
kvn@4001 | 3591 | match(Set dst (MulVD src1 src2)); |
kvn@4001 | 3592 | format %{ "vmulpd $dst,$src1,$src2\t! mul packed2D" %} |
kvn@4001 | 3593 | ins_encode %{ |
kvn@4001 | 3594 | bool vector256 = false; |
kvn@4001 | 3595 | __ vmulpd($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256); |
kvn@4001 | 3596 | %} |
kvn@4001 | 3597 | ins_pipe( pipe_slow ); |
kvn@4001 | 3598 | %} |
kvn@4001 | 3599 | |
kvn@4001 | 3600 | instruct vmul2D_mem(vecX dst, vecX src, memory mem) %{ |
kvn@4001 | 3601 | predicate(UseAVX > 0 && n->as_Vector()->length() == 2); |
kvn@4001 | 3602 | match(Set dst (MulVD src (LoadVector mem))); |
kvn@4001 | 3603 | format %{ "vmulpd $dst,$src,$mem\t! mul packed2D" %} |
kvn@4001 | 3604 | ins_encode %{ |
kvn@4001 | 3605 | bool vector256 = false; |
kvn@4001 | 3606 | __ vmulpd($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector256); |
kvn@4001 | 3607 | %} |
kvn@4001 | 3608 | ins_pipe( pipe_slow ); |
kvn@4001 | 3609 | %} |
kvn@4001 | 3610 | |
kvn@4001 | 3611 | instruct vmul4D_reg(vecY dst, vecY src1, vecY src2) %{ |
kvn@4001 | 3612 | predicate(UseAVX > 0 && n->as_Vector()->length() == 4); |
kvn@4001 | 3613 | match(Set dst (MulVD src1 src2)); |
kvn@4001 | 3614 | format %{ "vmulpd $dst,$src1,$src2\t! mul packed4D" %} |
kvn@4001 | 3615 | ins_encode %{ |
kvn@4001 | 3616 | bool vector256 = true; |
kvn@4001 | 3617 | __ vmulpd($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256); |
kvn@4001 | 3618 | %} |
kvn@4001 | 3619 | ins_pipe( pipe_slow ); |
kvn@4001 | 3620 | %} |
kvn@4001 | 3621 | |
kvn@4001 | 3622 | instruct vmul4D_mem(vecY dst, vecY src, memory mem) %{ |
kvn@4001 | 3623 | predicate(UseAVX > 0 && n->as_Vector()->length() == 4); |
kvn@4001 | 3624 | match(Set dst (MulVD src (LoadVector mem))); |
kvn@4001 | 3625 | format %{ "vmulpd $dst,$src,$mem\t! mul packed4D" %} |
kvn@4001 | 3626 | ins_encode %{ |
kvn@4001 | 3627 | bool vector256 = true; |
kvn@4001 | 3628 | __ vmulpd($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector256); |
kvn@4001 | 3629 | %} |
kvn@4001 | 3630 | ins_pipe( pipe_slow ); |
kvn@4001 | 3631 | %} |
kvn@4001 | 3632 | |
kvn@4001 | 3633 | // --------------------------------- DIV -------------------------------------- |
kvn@4001 | 3634 | |
kvn@4001 | 3635 | // Floats vector div |
kvn@4001 | 3636 | instruct vdiv2F(vecD dst, vecD src) %{ |
kvn@4001 | 3637 | predicate(n->as_Vector()->length() == 2); |
kvn@4001 | 3638 | match(Set dst (DivVF dst src)); |
kvn@4001 | 3639 | format %{ "divps $dst,$src\t! div packed2F" %} |
kvn@4001 | 3640 | ins_encode %{ |
kvn@4001 | 3641 | __ divps($dst$$XMMRegister, $src$$XMMRegister); |
kvn@4001 | 3642 | %} |
kvn@4001 | 3643 | ins_pipe( pipe_slow ); |
kvn@4001 | 3644 | %} |
kvn@4001 | 3645 | |
kvn@4001 | 3646 | instruct vdiv2F_reg(vecD dst, vecD src1, vecD src2) %{ |
kvn@4001 | 3647 | predicate(UseAVX > 0 && n->as_Vector()->length() == 2); |
kvn@4001 | 3648 | match(Set dst (DivVF src1 src2)); |
kvn@4001 | 3649 | format %{ "vdivps $dst,$src1,$src2\t! div packed2F" %} |
kvn@4001 | 3650 | ins_encode %{ |
kvn@4001 | 3651 | bool vector256 = false; |
kvn@4001 | 3652 | __ vdivps($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256); |
kvn@4001 | 3653 | %} |
kvn@4001 | 3654 | ins_pipe( pipe_slow ); |
kvn@4001 | 3655 | %} |
kvn@4001 | 3656 | |
kvn@4001 | 3657 | instruct vdiv4F(vecX dst, vecX src) %{ |
kvn@4001 | 3658 | predicate(n->as_Vector()->length() == 4); |
kvn@4001 | 3659 | match(Set dst (DivVF dst src)); |
kvn@4001 | 3660 | format %{ "divps $dst,$src\t! div packed4F" %} |
kvn@4001 | 3661 | ins_encode %{ |
kvn@4001 | 3662 | __ divps($dst$$XMMRegister, $src$$XMMRegister); |
kvn@4001 | 3663 | %} |
kvn@4001 | 3664 | ins_pipe( pipe_slow ); |
kvn@4001 | 3665 | %} |
kvn@4001 | 3666 | |
kvn@4001 | 3667 | instruct vdiv4F_reg(vecX dst, vecX src1, vecX src2) %{ |
kvn@4001 | 3668 | predicate(UseAVX > 0 && n->as_Vector()->length() == 4); |
kvn@4001 | 3669 | match(Set dst (DivVF src1 src2)); |
kvn@4001 | 3670 | format %{ "vdivps $dst,$src1,$src2\t! div packed4F" %} |
kvn@4001 | 3671 | ins_encode %{ |
kvn@4001 | 3672 | bool vector256 = false; |
kvn@4001 | 3673 | __ vdivps($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256); |
kvn@4001 | 3674 | %} |
kvn@4001 | 3675 | ins_pipe( pipe_slow ); |
kvn@4001 | 3676 | %} |
kvn@4001 | 3677 | |
kvn@4001 | 3678 | instruct vdiv4F_mem(vecX dst, vecX src, memory mem) %{ |
kvn@4001 | 3679 | predicate(UseAVX > 0 && n->as_Vector()->length() == 4); |
kvn@4001 | 3680 | match(Set dst (DivVF src (LoadVector mem))); |
kvn@4001 | 3681 | format %{ "vdivps $dst,$src,$mem\t! div packed4F" %} |
kvn@4001 | 3682 | ins_encode %{ |
kvn@4001 | 3683 | bool vector256 = false; |
kvn@4001 | 3684 | __ vdivps($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector256); |
kvn@4001 | 3685 | %} |
kvn@4001 | 3686 | ins_pipe( pipe_slow ); |
kvn@4001 | 3687 | %} |
kvn@4001 | 3688 | |
kvn@4001 | 3689 | instruct vdiv8F_reg(vecY dst, vecY src1, vecY src2) %{ |
kvn@4001 | 3690 | predicate(UseAVX > 0 && n->as_Vector()->length() == 8); |
kvn@4001 | 3691 | match(Set dst (DivVF src1 src2)); |
kvn@4001 | 3692 | format %{ "vdivps $dst,$src1,$src2\t! div packed8F" %} |
kvn@4001 | 3693 | ins_encode %{ |
kvn@4001 | 3694 | bool vector256 = true; |
kvn@4001 | 3695 | __ vdivps($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256); |
kvn@4001 | 3696 | %} |
kvn@4001 | 3697 | ins_pipe( pipe_slow ); |
kvn@4001 | 3698 | %} |
kvn@4001 | 3699 | |
kvn@4001 | 3700 | instruct vdiv8F_mem(vecY dst, vecY src, memory mem) %{ |
kvn@4001 | 3701 | predicate(UseAVX > 0 && n->as_Vector()->length() == 8); |
kvn@4001 | 3702 | match(Set dst (DivVF src (LoadVector mem))); |
kvn@4001 | 3703 | format %{ "vdivps $dst,$src,$mem\t! div packed8F" %} |
kvn@4001 | 3704 | ins_encode %{ |
kvn@4001 | 3705 | bool vector256 = true; |
kvn@4001 | 3706 | __ vdivps($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector256); |
kvn@4001 | 3707 | %} |
kvn@4001 | 3708 | ins_pipe( pipe_slow ); |
kvn@4001 | 3709 | %} |
kvn@4001 | 3710 | |
kvn@4001 | 3711 | // Doubles vector div |
kvn@4001 | 3712 | instruct vdiv2D(vecX dst, vecX src) %{ |
kvn@4001 | 3713 | predicate(n->as_Vector()->length() == 2); |
kvn@4001 | 3714 | match(Set dst (DivVD dst src)); |
kvn@4001 | 3715 | format %{ "divpd $dst,$src\t! div packed2D" %} |
kvn@4001 | 3716 | ins_encode %{ |
kvn@4001 | 3717 | __ divpd($dst$$XMMRegister, $src$$XMMRegister); |
kvn@4001 | 3718 | %} |
kvn@4001 | 3719 | ins_pipe( pipe_slow ); |
kvn@4001 | 3720 | %} |
kvn@4001 | 3721 | |
kvn@4001 | 3722 | instruct vdiv2D_reg(vecX dst, vecX src1, vecX src2) %{ |
kvn@4001 | 3723 | predicate(UseAVX > 0 && n->as_Vector()->length() == 2); |
kvn@4001 | 3724 | match(Set dst (DivVD src1 src2)); |
kvn@4001 | 3725 | format %{ "vdivpd $dst,$src1,$src2\t! div packed2D" %} |
kvn@4001 | 3726 | ins_encode %{ |
kvn@4001 | 3727 | bool vector256 = false; |
kvn@4001 | 3728 | __ vdivpd($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256); |
kvn@4001 | 3729 | %} |
kvn@4001 | 3730 | ins_pipe( pipe_slow ); |
kvn@4001 | 3731 | %} |
kvn@4001 | 3732 | |
kvn@4001 | 3733 | instruct vdiv2D_mem(vecX dst, vecX src, memory mem) %{ |
kvn@4001 | 3734 | predicate(UseAVX > 0 && n->as_Vector()->length() == 2); |
kvn@4001 | 3735 | match(Set dst (DivVD src (LoadVector mem))); |
kvn@4001 | 3736 | format %{ "vdivpd $dst,$src,$mem\t! div packed2D" %} |
kvn@4001 | 3737 | ins_encode %{ |
kvn@4001 | 3738 | bool vector256 = false; |
kvn@4001 | 3739 | __ vdivpd($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector256); |
kvn@4001 | 3740 | %} |
kvn@4001 | 3741 | ins_pipe( pipe_slow ); |
kvn@4001 | 3742 | %} |
kvn@4001 | 3743 | |
kvn@4001 | 3744 | instruct vdiv4D_reg(vecY dst, vecY src1, vecY src2) %{ |
kvn@4001 | 3745 | predicate(UseAVX > 0 && n->as_Vector()->length() == 4); |
kvn@4001 | 3746 | match(Set dst (DivVD src1 src2)); |
kvn@4001 | 3747 | format %{ "vdivpd $dst,$src1,$src2\t! div packed4D" %} |
kvn@4001 | 3748 | ins_encode %{ |
kvn@4001 | 3749 | bool vector256 = true; |
kvn@4001 | 3750 | __ vdivpd($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256); |
kvn@4001 | 3751 | %} |
kvn@4001 | 3752 | ins_pipe( pipe_slow ); |
kvn@4001 | 3753 | %} |
kvn@4001 | 3754 | |
kvn@4001 | 3755 | instruct vdiv4D_mem(vecY dst, vecY src, memory mem) %{ |
kvn@4001 | 3756 | predicate(UseAVX > 0 && n->as_Vector()->length() == 4); |
kvn@4001 | 3757 | match(Set dst (DivVD src (LoadVector mem))); |
kvn@4001 | 3758 | format %{ "vdivpd $dst,$src,$mem\t! div packed4D" %} |
kvn@4001 | 3759 | ins_encode %{ |
kvn@4001 | 3760 | bool vector256 = true; |
kvn@4001 | 3761 | __ vdivpd($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector256); |
kvn@4001 | 3762 | %} |
kvn@4001 | 3763 | ins_pipe( pipe_slow ); |
kvn@4001 | 3764 | %} |
kvn@4001 | 3765 | |
kvn@4134 | 3766 | // ------------------------------ Shift --------------------------------------- |
kvn@4134 | 3767 | |
kvn@4134 | 3768 | // Left and right shift count vectors are the same on x86 |
kvn@4134 | 3769 | // (only lowest bits of xmm reg are used for count). |
kvn@4134 | 3770 | instruct vshiftcnt(vecS dst, rRegI cnt) %{ |
kvn@4134 | 3771 | match(Set dst (LShiftCntV cnt)); |
kvn@4134 | 3772 | match(Set dst (RShiftCntV cnt)); |
kvn@4134 | 3773 | format %{ "movd $dst,$cnt\t! load shift count" %} |
kvn@4134 | 3774 | ins_encode %{ |
kvn@4134 | 3775 | __ movdl($dst$$XMMRegister, $cnt$$Register); |
kvn@4134 | 3776 | %} |
kvn@4134 | 3777 | ins_pipe( pipe_slow ); |
kvn@4134 | 3778 | %} |
kvn@4134 | 3779 | |
kvn@4001 | 3780 | // ------------------------------ LeftShift ----------------------------------- |
kvn@4001 | 3781 | |
kvn@4001 | 3782 | // Shorts/Chars vector left shift |
kvn@4134 | 3783 | instruct vsll2S(vecS dst, vecS shift) %{ |
kvn@4001 | 3784 | predicate(n->as_Vector()->length() == 2); |
kvn@4001 | 3785 | match(Set dst (LShiftVS dst shift)); |
kvn@4001 | 3786 | format %{ "psllw $dst,$shift\t! left shift packed2S" %} |
kvn@4001 | 3787 | ins_encode %{ |
kvn@4001 | 3788 | __ psllw($dst$$XMMRegister, $shift$$XMMRegister); |
kvn@4001 | 3789 | %} |
kvn@4001 | 3790 | ins_pipe( pipe_slow ); |
kvn@4001 | 3791 | %} |
kvn@4001 | 3792 | |
kvn@4001 | 3793 | instruct vsll2S_imm(vecS dst, immI8 shift) %{ |
kvn@4001 | 3794 | predicate(n->as_Vector()->length() == 2); |
kvn@4001 | 3795 | match(Set dst (LShiftVS dst shift)); |
kvn@4001 | 3796 | format %{ "psllw $dst,$shift\t! left shift packed2S" %} |
kvn@4001 | 3797 | ins_encode %{ |
kvn@4001 | 3798 | __ psllw($dst$$XMMRegister, (int)$shift$$constant); |
kvn@4001 | 3799 | %} |
kvn@4001 | 3800 | ins_pipe( pipe_slow ); |
kvn@4001 | 3801 | %} |
kvn@4001 | 3802 | |
kvn@4134 | 3803 | instruct vsll2S_reg(vecS dst, vecS src, vecS shift) %{ |
kvn@4001 | 3804 | predicate(UseAVX > 0 && n->as_Vector()->length() == 2); |
kvn@4001 | 3805 | match(Set dst (LShiftVS src shift)); |
kvn@4001 | 3806 | format %{ "vpsllw $dst,$src,$shift\t! left shift packed2S" %} |
kvn@4001 | 3807 | ins_encode %{ |
kvn@4001 | 3808 | bool vector256 = false; |
kvn@4001 | 3809 | __ vpsllw($dst$$XMMRegister, $src$$XMMRegister, $shift$$XMMRegister, vector256); |
kvn@4001 | 3810 | %} |
kvn@4001 | 3811 | ins_pipe( pipe_slow ); |
kvn@4001 | 3812 | %} |
kvn@4001 | 3813 | |
kvn@4001 | 3814 | instruct vsll2S_reg_imm(vecS dst, vecS src, immI8 shift) %{ |
kvn@4001 | 3815 | predicate(UseAVX > 0 && n->as_Vector()->length() == 2); |
kvn@4001 | 3816 | match(Set dst (LShiftVS src shift)); |
kvn@4001 | 3817 | format %{ "vpsllw $dst,$src,$shift\t! left shift packed2S" %} |
kvn@4001 | 3818 | ins_encode %{ |
kvn@4001 | 3819 | bool vector256 = false; |
kvn@4001 | 3820 | __ vpsllw($dst$$XMMRegister, $src$$XMMRegister, (int)$shift$$constant, vector256); |
kvn@4001 | 3821 | %} |
kvn@4001 | 3822 | ins_pipe( pipe_slow ); |
kvn@4001 | 3823 | %} |
kvn@4001 | 3824 | |
kvn@4134 | 3825 | instruct vsll4S(vecD dst, vecS shift) %{ |
kvn@4001 | 3826 | predicate(n->as_Vector()->length() == 4); |
kvn@4001 | 3827 | match(Set dst (LShiftVS dst shift)); |
kvn@4001 | 3828 | format %{ "psllw $dst,$shift\t! left shift packed4S" %} |
kvn@4001 | 3829 | ins_encode %{ |
kvn@4001 | 3830 | __ psllw($dst$$XMMRegister, $shift$$XMMRegister); |
kvn@4001 | 3831 | %} |
kvn@4001 | 3832 | ins_pipe( pipe_slow ); |
kvn@4001 | 3833 | %} |
kvn@4001 | 3834 | |
kvn@4001 | 3835 | instruct vsll4S_imm(vecD dst, immI8 shift) %{ |
kvn@4001 | 3836 | predicate(n->as_Vector()->length() == 4); |
kvn@4001 | 3837 | match(Set dst (LShiftVS dst shift)); |
kvn@4001 | 3838 | format %{ "psllw $dst,$shift\t! left shift packed4S" %} |
kvn@4001 | 3839 | ins_encode %{ |
kvn@4001 | 3840 | __ psllw($dst$$XMMRegister, (int)$shift$$constant); |
kvn@4001 | 3841 | %} |
kvn@4001 | 3842 | ins_pipe( pipe_slow ); |
kvn@4001 | 3843 | %} |
kvn@4001 | 3844 | |
kvn@4134 | 3845 | instruct vsll4S_reg(vecD dst, vecD src, vecS shift) %{ |
kvn@4001 | 3846 | predicate(UseAVX > 0 && n->as_Vector()->length() == 4); |
kvn@4001 | 3847 | match(Set dst (LShiftVS src shift)); |
kvn@4001 | 3848 | format %{ "vpsllw $dst,$src,$shift\t! left shift packed4S" %} |
kvn@4001 | 3849 | ins_encode %{ |
kvn@4001 | 3850 | bool vector256 = false; |
kvn@4001 | 3851 | __ vpsllw($dst$$XMMRegister, $src$$XMMRegister, $shift$$XMMRegister, vector256); |
kvn@4001 | 3852 | %} |
kvn@4001 | 3853 | ins_pipe( pipe_slow ); |
kvn@4001 | 3854 | %} |
kvn@4001 | 3855 | |
kvn@4001 | 3856 | instruct vsll4S_reg_imm(vecD dst, vecD src, immI8 shift) %{ |
kvn@4001 | 3857 | predicate(UseAVX > 0 && n->as_Vector()->length() == 4); |
kvn@4001 | 3858 | match(Set dst (LShiftVS src shift)); |
kvn@4001 | 3859 | format %{ "vpsllw $dst,$src,$shift\t! left shift packed4S" %} |
kvn@4001 | 3860 | ins_encode %{ |
kvn@4001 | 3861 | bool vector256 = false; |
kvn@4001 | 3862 | __ vpsllw($dst$$XMMRegister, $src$$XMMRegister, (int)$shift$$constant, vector256); |
kvn@4001 | 3863 | %} |
kvn@4001 | 3864 | ins_pipe( pipe_slow ); |
kvn@4001 | 3865 | %} |
kvn@4001 | 3866 | |
kvn@4134 | 3867 | instruct vsll8S(vecX dst, vecS shift) %{ |
kvn@4001 | 3868 | predicate(n->as_Vector()->length() == 8); |
kvn@4001 | 3869 | match(Set dst (LShiftVS dst shift)); |
kvn@4001 | 3870 | format %{ "psllw $dst,$shift\t! left shift packed8S" %} |
kvn@4001 | 3871 | ins_encode %{ |
kvn@4001 | 3872 | __ psllw($dst$$XMMRegister, $shift$$XMMRegister); |
kvn@4001 | 3873 | %} |
kvn@4001 | 3874 | ins_pipe( pipe_slow ); |
kvn@4001 | 3875 | %} |
kvn@4001 | 3876 | |
kvn@4001 | 3877 | instruct vsll8S_imm(vecX dst, immI8 shift) %{ |
kvn@4001 | 3878 | predicate(n->as_Vector()->length() == 8); |
kvn@4001 | 3879 | match(Set dst (LShiftVS dst shift)); |
kvn@4001 | 3880 | format %{ "psllw $dst,$shift\t! left shift packed8S" %} |
kvn@4001 | 3881 | ins_encode %{ |
kvn@4001 | 3882 | __ psllw($dst$$XMMRegister, (int)$shift$$constant); |
kvn@4001 | 3883 | %} |
kvn@4001 | 3884 | ins_pipe( pipe_slow ); |
kvn@4001 | 3885 | %} |
kvn@4001 | 3886 | |
kvn@4134 | 3887 | instruct vsll8S_reg(vecX dst, vecX src, vecS shift) %{ |
kvn@4001 | 3888 | predicate(UseAVX > 0 && n->as_Vector()->length() == 8); |
kvn@4001 | 3889 | match(Set dst (LShiftVS src shift)); |
kvn@4001 | 3890 | format %{ "vpsllw $dst,$src,$shift\t! left shift packed8S" %} |
kvn@4001 | 3891 | ins_encode %{ |
kvn@4001 | 3892 | bool vector256 = false; |
kvn@4001 | 3893 | __ vpsllw($dst$$XMMRegister, $src$$XMMRegister, $shift$$XMMRegister, vector256); |
kvn@4001 | 3894 | %} |
kvn@4001 | 3895 | ins_pipe( pipe_slow ); |
kvn@4001 | 3896 | %} |
kvn@4001 | 3897 | |
kvn@4001 | 3898 | instruct vsll8S_reg_imm(vecX dst, vecX src, immI8 shift) %{ |
kvn@4001 | 3899 | predicate(UseAVX > 0 && n->as_Vector()->length() == 8); |
kvn@4001 | 3900 | match(Set dst (LShiftVS src shift)); |
kvn@4001 | 3901 | format %{ "vpsllw $dst,$src,$shift\t! left shift packed8S" %} |
kvn@4001 | 3902 | ins_encode %{ |
kvn@4001 | 3903 | bool vector256 = false; |
kvn@4001 | 3904 | __ vpsllw($dst$$XMMRegister, $src$$XMMRegister, (int)$shift$$constant, vector256); |
kvn@4001 | 3905 | %} |
kvn@4001 | 3906 | ins_pipe( pipe_slow ); |
kvn@4001 | 3907 | %} |
kvn@4001 | 3908 | |
kvn@4134 | 3909 | instruct vsll16S_reg(vecY dst, vecY src, vecS shift) %{ |
kvn@4001 | 3910 | predicate(UseAVX > 1 && n->as_Vector()->length() == 16); |
kvn@4001 | 3911 | match(Set dst (LShiftVS src shift)); |
kvn@4001 | 3912 | format %{ "vpsllw $dst,$src,$shift\t! left shift packed16S" %} |
kvn@4001 | 3913 | ins_encode %{ |
kvn@4001 | 3914 | bool vector256 = true; |
kvn@4001 | 3915 | __ vpsllw($dst$$XMMRegister, $src$$XMMRegister, $shift$$XMMRegister, vector256); |
kvn@4001 | 3916 | %} |
kvn@4001 | 3917 | ins_pipe( pipe_slow ); |
kvn@4001 | 3918 | %} |
kvn@4001 | 3919 | |
kvn@4001 | 3920 | instruct vsll16S_reg_imm(vecY dst, vecY src, immI8 shift) %{ |
kvn@4001 | 3921 | predicate(UseAVX > 1 && n->as_Vector()->length() == 16); |
kvn@4001 | 3922 | match(Set dst (LShiftVS src shift)); |
kvn@4001 | 3923 | format %{ "vpsllw $dst,$src,$shift\t! left shift packed16S" %} |
kvn@4001 | 3924 | ins_encode %{ |
kvn@4001 | 3925 | bool vector256 = true; |
kvn@4001 | 3926 | __ vpsllw($dst$$XMMRegister, $src$$XMMRegister, (int)$shift$$constant, vector256); |
kvn@4001 | 3927 | %} |
kvn@4001 | 3928 | ins_pipe( pipe_slow ); |
kvn@4001 | 3929 | %} |
kvn@4001 | 3930 | |
kvn@4001 | 3931 | // Integers vector left shift |
kvn@4134 | 3932 | instruct vsll2I(vecD dst, vecS shift) %{ |
kvn@4001 | 3933 | predicate(n->as_Vector()->length() == 2); |
kvn@4001 | 3934 | match(Set dst (LShiftVI dst shift)); |
kvn@4001 | 3935 | format %{ "pslld $dst,$shift\t! left shift packed2I" %} |
kvn@4001 | 3936 | ins_encode %{ |
kvn@4001 | 3937 | __ pslld($dst$$XMMRegister, $shift$$XMMRegister); |
kvn@4001 | 3938 | %} |
kvn@4001 | 3939 | ins_pipe( pipe_slow ); |
kvn@4001 | 3940 | %} |
kvn@4001 | 3941 | |
kvn@4001 | 3942 | instruct vsll2I_imm(vecD dst, immI8 shift) %{ |
kvn@4001 | 3943 | predicate(n->as_Vector()->length() == 2); |
kvn@4001 | 3944 | match(Set dst (LShiftVI dst shift)); |
kvn@4001 | 3945 | format %{ "pslld $dst,$shift\t! left shift packed2I" %} |
kvn@4001 | 3946 | ins_encode %{ |
kvn@4001 | 3947 | __ pslld($dst$$XMMRegister, (int)$shift$$constant); |
kvn@4001 | 3948 | %} |
kvn@4001 | 3949 | ins_pipe( pipe_slow ); |
kvn@4001 | 3950 | %} |
kvn@4001 | 3951 | |
kvn@4134 | 3952 | instruct vsll2I_reg(vecD dst, vecD src, vecS shift) %{ |
kvn@4001 | 3953 | predicate(UseAVX > 0 && n->as_Vector()->length() == 2); |
kvn@4001 | 3954 | match(Set dst (LShiftVI src shift)); |
kvn@4001 | 3955 | format %{ "vpslld $dst,$src,$shift\t! left shift packed2I" %} |
kvn@4001 | 3956 | ins_encode %{ |
kvn@4001 | 3957 | bool vector256 = false; |
kvn@4001 | 3958 | __ vpslld($dst$$XMMRegister, $src$$XMMRegister, $shift$$XMMRegister, vector256); |
kvn@4001 | 3959 | %} |
kvn@4001 | 3960 | ins_pipe( pipe_slow ); |
kvn@4001 | 3961 | %} |
kvn@4001 | 3962 | |
kvn@4001 | 3963 | instruct vsll2I_reg_imm(vecD dst, vecD src, immI8 shift) %{ |
kvn@4001 | 3964 | predicate(UseAVX > 0 && n->as_Vector()->length() == 2); |
kvn@4001 | 3965 | match(Set dst (LShiftVI src shift)); |
kvn@4001 | 3966 | format %{ "vpslld $dst,$src,$shift\t! left shift packed2I" %} |
kvn@4001 | 3967 | ins_encode %{ |
kvn@4001 | 3968 | bool vector256 = false; |
kvn@4001 | 3969 | __ vpslld($dst$$XMMRegister, $src$$XMMRegister, (int)$shift$$constant, vector256); |
kvn@4001 | 3970 | %} |
kvn@4001 | 3971 | ins_pipe( pipe_slow ); |
kvn@4001 | 3972 | %} |
kvn@4001 | 3973 | |
kvn@4134 | 3974 | instruct vsll4I(vecX dst, vecS shift) %{ |
kvn@4001 | 3975 | predicate(n->as_Vector()->length() == 4); |
kvn@4001 | 3976 | match(Set dst (LShiftVI dst shift)); |
kvn@4001 | 3977 | format %{ "pslld $dst,$shift\t! left shift packed4I" %} |
kvn@4001 | 3978 | ins_encode %{ |
kvn@4001 | 3979 | __ pslld($dst$$XMMRegister, $shift$$XMMRegister); |
kvn@4001 | 3980 | %} |
kvn@4001 | 3981 | ins_pipe( pipe_slow ); |
kvn@4001 | 3982 | %} |
kvn@4001 | 3983 | |
kvn@4001 | 3984 | instruct vsll4I_imm(vecX dst, immI8 shift) %{ |
kvn@4001 | 3985 | predicate(n->as_Vector()->length() == 4); |
kvn@4001 | 3986 | match(Set dst (LShiftVI dst shift)); |
kvn@4001 | 3987 | format %{ "pslld $dst,$shift\t! left shift packed4I" %} |
kvn@4001 | 3988 | ins_encode %{ |
kvn@4001 | 3989 | __ pslld($dst$$XMMRegister, (int)$shift$$constant); |
kvn@4001 | 3990 | %} |
kvn@4001 | 3991 | ins_pipe( pipe_slow ); |
kvn@4001 | 3992 | %} |
kvn@4001 | 3993 | |
kvn@4134 | 3994 | instruct vsll4I_reg(vecX dst, vecX src, vecS shift) %{ |
kvn@4001 | 3995 | predicate(UseAVX > 0 && n->as_Vector()->length() == 4); |
kvn@4001 | 3996 | match(Set dst (LShiftVI src shift)); |
kvn@4001 | 3997 | format %{ "vpslld $dst,$src,$shift\t! left shift packed4I" %} |
kvn@4001 | 3998 | ins_encode %{ |
kvn@4001 | 3999 | bool vector256 = false; |
kvn@4001 | 4000 | __ vpslld($dst$$XMMRegister, $src$$XMMRegister, $shift$$XMMRegister, vector256); |
kvn@4001 | 4001 | %} |
kvn@4001 | 4002 | ins_pipe( pipe_slow ); |
kvn@4001 | 4003 | %} |
kvn@4001 | 4004 | |
kvn@4001 | 4005 | instruct vsll4I_reg_imm(vecX dst, vecX src, immI8 shift) %{ |
kvn@4001 | 4006 | predicate(UseAVX > 0 && n->as_Vector()->length() == 4); |
kvn@4001 | 4007 | match(Set dst (LShiftVI src shift)); |
kvn@4001 | 4008 | format %{ "vpslld $dst,$src,$shift\t! left shift packed4I" %} |
kvn@4001 | 4009 | ins_encode %{ |
kvn@4001 | 4010 | bool vector256 = false; |
kvn@4001 | 4011 | __ vpslld($dst$$XMMRegister, $src$$XMMRegister, (int)$shift$$constant, vector256); |
kvn@4001 | 4012 | %} |
kvn@4001 | 4013 | ins_pipe( pipe_slow ); |
kvn@4001 | 4014 | %} |
kvn@4001 | 4015 | |
kvn@4134 | 4016 | instruct vsll8I_reg(vecY dst, vecY src, vecS shift) %{ |
kvn@4001 | 4017 | predicate(UseAVX > 1 && n->as_Vector()->length() == 8); |
kvn@4001 | 4018 | match(Set dst (LShiftVI src shift)); |
kvn@4001 | 4019 | format %{ "vpslld $dst,$src,$shift\t! left shift packed8I" %} |
kvn@4001 | 4020 | ins_encode %{ |
kvn@4001 | 4021 | bool vector256 = true; |
kvn@4001 | 4022 | __ vpslld($dst$$XMMRegister, $src$$XMMRegister, $shift$$XMMRegister, vector256); |
kvn@4001 | 4023 | %} |
kvn@4001 | 4024 | ins_pipe( pipe_slow ); |
kvn@4001 | 4025 | %} |
kvn@4001 | 4026 | |
kvn@4001 | 4027 | instruct vsll8I_reg_imm(vecY dst, vecY src, immI8 shift) %{ |
kvn@4001 | 4028 | predicate(UseAVX > 1 && n->as_Vector()->length() == 8); |
kvn@4001 | 4029 | match(Set dst (LShiftVI src shift)); |
kvn@4001 | 4030 | format %{ "vpslld $dst,$src,$shift\t! left shift packed8I" %} |
kvn@4001 | 4031 | ins_encode %{ |
kvn@4001 | 4032 | bool vector256 = true; |
kvn@4001 | 4033 | __ vpslld($dst$$XMMRegister, $src$$XMMRegister, (int)$shift$$constant, vector256); |
kvn@4001 | 4034 | %} |
kvn@4001 | 4035 | ins_pipe( pipe_slow ); |
kvn@4001 | 4036 | %} |
kvn@4001 | 4037 | |
kvn@4001 | 4038 | // Longs vector left shift |
kvn@4134 | 4039 | instruct vsll2L(vecX dst, vecS shift) %{ |
kvn@4001 | 4040 | predicate(n->as_Vector()->length() == 2); |
kvn@4001 | 4041 | match(Set dst (LShiftVL dst shift)); |
kvn@4001 | 4042 | format %{ "psllq $dst,$shift\t! left shift packed2L" %} |
kvn@4001 | 4043 | ins_encode %{ |
kvn@4001 | 4044 | __ psllq($dst$$XMMRegister, $shift$$XMMRegister); |
kvn@4001 | 4045 | %} |
kvn@4001 | 4046 | ins_pipe( pipe_slow ); |
kvn@4001 | 4047 | %} |
kvn@4001 | 4048 | |
kvn@4001 | 4049 | instruct vsll2L_imm(vecX dst, immI8 shift) %{ |
kvn@4001 | 4050 | predicate(n->as_Vector()->length() == 2); |
kvn@4001 | 4051 | match(Set dst (LShiftVL dst shift)); |
kvn@4001 | 4052 | format %{ "psllq $dst,$shift\t! left shift packed2L" %} |
kvn@4001 | 4053 | ins_encode %{ |
kvn@4001 | 4054 | __ psllq($dst$$XMMRegister, (int)$shift$$constant); |
kvn@4001 | 4055 | %} |
kvn@4001 | 4056 | ins_pipe( pipe_slow ); |
kvn@4001 | 4057 | %} |
kvn@4001 | 4058 | |
kvn@4134 | 4059 | instruct vsll2L_reg(vecX dst, vecX src, vecS shift) %{ |
kvn@4001 | 4060 | predicate(UseAVX > 0 && n->as_Vector()->length() == 2); |
kvn@4001 | 4061 | match(Set dst (LShiftVL src shift)); |
kvn@4001 | 4062 | format %{ "vpsllq $dst,$src,$shift\t! left shift packed2L" %} |
kvn@4001 | 4063 | ins_encode %{ |
kvn@4001 | 4064 | bool vector256 = false; |
kvn@4001 | 4065 | __ vpsllq($dst$$XMMRegister, $src$$XMMRegister, $shift$$XMMRegister, vector256); |
kvn@4001 | 4066 | %} |
kvn@4001 | 4067 | ins_pipe( pipe_slow ); |
kvn@4001 | 4068 | %} |
kvn@4001 | 4069 | |
kvn@4001 | 4070 | instruct vsll2L_reg_imm(vecX dst, vecX src, immI8 shift) %{ |
kvn@4001 | 4071 | predicate(UseAVX > 0 && n->as_Vector()->length() == 2); |
kvn@4001 | 4072 | match(Set dst (LShiftVL src shift)); |
kvn@4001 | 4073 | format %{ "vpsllq $dst,$src,$shift\t! left shift packed2L" %} |
kvn@4001 | 4074 | ins_encode %{ |
kvn@4001 | 4075 | bool vector256 = false; |
kvn@4001 | 4076 | __ vpsllq($dst$$XMMRegister, $src$$XMMRegister, (int)$shift$$constant, vector256); |
kvn@4001 | 4077 | %} |
kvn@4001 | 4078 | ins_pipe( pipe_slow ); |
kvn@4001 | 4079 | %} |
kvn@4001 | 4080 | |
kvn@4134 | 4081 | instruct vsll4L_reg(vecY dst, vecY src, vecS shift) %{ |
kvn@4001 | 4082 | predicate(UseAVX > 1 && n->as_Vector()->length() == 4); |
kvn@4001 | 4083 | match(Set dst (LShiftVL src shift)); |
kvn@4001 | 4084 | format %{ "vpsllq $dst,$src,$shift\t! left shift packed4L" %} |
kvn@4001 | 4085 | ins_encode %{ |
kvn@4001 | 4086 | bool vector256 = true; |
kvn@4001 | 4087 | __ vpsllq($dst$$XMMRegister, $src$$XMMRegister, $shift$$XMMRegister, vector256); |
kvn@4001 | 4088 | %} |
kvn@4001 | 4089 | ins_pipe( pipe_slow ); |
kvn@4001 | 4090 | %} |
kvn@4001 | 4091 | |
kvn@4001 | 4092 | instruct vsll4L_reg_imm(vecY dst, vecY src, immI8 shift) %{ |
kvn@4001 | 4093 | predicate(UseAVX > 1 && n->as_Vector()->length() == 4); |
kvn@4001 | 4094 | match(Set dst (LShiftVL src shift)); |
kvn@4001 | 4095 | format %{ "vpsllq $dst,$src,$shift\t! left shift packed4L" %} |
kvn@4001 | 4096 | ins_encode %{ |
kvn@4001 | 4097 | bool vector256 = true; |
kvn@4001 | 4098 | __ vpsllq($dst$$XMMRegister, $src$$XMMRegister, (int)$shift$$constant, vector256); |
kvn@4001 | 4099 | %} |
kvn@4001 | 4100 | ins_pipe( pipe_slow ); |
kvn@4001 | 4101 | %} |
kvn@4001 | 4102 | |
kvn@4001 | 4103 | // ----------------------- LogicalRightShift ----------------------------------- |
kvn@4001 | 4104 | |
kvn@4204 | 4105 | // Shorts vector logical right shift produces incorrect Java result |
kvn@4001 | 4106 | // for negative data because java code convert short value into int with |
kvn@4204 | 4107 | // sign extension before a shift. But char vectors are fine since chars are |
kvn@4204 | 4108 | // unsigned values. |
kvn@4204 | 4109 | |
kvn@4204 | 4110 | instruct vsrl2S(vecS dst, vecS shift) %{ |
kvn@4204 | 4111 | predicate(n->as_Vector()->length() == 2); |
kvn@4204 | 4112 | match(Set dst (URShiftVS dst shift)); |
kvn@4204 | 4113 | format %{ "psrlw $dst,$shift\t! logical right shift packed2S" %} |
kvn@4204 | 4114 | ins_encode %{ |
kvn@4204 | 4115 | __ psrlw($dst$$XMMRegister, $shift$$XMMRegister); |
kvn@4204 | 4116 | %} |
kvn@4204 | 4117 | ins_pipe( pipe_slow ); |
kvn@4204 | 4118 | %} |
kvn@4204 | 4119 | |
kvn@4204 | 4120 | instruct vsrl2S_imm(vecS dst, immI8 shift) %{ |
kvn@4204 | 4121 | predicate(n->as_Vector()->length() == 2); |
kvn@4204 | 4122 | match(Set dst (URShiftVS dst shift)); |
kvn@4204 | 4123 | format %{ "psrlw $dst,$shift\t! logical right shift packed2S" %} |
kvn@4204 | 4124 | ins_encode %{ |
kvn@4204 | 4125 | __ psrlw($dst$$XMMRegister, (int)$shift$$constant); |
kvn@4204 | 4126 | %} |
kvn@4204 | 4127 | ins_pipe( pipe_slow ); |
kvn@4204 | 4128 | %} |
kvn@4204 | 4129 | |
kvn@4204 | 4130 | instruct vsrl2S_reg(vecS dst, vecS src, vecS shift) %{ |
kvn@4204 | 4131 | predicate(UseAVX > 0 && n->as_Vector()->length() == 2); |
kvn@4204 | 4132 | match(Set dst (URShiftVS src shift)); |
kvn@4204 | 4133 | format %{ "vpsrlw $dst,$src,$shift\t! logical right shift packed2S" %} |
kvn@4204 | 4134 | ins_encode %{ |
kvn@4204 | 4135 | bool vector256 = false; |
kvn@4204 | 4136 | __ vpsrlw($dst$$XMMRegister, $src$$XMMRegister, $shift$$XMMRegister, vector256); |
kvn@4204 | 4137 | %} |
kvn@4204 | 4138 | ins_pipe( pipe_slow ); |
kvn@4204 | 4139 | %} |
kvn@4204 | 4140 | |
kvn@4204 | 4141 | instruct vsrl2S_reg_imm(vecS dst, vecS src, immI8 shift) %{ |
kvn@4204 | 4142 | predicate(UseAVX > 0 && n->as_Vector()->length() == 2); |
kvn@4204 | 4143 | match(Set dst (URShiftVS src shift)); |
kvn@4204 | 4144 | format %{ "vpsrlw $dst,$src,$shift\t! logical right shift packed2S" %} |
kvn@4204 | 4145 | ins_encode %{ |
kvn@4204 | 4146 | bool vector256 = false; |
kvn@4204 | 4147 | __ vpsrlw($dst$$XMMRegister, $src$$XMMRegister, (int)$shift$$constant, vector256); |
kvn@4204 | 4148 | %} |
kvn@4204 | 4149 | ins_pipe( pipe_slow ); |
kvn@4204 | 4150 | %} |
kvn@4204 | 4151 | |
kvn@4204 | 4152 | instruct vsrl4S(vecD dst, vecS shift) %{ |
kvn@4204 | 4153 | predicate(n->as_Vector()->length() == 4); |
kvn@4204 | 4154 | match(Set dst (URShiftVS dst shift)); |
kvn@4204 | 4155 | format %{ "psrlw $dst,$shift\t! logical right shift packed4S" %} |
kvn@4204 | 4156 | ins_encode %{ |
kvn@4204 | 4157 | __ psrlw($dst$$XMMRegister, $shift$$XMMRegister); |
kvn@4204 | 4158 | %} |
kvn@4204 | 4159 | ins_pipe( pipe_slow ); |
kvn@4204 | 4160 | %} |
kvn@4204 | 4161 | |
kvn@4204 | 4162 | instruct vsrl4S_imm(vecD dst, immI8 shift) %{ |
kvn@4204 | 4163 | predicate(n->as_Vector()->length() == 4); |
kvn@4204 | 4164 | match(Set dst (URShiftVS dst shift)); |
kvn@4204 | 4165 | format %{ "psrlw $dst,$shift\t! logical right shift packed4S" %} |
kvn@4204 | 4166 | ins_encode %{ |
kvn@4204 | 4167 | __ psrlw($dst$$XMMRegister, (int)$shift$$constant); |
kvn@4204 | 4168 | %} |
kvn@4204 | 4169 | ins_pipe( pipe_slow ); |
kvn@4204 | 4170 | %} |
kvn@4204 | 4171 | |
kvn@4204 | 4172 | instruct vsrl4S_reg(vecD dst, vecD src, vecS shift) %{ |
kvn@4204 | 4173 | predicate(UseAVX > 0 && n->as_Vector()->length() == 4); |
kvn@4204 | 4174 | match(Set dst (URShiftVS src shift)); |
kvn@4204 | 4175 | format %{ "vpsrlw $dst,$src,$shift\t! logical right shift packed4S" %} |
kvn@4204 | 4176 | ins_encode %{ |
kvn@4204 | 4177 | bool vector256 = false; |
kvn@4204 | 4178 | __ vpsrlw($dst$$XMMRegister, $src$$XMMRegister, $shift$$XMMRegister, vector256); |
kvn@4204 | 4179 | %} |
kvn@4204 | 4180 | ins_pipe( pipe_slow ); |
kvn@4204 | 4181 | %} |
kvn@4204 | 4182 | |
kvn@4204 | 4183 | instruct vsrl4S_reg_imm(vecD dst, vecD src, immI8 shift) %{ |
kvn@4204 | 4184 | predicate(UseAVX > 0 && n->as_Vector()->length() == 4); |
kvn@4204 | 4185 | match(Set dst (URShiftVS src shift)); |
kvn@4204 | 4186 | format %{ "vpsrlw $dst,$src,$shift\t! logical right shift packed4S" %} |
kvn@4204 | 4187 | ins_encode %{ |
kvn@4204 | 4188 | bool vector256 = false; |
kvn@4204 | 4189 | __ vpsrlw($dst$$XMMRegister, $src$$XMMRegister, (int)$shift$$constant, vector256); |
kvn@4204 | 4190 | %} |
kvn@4204 | 4191 | ins_pipe( pipe_slow ); |
kvn@4204 | 4192 | %} |
kvn@4204 | 4193 | |
kvn@4204 | 4194 | instruct vsrl8S(vecX dst, vecS shift) %{ |
kvn@4204 | 4195 | predicate(n->as_Vector()->length() == 8); |
kvn@4204 | 4196 | match(Set dst (URShiftVS dst shift)); |
kvn@4204 | 4197 | format %{ "psrlw $dst,$shift\t! logical right shift packed8S" %} |
kvn@4204 | 4198 | ins_encode %{ |
kvn@4204 | 4199 | __ psrlw($dst$$XMMRegister, $shift$$XMMRegister); |
kvn@4204 | 4200 | %} |
kvn@4204 | 4201 | ins_pipe( pipe_slow ); |
kvn@4204 | 4202 | %} |
kvn@4204 | 4203 | |
kvn@4204 | 4204 | instruct vsrl8S_imm(vecX dst, immI8 shift) %{ |
kvn@4204 | 4205 | predicate(n->as_Vector()->length() == 8); |
kvn@4204 | 4206 | match(Set dst (URShiftVS dst shift)); |
kvn@4204 | 4207 | format %{ "psrlw $dst,$shift\t! logical right shift packed8S" %} |
kvn@4204 | 4208 | ins_encode %{ |
kvn@4204 | 4209 | __ psrlw($dst$$XMMRegister, (int)$shift$$constant); |
kvn@4204 | 4210 | %} |
kvn@4204 | 4211 | ins_pipe( pipe_slow ); |
kvn@4204 | 4212 | %} |
kvn@4204 | 4213 | |
kvn@4204 | 4214 | instruct vsrl8S_reg(vecX dst, vecX src, vecS shift) %{ |
kvn@4204 | 4215 | predicate(UseAVX > 0 && n->as_Vector()->length() == 8); |
kvn@4204 | 4216 | match(Set dst (URShiftVS src shift)); |
kvn@4204 | 4217 | format %{ "vpsrlw $dst,$src,$shift\t! logical right shift packed8S" %} |
kvn@4204 | 4218 | ins_encode %{ |
kvn@4204 | 4219 | bool vector256 = false; |
kvn@4204 | 4220 | __ vpsrlw($dst$$XMMRegister, $src$$XMMRegister, $shift$$XMMRegister, vector256); |
kvn@4204 | 4221 | %} |
kvn@4204 | 4222 | ins_pipe( pipe_slow ); |
kvn@4204 | 4223 | %} |
kvn@4204 | 4224 | |
kvn@4204 | 4225 | instruct vsrl8S_reg_imm(vecX dst, vecX src, immI8 shift) %{ |
kvn@4204 | 4226 | predicate(UseAVX > 0 && n->as_Vector()->length() == 8); |
kvn@4204 | 4227 | match(Set dst (URShiftVS src shift)); |
kvn@4204 | 4228 | format %{ "vpsrlw $dst,$src,$shift\t! logical right shift packed8S" %} |
kvn@4204 | 4229 | ins_encode %{ |
kvn@4204 | 4230 | bool vector256 = false; |
kvn@4204 | 4231 | __ vpsrlw($dst$$XMMRegister, $src$$XMMRegister, (int)$shift$$constant, vector256); |
kvn@4204 | 4232 | %} |
kvn@4204 | 4233 | ins_pipe( pipe_slow ); |
kvn@4204 | 4234 | %} |
kvn@4204 | 4235 | |
kvn@4204 | 4236 | instruct vsrl16S_reg(vecY dst, vecY src, vecS shift) %{ |
kvn@4204 | 4237 | predicate(UseAVX > 1 && n->as_Vector()->length() == 16); |
kvn@4204 | 4238 | match(Set dst (URShiftVS src shift)); |
kvn@4204 | 4239 | format %{ "vpsrlw $dst,$src,$shift\t! logical right shift packed16S" %} |
kvn@4204 | 4240 | ins_encode %{ |
kvn@4204 | 4241 | bool vector256 = true; |
kvn@4204 | 4242 | __ vpsrlw($dst$$XMMRegister, $src$$XMMRegister, $shift$$XMMRegister, vector256); |
kvn@4204 | 4243 | %} |
kvn@4204 | 4244 | ins_pipe( pipe_slow ); |
kvn@4204 | 4245 | %} |
kvn@4204 | 4246 | |
kvn@4204 | 4247 | instruct vsrl16S_reg_imm(vecY dst, vecY src, immI8 shift) %{ |
kvn@4204 | 4248 | predicate(UseAVX > 1 && n->as_Vector()->length() == 16); |
kvn@4204 | 4249 | match(Set dst (URShiftVS src shift)); |
kvn@4204 | 4250 | format %{ "vpsrlw $dst,$src,$shift\t! logical right shift packed16S" %} |
kvn@4204 | 4251 | ins_encode %{ |
kvn@4204 | 4252 | bool vector256 = true; |
kvn@4204 | 4253 | __ vpsrlw($dst$$XMMRegister, $src$$XMMRegister, (int)$shift$$constant, vector256); |
kvn@4204 | 4254 | %} |
kvn@4204 | 4255 | ins_pipe( pipe_slow ); |
kvn@4204 | 4256 | %} |
kvn@4001 | 4257 | |
kvn@4001 | 4258 | // Integers vector logical right shift |
kvn@4134 | 4259 | instruct vsrl2I(vecD dst, vecS shift) %{ |
kvn@4001 | 4260 | predicate(n->as_Vector()->length() == 2); |
kvn@4001 | 4261 | match(Set dst (URShiftVI dst shift)); |
kvn@4001 | 4262 | format %{ "psrld $dst,$shift\t! logical right shift packed2I" %} |
kvn@4001 | 4263 | ins_encode %{ |
kvn@4001 | 4264 | __ psrld($dst$$XMMRegister, $shift$$XMMRegister); |
kvn@4001 | 4265 | %} |
kvn@4001 | 4266 | ins_pipe( pipe_slow ); |
kvn@4001 | 4267 | %} |
kvn@4001 | 4268 | |
kvn@4001 | 4269 | instruct vsrl2I_imm(vecD dst, immI8 shift) %{ |
kvn@4001 | 4270 | predicate(n->as_Vector()->length() == 2); |
kvn@4001 | 4271 | match(Set dst (URShiftVI dst shift)); |
kvn@4001 | 4272 | format %{ "psrld $dst,$shift\t! logical right shift packed2I" %} |
kvn@4001 | 4273 | ins_encode %{ |
kvn@4001 | 4274 | __ psrld($dst$$XMMRegister, (int)$shift$$constant); |
kvn@4001 | 4275 | %} |
kvn@4001 | 4276 | ins_pipe( pipe_slow ); |
kvn@4001 | 4277 | %} |
kvn@4001 | 4278 | |
kvn@4134 | 4279 | instruct vsrl2I_reg(vecD dst, vecD src, vecS shift) %{ |
kvn@4001 | 4280 | predicate(UseAVX > 0 && n->as_Vector()->length() == 2); |
kvn@4001 | 4281 | match(Set dst (URShiftVI src shift)); |
kvn@4001 | 4282 | format %{ "vpsrld $dst,$src,$shift\t! logical right shift packed2I" %} |
kvn@4001 | 4283 | ins_encode %{ |
kvn@4001 | 4284 | bool vector256 = false; |
kvn@4001 | 4285 | __ vpsrld($dst$$XMMRegister, $src$$XMMRegister, $shift$$XMMRegister, vector256); |
kvn@4001 | 4286 | %} |
kvn@4001 | 4287 | ins_pipe( pipe_slow ); |
kvn@4001 | 4288 | %} |
kvn@4001 | 4289 | |
kvn@4001 | 4290 | instruct vsrl2I_reg_imm(vecD dst, vecD src, immI8 shift) %{ |
kvn@4001 | 4291 | predicate(UseAVX > 0 && n->as_Vector()->length() == 2); |
kvn@4001 | 4292 | match(Set dst (URShiftVI src shift)); |
kvn@4001 | 4293 | format %{ "vpsrld $dst,$src,$shift\t! logical right shift packed2I" %} |
kvn@4001 | 4294 | ins_encode %{ |
kvn@4001 | 4295 | bool vector256 = false; |
kvn@4001 | 4296 | __ vpsrld($dst$$XMMRegister, $src$$XMMRegister, (int)$shift$$constant, vector256); |
kvn@4001 | 4297 | %} |
kvn@4001 | 4298 | ins_pipe( pipe_slow ); |
kvn@4001 | 4299 | %} |
kvn@4001 | 4300 | |
kvn@4134 | 4301 | instruct vsrl4I(vecX dst, vecS shift) %{ |
kvn@4001 | 4302 | predicate(n->as_Vector()->length() == 4); |
kvn@4001 | 4303 | match(Set dst (URShiftVI dst shift)); |
kvn@4001 | 4304 | format %{ "psrld $dst,$shift\t! logical right shift packed4I" %} |
kvn@4001 | 4305 | ins_encode %{ |
kvn@4001 | 4306 | __ psrld($dst$$XMMRegister, $shift$$XMMRegister); |
kvn@4001 | 4307 | %} |
kvn@4001 | 4308 | ins_pipe( pipe_slow ); |
kvn@4001 | 4309 | %} |
kvn@4001 | 4310 | |
kvn@4001 | 4311 | instruct vsrl4I_imm(vecX dst, immI8 shift) %{ |
kvn@4001 | 4312 | predicate(n->as_Vector()->length() == 4); |
kvn@4001 | 4313 | match(Set dst (URShiftVI dst shift)); |
kvn@4001 | 4314 | format %{ "psrld $dst,$shift\t! logical right shift packed4I" %} |
kvn@4001 | 4315 | ins_encode %{ |
kvn@4001 | 4316 | __ psrld($dst$$XMMRegister, (int)$shift$$constant); |
kvn@4001 | 4317 | %} |
kvn@4001 | 4318 | ins_pipe( pipe_slow ); |
kvn@4001 | 4319 | %} |
kvn@4001 | 4320 | |
kvn@4134 | 4321 | instruct vsrl4I_reg(vecX dst, vecX src, vecS shift) %{ |
kvn@4001 | 4322 | predicate(UseAVX > 0 && n->as_Vector()->length() == 4); |
kvn@4001 | 4323 | match(Set dst (URShiftVI src shift)); |
kvn@4001 | 4324 | format %{ "vpsrld $dst,$src,$shift\t! logical right shift packed4I" %} |
kvn@4001 | 4325 | ins_encode %{ |
kvn@4001 | 4326 | bool vector256 = false; |
kvn@4001 | 4327 | __ vpsrld($dst$$XMMRegister, $src$$XMMRegister, $shift$$XMMRegister, vector256); |
kvn@4001 | 4328 | %} |
kvn@4001 | 4329 | ins_pipe( pipe_slow ); |
kvn@4001 | 4330 | %} |
kvn@4001 | 4331 | |
kvn@4001 | 4332 | instruct vsrl4I_reg_imm(vecX dst, vecX src, immI8 shift) %{ |
kvn@4001 | 4333 | predicate(UseAVX > 0 && n->as_Vector()->length() == 4); |
kvn@4001 | 4334 | match(Set dst (URShiftVI src shift)); |
kvn@4001 | 4335 | format %{ "vpsrld $dst,$src,$shift\t! logical right shift packed4I" %} |
kvn@4001 | 4336 | ins_encode %{ |
kvn@4001 | 4337 | bool vector256 = false; |
kvn@4001 | 4338 | __ vpsrld($dst$$XMMRegister, $src$$XMMRegister, (int)$shift$$constant, vector256); |
kvn@4001 | 4339 | %} |
kvn@4001 | 4340 | ins_pipe( pipe_slow ); |
kvn@4001 | 4341 | %} |
kvn@4001 | 4342 | |
kvn@4134 | 4343 | instruct vsrl8I_reg(vecY dst, vecY src, vecS shift) %{ |
kvn@4001 | 4344 | predicate(UseAVX > 1 && n->as_Vector()->length() == 8); |
kvn@4001 | 4345 | match(Set dst (URShiftVI src shift)); |
kvn@4001 | 4346 | format %{ "vpsrld $dst,$src,$shift\t! logical right shift packed8I" %} |
kvn@4001 | 4347 | ins_encode %{ |
kvn@4001 | 4348 | bool vector256 = true; |
kvn@4001 | 4349 | __ vpsrld($dst$$XMMRegister, $src$$XMMRegister, $shift$$XMMRegister, vector256); |
kvn@4001 | 4350 | %} |
kvn@4001 | 4351 | ins_pipe( pipe_slow ); |
kvn@4001 | 4352 | %} |
kvn@4001 | 4353 | |
kvn@4001 | 4354 | instruct vsrl8I_reg_imm(vecY dst, vecY src, immI8 shift) %{ |
kvn@4001 | 4355 | predicate(UseAVX > 1 && n->as_Vector()->length() == 8); |
kvn@4001 | 4356 | match(Set dst (URShiftVI src shift)); |
kvn@4001 | 4357 | format %{ "vpsrld $dst,$src,$shift\t! logical right shift packed8I" %} |
kvn@4001 | 4358 | ins_encode %{ |
kvn@4001 | 4359 | bool vector256 = true; |
kvn@4001 | 4360 | __ vpsrld($dst$$XMMRegister, $src$$XMMRegister, (int)$shift$$constant, vector256); |
kvn@4001 | 4361 | %} |
kvn@4001 | 4362 | ins_pipe( pipe_slow ); |
kvn@4001 | 4363 | %} |
kvn@4001 | 4364 | |
kvn@4001 | 4365 | // Longs vector logical right shift |
kvn@4134 | 4366 | instruct vsrl2L(vecX dst, vecS shift) %{ |
kvn@4001 | 4367 | predicate(n->as_Vector()->length() == 2); |
kvn@4001 | 4368 | match(Set dst (URShiftVL dst shift)); |
kvn@4001 | 4369 | format %{ "psrlq $dst,$shift\t! logical right shift packed2L" %} |
kvn@4001 | 4370 | ins_encode %{ |
kvn@4001 | 4371 | __ psrlq($dst$$XMMRegister, $shift$$XMMRegister); |
kvn@4001 | 4372 | %} |
kvn@4001 | 4373 | ins_pipe( pipe_slow ); |
kvn@4001 | 4374 | %} |
kvn@4001 | 4375 | |
kvn@4001 | 4376 | instruct vsrl2L_imm(vecX dst, immI8 shift) %{ |
kvn@4001 | 4377 | predicate(n->as_Vector()->length() == 2); |
kvn@4001 | 4378 | match(Set dst (URShiftVL dst shift)); |
kvn@4001 | 4379 | format %{ "psrlq $dst,$shift\t! logical right shift packed2L" %} |
kvn@4001 | 4380 | ins_encode %{ |
kvn@4001 | 4381 | __ psrlq($dst$$XMMRegister, (int)$shift$$constant); |
kvn@4001 | 4382 | %} |
kvn@4001 | 4383 | ins_pipe( pipe_slow ); |
kvn@4001 | 4384 | %} |
kvn@4001 | 4385 | |
kvn@4134 | 4386 | instruct vsrl2L_reg(vecX dst, vecX src, vecS shift) %{ |
kvn@4001 | 4387 | predicate(UseAVX > 0 && n->as_Vector()->length() == 2); |
kvn@4001 | 4388 | match(Set dst (URShiftVL src shift)); |
kvn@4001 | 4389 | format %{ "vpsrlq $dst,$src,$shift\t! logical right shift packed2L" %} |
kvn@4001 | 4390 | ins_encode %{ |
kvn@4001 | 4391 | bool vector256 = false; |
kvn@4001 | 4392 | __ vpsrlq($dst$$XMMRegister, $src$$XMMRegister, $shift$$XMMRegister, vector256); |
kvn@4001 | 4393 | %} |
kvn@4001 | 4394 | ins_pipe( pipe_slow ); |
kvn@4001 | 4395 | %} |
kvn@4001 | 4396 | |
kvn@4001 | 4397 | instruct vsrl2L_reg_imm(vecX dst, vecX src, immI8 shift) %{ |
kvn@4001 | 4398 | predicate(UseAVX > 0 && n->as_Vector()->length() == 2); |
kvn@4001 | 4399 | match(Set dst (URShiftVL src shift)); |
kvn@4001 | 4400 | format %{ "vpsrlq $dst,$src,$shift\t! logical right shift packed2L" %} |
kvn@4001 | 4401 | ins_encode %{ |
kvn@4001 | 4402 | bool vector256 = false; |
kvn@4001 | 4403 | __ vpsrlq($dst$$XMMRegister, $src$$XMMRegister, (int)$shift$$constant, vector256); |
kvn@4001 | 4404 | %} |
kvn@4001 | 4405 | ins_pipe( pipe_slow ); |
kvn@4001 | 4406 | %} |
kvn@4001 | 4407 | |
kvn@4134 | 4408 | instruct vsrl4L_reg(vecY dst, vecY src, vecS shift) %{ |
kvn@4001 | 4409 | predicate(UseAVX > 1 && n->as_Vector()->length() == 4); |
kvn@4001 | 4410 | match(Set dst (URShiftVL src shift)); |
kvn@4001 | 4411 | format %{ "vpsrlq $dst,$src,$shift\t! logical right shift packed4L" %} |
kvn@4001 | 4412 | ins_encode %{ |
kvn@4001 | 4413 | bool vector256 = true; |
kvn@4001 | 4414 | __ vpsrlq($dst$$XMMRegister, $src$$XMMRegister, $shift$$XMMRegister, vector256); |
kvn@4001 | 4415 | %} |
kvn@4001 | 4416 | ins_pipe( pipe_slow ); |
kvn@4001 | 4417 | %} |
kvn@4001 | 4418 | |
kvn@4001 | 4419 | instruct vsrl4L_reg_imm(vecY dst, vecY src, immI8 shift) %{ |
kvn@4001 | 4420 | predicate(UseAVX > 1 && n->as_Vector()->length() == 4); |
kvn@4001 | 4421 | match(Set dst (URShiftVL src shift)); |
kvn@4001 | 4422 | format %{ "vpsrlq $dst,$src,$shift\t! logical right shift packed4L" %} |
kvn@4001 | 4423 | ins_encode %{ |
kvn@4001 | 4424 | bool vector256 = true; |
kvn@4001 | 4425 | __ vpsrlq($dst$$XMMRegister, $src$$XMMRegister, (int)$shift$$constant, vector256); |
kvn@4001 | 4426 | %} |
kvn@4001 | 4427 | ins_pipe( pipe_slow ); |
kvn@4001 | 4428 | %} |
kvn@4001 | 4429 | |
kvn@4001 | 4430 | // ------------------- ArithmeticRightShift ----------------------------------- |
kvn@4001 | 4431 | |
kvn@4001 | 4432 | // Shorts/Chars vector arithmetic right shift |
kvn@4134 | 4433 | instruct vsra2S(vecS dst, vecS shift) %{ |
kvn@4001 | 4434 | predicate(n->as_Vector()->length() == 2); |
kvn@4001 | 4435 | match(Set dst (RShiftVS dst shift)); |
kvn@4001 | 4436 | format %{ "psraw $dst,$shift\t! arithmetic right shift packed2S" %} |
kvn@4001 | 4437 | ins_encode %{ |
kvn@4001 | 4438 | __ psraw($dst$$XMMRegister, $shift$$XMMRegister); |
kvn@4001 | 4439 | %} |
kvn@4001 | 4440 | ins_pipe( pipe_slow ); |
kvn@4001 | 4441 | %} |
kvn@4001 | 4442 | |
kvn@4001 | 4443 | instruct vsra2S_imm(vecS dst, immI8 shift) %{ |
kvn@4001 | 4444 | predicate(n->as_Vector()->length() == 2); |
kvn@4001 | 4445 | match(Set dst (RShiftVS dst shift)); |
kvn@4001 | 4446 | format %{ "psraw $dst,$shift\t! arithmetic right shift packed2S" %} |
kvn@4001 | 4447 | ins_encode %{ |
kvn@4001 | 4448 | __ psraw($dst$$XMMRegister, (int)$shift$$constant); |
kvn@4001 | 4449 | %} |
kvn@4001 | 4450 | ins_pipe( pipe_slow ); |
kvn@4001 | 4451 | %} |
kvn@4001 | 4452 | |
kvn@4134 | 4453 | instruct vsra2S_reg(vecS dst, vecS src, vecS shift) %{ |
kvn@4001 | 4454 | predicate(UseAVX > 0 && n->as_Vector()->length() == 2); |
kvn@4001 | 4455 | match(Set dst (RShiftVS src shift)); |
kvn@4001 | 4456 | format %{ "vpsraw $dst,$src,$shift\t! arithmetic right shift packed2S" %} |
kvn@4001 | 4457 | ins_encode %{ |
kvn@4001 | 4458 | bool vector256 = false; |
kvn@4001 | 4459 | __ vpsraw($dst$$XMMRegister, $src$$XMMRegister, $shift$$XMMRegister, vector256); |
kvn@4001 | 4460 | %} |
kvn@4001 | 4461 | ins_pipe( pipe_slow ); |
kvn@4001 | 4462 | %} |
kvn@4001 | 4463 | |
kvn@4001 | 4464 | instruct vsra2S_reg_imm(vecS dst, vecS src, immI8 shift) %{ |
kvn@4001 | 4465 | predicate(UseAVX > 0 && n->as_Vector()->length() == 2); |
kvn@4001 | 4466 | match(Set dst (RShiftVS src shift)); |
kvn@4001 | 4467 | format %{ "vpsraw $dst,$src,$shift\t! arithmetic right shift packed2S" %} |
kvn@4001 | 4468 | ins_encode %{ |
kvn@4001 | 4469 | bool vector256 = false; |
kvn@4001 | 4470 | __ vpsraw($dst$$XMMRegister, $src$$XMMRegister, (int)$shift$$constant, vector256); |
kvn@4001 | 4471 | %} |
kvn@4001 | 4472 | ins_pipe( pipe_slow ); |
kvn@4001 | 4473 | %} |
kvn@4001 | 4474 | |
kvn@4134 | 4475 | instruct vsra4S(vecD dst, vecS shift) %{ |
kvn@4001 | 4476 | predicate(n->as_Vector()->length() == 4); |
kvn@4001 | 4477 | match(Set dst (RShiftVS dst shift)); |
kvn@4001 | 4478 | format %{ "psraw $dst,$shift\t! arithmetic right shift packed4S" %} |
kvn@4001 | 4479 | ins_encode %{ |
kvn@4001 | 4480 | __ psraw($dst$$XMMRegister, $shift$$XMMRegister); |
kvn@4001 | 4481 | %} |
kvn@4001 | 4482 | ins_pipe( pipe_slow ); |
kvn@4001 | 4483 | %} |
kvn@4001 | 4484 | |
kvn@4001 | 4485 | instruct vsra4S_imm(vecD dst, immI8 shift) %{ |
kvn@4001 | 4486 | predicate(n->as_Vector()->length() == 4); |
kvn@4001 | 4487 | match(Set dst (RShiftVS dst shift)); |
kvn@4001 | 4488 | format %{ "psraw $dst,$shift\t! arithmetic right shift packed4S" %} |
kvn@4001 | 4489 | ins_encode %{ |
kvn@4001 | 4490 | __ psraw($dst$$XMMRegister, (int)$shift$$constant); |
kvn@4001 | 4491 | %} |
kvn@4001 | 4492 | ins_pipe( pipe_slow ); |
kvn@4001 | 4493 | %} |
kvn@4001 | 4494 | |
kvn@4134 | 4495 | instruct vsra4S_reg(vecD dst, vecD src, vecS shift) %{ |
kvn@4001 | 4496 | predicate(UseAVX > 0 && n->as_Vector()->length() == 4); |
kvn@4001 | 4497 | match(Set dst (RShiftVS src shift)); |
kvn@4001 | 4498 | format %{ "vpsraw $dst,$src,$shift\t! arithmetic right shift packed4S" %} |
kvn@4001 | 4499 | ins_encode %{ |
kvn@4001 | 4500 | bool vector256 = false; |
kvn@4001 | 4501 | __ vpsraw($dst$$XMMRegister, $src$$XMMRegister, $shift$$XMMRegister, vector256); |
kvn@4001 | 4502 | %} |
kvn@4001 | 4503 | ins_pipe( pipe_slow ); |
kvn@4001 | 4504 | %} |
kvn@4001 | 4505 | |
kvn@4001 | 4506 | instruct vsra4S_reg_imm(vecD dst, vecD src, immI8 shift) %{ |
kvn@4001 | 4507 | predicate(UseAVX > 0 && n->as_Vector()->length() == 4); |
kvn@4001 | 4508 | match(Set dst (RShiftVS src shift)); |
kvn@4001 | 4509 | format %{ "vpsraw $dst,$src,$shift\t! arithmetic right shift packed4S" %} |
kvn@4001 | 4510 | ins_encode %{ |
kvn@4001 | 4511 | bool vector256 = false; |
kvn@4001 | 4512 | __ vpsraw($dst$$XMMRegister, $src$$XMMRegister, (int)$shift$$constant, vector256); |
kvn@4001 | 4513 | %} |
kvn@4001 | 4514 | ins_pipe( pipe_slow ); |
kvn@4001 | 4515 | %} |
kvn@4001 | 4516 | |
kvn@4134 | 4517 | instruct vsra8S(vecX dst, vecS shift) %{ |
kvn@4001 | 4518 | predicate(n->as_Vector()->length() == 8); |
kvn@4001 | 4519 | match(Set dst (RShiftVS dst shift)); |
kvn@4001 | 4520 | format %{ "psraw $dst,$shift\t! arithmetic right shift packed8S" %} |
kvn@4001 | 4521 | ins_encode %{ |
kvn@4001 | 4522 | __ psraw($dst$$XMMRegister, $shift$$XMMRegister); |
kvn@4001 | 4523 | %} |
kvn@4001 | 4524 | ins_pipe( pipe_slow ); |
kvn@4001 | 4525 | %} |
kvn@4001 | 4526 | |
kvn@4001 | 4527 | instruct vsra8S_imm(vecX dst, immI8 shift) %{ |
kvn@4001 | 4528 | predicate(n->as_Vector()->length() == 8); |
kvn@4001 | 4529 | match(Set dst (RShiftVS dst shift)); |
kvn@4001 | 4530 | format %{ "psraw $dst,$shift\t! arithmetic right shift packed8S" %} |
kvn@4001 | 4531 | ins_encode %{ |
kvn@4001 | 4532 | __ psraw($dst$$XMMRegister, (int)$shift$$constant); |
kvn@4001 | 4533 | %} |
kvn@4001 | 4534 | ins_pipe( pipe_slow ); |
kvn@4001 | 4535 | %} |
kvn@4001 | 4536 | |
kvn@4134 | 4537 | instruct vsra8S_reg(vecX dst, vecX src, vecS shift) %{ |
kvn@4001 | 4538 | predicate(UseAVX > 0 && n->as_Vector()->length() == 8); |
kvn@4001 | 4539 | match(Set dst (RShiftVS src shift)); |
kvn@4001 | 4540 | format %{ "vpsraw $dst,$src,$shift\t! arithmetic right shift packed8S" %} |
kvn@4001 | 4541 | ins_encode %{ |
kvn@4001 | 4542 | bool vector256 = false; |
kvn@4001 | 4543 | __ vpsraw($dst$$XMMRegister, $src$$XMMRegister, $shift$$XMMRegister, vector256); |
kvn@4001 | 4544 | %} |
kvn@4001 | 4545 | ins_pipe( pipe_slow ); |
kvn@4001 | 4546 | %} |
kvn@4001 | 4547 | |
kvn@4001 | 4548 | instruct vsra8S_reg_imm(vecX dst, vecX src, immI8 shift) %{ |
kvn@4001 | 4549 | predicate(UseAVX > 0 && n->as_Vector()->length() == 8); |
kvn@4001 | 4550 | match(Set dst (RShiftVS src shift)); |
kvn@4001 | 4551 | format %{ "vpsraw $dst,$src,$shift\t! arithmetic right shift packed8S" %} |
kvn@4001 | 4552 | ins_encode %{ |
kvn@4001 | 4553 | bool vector256 = false; |
kvn@4001 | 4554 | __ vpsraw($dst$$XMMRegister, $src$$XMMRegister, (int)$shift$$constant, vector256); |
kvn@4001 | 4555 | %} |
kvn@4001 | 4556 | ins_pipe( pipe_slow ); |
kvn@4001 | 4557 | %} |
kvn@4001 | 4558 | |
kvn@4134 | 4559 | instruct vsra16S_reg(vecY dst, vecY src, vecS shift) %{ |
kvn@4001 | 4560 | predicate(UseAVX > 1 && n->as_Vector()->length() == 16); |
kvn@4001 | 4561 | match(Set dst (RShiftVS src shift)); |
kvn@4001 | 4562 | format %{ "vpsraw $dst,$src,$shift\t! arithmetic right shift packed16S" %} |
kvn@4001 | 4563 | ins_encode %{ |
kvn@4001 | 4564 | bool vector256 = true; |
kvn@4001 | 4565 | __ vpsraw($dst$$XMMRegister, $src$$XMMRegister, $shift$$XMMRegister, vector256); |
kvn@4001 | 4566 | %} |
kvn@4001 | 4567 | ins_pipe( pipe_slow ); |
kvn@4001 | 4568 | %} |
kvn@4001 | 4569 | |
kvn@4001 | 4570 | instruct vsra16S_reg_imm(vecY dst, vecY src, immI8 shift) %{ |
kvn@4001 | 4571 | predicate(UseAVX > 1 && n->as_Vector()->length() == 16); |
kvn@4001 | 4572 | match(Set dst (RShiftVS src shift)); |
kvn@4001 | 4573 | format %{ "vpsraw $dst,$src,$shift\t! arithmetic right shift packed16S" %} |
kvn@4001 | 4574 | ins_encode %{ |
kvn@4001 | 4575 | bool vector256 = true; |
kvn@4001 | 4576 | __ vpsraw($dst$$XMMRegister, $src$$XMMRegister, (int)$shift$$constant, vector256); |
kvn@4001 | 4577 | %} |
kvn@4001 | 4578 | ins_pipe( pipe_slow ); |
kvn@4001 | 4579 | %} |
kvn@4001 | 4580 | |
kvn@4001 | 4581 | // Integers vector arithmetic right shift |
// Arithmetic (sign-filling) right shift of packed 32-bit ints via
// psrad/vpsrad.  Variable shift counts are held in an XMM register (vecS).
//
// In-place SSE2 form for 2 ints: dst >>= shift (variable count).
instruct vsra2I(vecD dst, vecS shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (RShiftVI dst shift));
  format %{ "psrad $dst,$shift\t! arithmetic right shift packed2I" %}
  ins_encode %{
    __ psrad($dst$$XMMRegister, $shift$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}

// In-place SSE2 form for 2 ints with an 8-bit immediate count.
instruct vsra2I_imm(vecD dst, immI8 shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (RShiftVI dst shift));
  format %{ "psrad $dst,$shift\t! arithmetic right shift packed2I" %}
  ins_encode %{
    __ psrad($dst$$XMMRegister, (int)$shift$$constant);
  %}
  ins_pipe( pipe_slow );
%}

// AVX 3-operand (non-destructive) form for 2 ints: dst = src >> shift.
instruct vsra2I_reg(vecD dst, vecD src, vecS shift) %{
  predicate(UseAVX > 0 && n->as_Vector()->length() == 2);
  match(Set dst (RShiftVI src shift));
  format %{ "vpsrad $dst,$src,$shift\t! arithmetic right shift packed2I" %}
  ins_encode %{
    bool vector256 = false;
    __ vpsrad($dst$$XMMRegister, $src$$XMMRegister, $shift$$XMMRegister, vector256);
  %}
  ins_pipe( pipe_slow );
%}

// AVX 3-operand form for 2 ints with an 8-bit immediate count.
instruct vsra2I_reg_imm(vecD dst, vecD src, immI8 shift) %{
  predicate(UseAVX > 0 && n->as_Vector()->length() == 2);
  match(Set dst (RShiftVI src shift));
  format %{ "vpsrad $dst,$src,$shift\t! arithmetic right shift packed2I" %}
  ins_encode %{
    bool vector256 = false;
    __ vpsrad($dst$$XMMRegister, $src$$XMMRegister, (int)$shift$$constant, vector256);
  %}
  ins_pipe( pipe_slow );
%}

// In-place SSE2 form for 4 ints (full 128-bit XMM, variable count).
instruct vsra4I(vecX dst, vecS shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (RShiftVI dst shift));
  format %{ "psrad $dst,$shift\t! arithmetic right shift packed4I" %}
  ins_encode %{
    __ psrad($dst$$XMMRegister, $shift$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}

// In-place SSE2 form for 4 ints with an 8-bit immediate count.
instruct vsra4I_imm(vecX dst, immI8 shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (RShiftVI dst shift));
  format %{ "psrad $dst,$shift\t! arithmetic right shift packed4I" %}
  ins_encode %{
    __ psrad($dst$$XMMRegister, (int)$shift$$constant);
  %}
  ins_pipe( pipe_slow );
%}

// AVX 3-operand form for 4 ints (variable count).
instruct vsra4I_reg(vecX dst, vecX src, vecS shift) %{
  predicate(UseAVX > 0 && n->as_Vector()->length() == 4);
  match(Set dst (RShiftVI src shift));
  format %{ "vpsrad $dst,$src,$shift\t! arithmetic right shift packed4I" %}
  ins_encode %{
    bool vector256 = false;
    __ vpsrad($dst$$XMMRegister, $src$$XMMRegister, $shift$$XMMRegister, vector256);
  %}
  ins_pipe( pipe_slow );
%}

// AVX 3-operand form for 4 ints with an 8-bit immediate count.
instruct vsra4I_reg_imm(vecX dst, vecX src, immI8 shift) %{
  predicate(UseAVX > 0 && n->as_Vector()->length() == 4);
  match(Set dst (RShiftVI src shift));
  format %{ "vpsrad $dst,$src,$shift\t! arithmetic right shift packed4I" %}
  ins_encode %{
    bool vector256 = false;
    __ vpsrad($dst$$XMMRegister, $src$$XMMRegister, (int)$shift$$constant, vector256);
  %}
  ins_pipe( pipe_slow );
%}

// AVX2-only (UseAVX > 1) 256-bit form for 8 ints (variable count).
instruct vsra8I_reg(vecY dst, vecY src, vecS shift) %{
  predicate(UseAVX > 1 && n->as_Vector()->length() == 8);
  match(Set dst (RShiftVI src shift));
  format %{ "vpsrad $dst,$src,$shift\t! arithmetic right shift packed8I" %}
  ins_encode %{
    bool vector256 = true;
    __ vpsrad($dst$$XMMRegister, $src$$XMMRegister, $shift$$XMMRegister, vector256);
  %}
  ins_pipe( pipe_slow );
%}

// AVX2-only 256-bit form for 8 ints with an 8-bit immediate count.
instruct vsra8I_reg_imm(vecY dst, vecY src, immI8 shift) %{
  predicate(UseAVX > 1 && n->as_Vector()->length() == 8);
  match(Set dst (RShiftVI src shift));
  format %{ "vpsrad $dst,$src,$shift\t! arithmetic right shift packed8I" %}
  ins_encode %{
    bool vector256 = true;
    __ vpsrad($dst$$XMMRegister, $src$$XMMRegister, (int)$shift$$constant, vector256);
  %}
  ins_pipe( pipe_slow );
%}
kvn@4001 | 4687 | |
kvn@4001 | 4688 | // There are no longs vector arithmetic right shift instructions. |
kvn@4001 | 4689 | |
kvn@4001 | 4690 | |
kvn@4001 | 4691 | // --------------------------------- AND -------------------------------------- |
kvn@4001 | 4692 | |
// Bitwise AND of vectors.  Element type is irrelevant, so patterns are keyed
// on length_in_bytes.  In-place pand forms serve pre-AVX; vpand forms are
// 3-operand AVX; memory-operand folds exist only for full-width 16/32-byte
// vectors.
//
// In-place SSE2 AND of 4-byte vectors: dst &= src.
instruct vand4B(vecS dst, vecS src) %{
  predicate(n->as_Vector()->length_in_bytes() == 4);
  match(Set dst (AndV dst src));
  format %{ "pand $dst,$src\t! and vectors (4 bytes)" %}
  ins_encode %{
    __ pand($dst$$XMMRegister, $src$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}

// AVX 3-operand AND of 4-byte vectors: dst = src1 & src2.
instruct vand4B_reg(vecS dst, vecS src1, vecS src2) %{
  predicate(UseAVX > 0 && n->as_Vector()->length_in_bytes() == 4);
  match(Set dst (AndV src1 src2));
  format %{ "vpand $dst,$src1,$src2\t! and vectors (4 bytes)" %}
  ins_encode %{
    bool vector256 = false;
    __ vpand($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256);
  %}
  ins_pipe( pipe_slow );
%}

// In-place SSE2 AND of 8-byte vectors.
instruct vand8B(vecD dst, vecD src) %{
  predicate(n->as_Vector()->length_in_bytes() == 8);
  match(Set dst (AndV dst src));
  format %{ "pand $dst,$src\t! and vectors (8 bytes)" %}
  ins_encode %{
    __ pand($dst$$XMMRegister, $src$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}

// AVX 3-operand AND of 8-byte vectors.
instruct vand8B_reg(vecD dst, vecD src1, vecD src2) %{
  predicate(UseAVX > 0 && n->as_Vector()->length_in_bytes() == 8);
  match(Set dst (AndV src1 src2));
  format %{ "vpand $dst,$src1,$src2\t! and vectors (8 bytes)" %}
  ins_encode %{
    bool vector256 = false;
    __ vpand($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256);
  %}
  ins_pipe( pipe_slow );
%}

// In-place SSE2 AND of 16-byte (full XMM) vectors.
instruct vand16B(vecX dst, vecX src) %{
  predicate(n->as_Vector()->length_in_bytes() == 16);
  match(Set dst (AndV dst src));
  format %{ "pand $dst,$src\t! and vectors (16 bytes)" %}
  ins_encode %{
    __ pand($dst$$XMMRegister, $src$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}

// AVX 3-operand AND of 16-byte vectors.
instruct vand16B_reg(vecX dst, vecX src1, vecX src2) %{
  predicate(UseAVX > 0 && n->as_Vector()->length_in_bytes() == 16);
  match(Set dst (AndV src1 src2));
  format %{ "vpand $dst,$src1,$src2\t! and vectors (16 bytes)" %}
  ins_encode %{
    bool vector256 = false;
    __ vpand($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256);
  %}
  ins_pipe( pipe_slow );
%}

// AVX AND with the second operand folded from memory (LoadVector).
instruct vand16B_mem(vecX dst, vecX src, memory mem) %{
  predicate(UseAVX > 0 && n->as_Vector()->length_in_bytes() == 16);
  match(Set dst (AndV src (LoadVector mem)));
  format %{ "vpand $dst,$src,$mem\t! and vectors (16 bytes)" %}
  ins_encode %{
    bool vector256 = false;
    __ vpand($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector256);
  %}
  ins_pipe( pipe_slow );
%}

// AVX2-only (UseAVX > 1) 256-bit AND of 32-byte vectors.
instruct vand32B_reg(vecY dst, vecY src1, vecY src2) %{
  predicate(UseAVX > 1 && n->as_Vector()->length_in_bytes() == 32);
  match(Set dst (AndV src1 src2));
  format %{ "vpand $dst,$src1,$src2\t! and vectors (32 bytes)" %}
  ins_encode %{
    bool vector256 = true;
    __ vpand($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256);
  %}
  ins_pipe( pipe_slow );
%}

// AVX2-only 256-bit AND with a memory operand.
instruct vand32B_mem(vecY dst, vecY src, memory mem) %{
  predicate(UseAVX > 1 && n->as_Vector()->length_in_bytes() == 32);
  match(Set dst (AndV src (LoadVector mem)));
  format %{ "vpand $dst,$src,$mem\t! and vectors (32 bytes)" %}
  ins_encode %{
    bool vector256 = true;
    __ vpand($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector256);
  %}
  ins_pipe( pipe_slow );
%}
kvn@4001 | 4788 | |
kvn@4001 | 4789 | // --------------------------------- OR --------------------------------------- |
kvn@4001 | 4790 | |
// Bitwise OR of vectors, keyed on length_in_bytes (element type irrelevant).
// Structure mirrors the AND patterns: in-place por for pre-AVX, 3-operand
// vpor for AVX, memory folds for full-width 16/32-byte vectors only.
//
// In-place SSE2 OR of 4-byte vectors: dst |= src.
instruct vor4B(vecS dst, vecS src) %{
  predicate(n->as_Vector()->length_in_bytes() == 4);
  match(Set dst (OrV dst src));
  format %{ "por $dst,$src\t! or vectors (4 bytes)" %}
  ins_encode %{
    __ por($dst$$XMMRegister, $src$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}

// AVX 3-operand OR of 4-byte vectors: dst = src1 | src2.
instruct vor4B_reg(vecS dst, vecS src1, vecS src2) %{
  predicate(UseAVX > 0 && n->as_Vector()->length_in_bytes() == 4);
  match(Set dst (OrV src1 src2));
  format %{ "vpor $dst,$src1,$src2\t! or vectors (4 bytes)" %}
  ins_encode %{
    bool vector256 = false;
    __ vpor($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256);
  %}
  ins_pipe( pipe_slow );
%}

// In-place SSE2 OR of 8-byte vectors.
instruct vor8B(vecD dst, vecD src) %{
  predicate(n->as_Vector()->length_in_bytes() == 8);
  match(Set dst (OrV dst src));
  format %{ "por $dst,$src\t! or vectors (8 bytes)" %}
  ins_encode %{
    __ por($dst$$XMMRegister, $src$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}

// AVX 3-operand OR of 8-byte vectors.
instruct vor8B_reg(vecD dst, vecD src1, vecD src2) %{
  predicate(UseAVX > 0 && n->as_Vector()->length_in_bytes() == 8);
  match(Set dst (OrV src1 src2));
  format %{ "vpor $dst,$src1,$src2\t! or vectors (8 bytes)" %}
  ins_encode %{
    bool vector256 = false;
    __ vpor($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256);
  %}
  ins_pipe( pipe_slow );
%}

// In-place SSE2 OR of 16-byte (full XMM) vectors.
instruct vor16B(vecX dst, vecX src) %{
  predicate(n->as_Vector()->length_in_bytes() == 16);
  match(Set dst (OrV dst src));
  format %{ "por $dst,$src\t! or vectors (16 bytes)" %}
  ins_encode %{
    __ por($dst$$XMMRegister, $src$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}

// AVX 3-operand OR of 16-byte vectors.
instruct vor16B_reg(vecX dst, vecX src1, vecX src2) %{
  predicate(UseAVX > 0 && n->as_Vector()->length_in_bytes() == 16);
  match(Set dst (OrV src1 src2));
  format %{ "vpor $dst,$src1,$src2\t! or vectors (16 bytes)" %}
  ins_encode %{
    bool vector256 = false;
    __ vpor($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256);
  %}
  ins_pipe( pipe_slow );
%}

// AVX OR with the second operand folded from memory (LoadVector).
instruct vor16B_mem(vecX dst, vecX src, memory mem) %{
  predicate(UseAVX > 0 && n->as_Vector()->length_in_bytes() == 16);
  match(Set dst (OrV src (LoadVector mem)));
  format %{ "vpor $dst,$src,$mem\t! or vectors (16 bytes)" %}
  ins_encode %{
    bool vector256 = false;
    __ vpor($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector256);
  %}
  ins_pipe( pipe_slow );
%}

// AVX2-only (UseAVX > 1) 256-bit OR of 32-byte vectors.
instruct vor32B_reg(vecY dst, vecY src1, vecY src2) %{
  predicate(UseAVX > 1 && n->as_Vector()->length_in_bytes() == 32);
  match(Set dst (OrV src1 src2));
  format %{ "vpor $dst,$src1,$src2\t! or vectors (32 bytes)" %}
  ins_encode %{
    bool vector256 = true;
    __ vpor($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256);
  %}
  ins_pipe( pipe_slow );
%}

// AVX2-only 256-bit OR with a memory operand.
instruct vor32B_mem(vecY dst, vecY src, memory mem) %{
  predicate(UseAVX > 1 && n->as_Vector()->length_in_bytes() == 32);
  match(Set dst (OrV src (LoadVector mem)));
  format %{ "vpor $dst,$src,$mem\t! or vectors (32 bytes)" %}
  ins_encode %{
    bool vector256 = true;
    __ vpor($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector256);
  %}
  ins_pipe( pipe_slow );
%}
kvn@4001 | 4886 | |
kvn@4001 | 4887 | // --------------------------------- XOR -------------------------------------- |
kvn@4001 | 4888 | |
// Bitwise XOR of vectors, keyed on length_in_bytes (element type irrelevant).
// Structure mirrors the AND/OR patterns: in-place pxor for pre-AVX, 3-operand
// vpxor for AVX, memory folds for full-width 16/32-byte vectors only.
//
// In-place SSE2 XOR of 4-byte vectors: dst ^= src.
instruct vxor4B(vecS dst, vecS src) %{
  predicate(n->as_Vector()->length_in_bytes() == 4);
  match(Set dst (XorV dst src));
  format %{ "pxor $dst,$src\t! xor vectors (4 bytes)" %}
  ins_encode %{
    __ pxor($dst$$XMMRegister, $src$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}

// AVX 3-operand XOR of 4-byte vectors: dst = src1 ^ src2.
instruct vxor4B_reg(vecS dst, vecS src1, vecS src2) %{
  predicate(UseAVX > 0 && n->as_Vector()->length_in_bytes() == 4);
  match(Set dst (XorV src1 src2));
  format %{ "vpxor $dst,$src1,$src2\t! xor vectors (4 bytes)" %}
  ins_encode %{
    bool vector256 = false;
    __ vpxor($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256);
  %}
  ins_pipe( pipe_slow );
%}

// In-place SSE2 XOR of 8-byte vectors.
instruct vxor8B(vecD dst, vecD src) %{
  predicate(n->as_Vector()->length_in_bytes() == 8);
  match(Set dst (XorV dst src));
  format %{ "pxor $dst,$src\t! xor vectors (8 bytes)" %}
  ins_encode %{
    __ pxor($dst$$XMMRegister, $src$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}

// AVX 3-operand XOR of 8-byte vectors.
instruct vxor8B_reg(vecD dst, vecD src1, vecD src2) %{
  predicate(UseAVX > 0 && n->as_Vector()->length_in_bytes() == 8);
  match(Set dst (XorV src1 src2));
  format %{ "vpxor $dst,$src1,$src2\t! xor vectors (8 bytes)" %}
  ins_encode %{
    bool vector256 = false;
    __ vpxor($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256);
  %}
  ins_pipe( pipe_slow );
%}

// In-place SSE2 XOR of 16-byte (full XMM) vectors.
instruct vxor16B(vecX dst, vecX src) %{
  predicate(n->as_Vector()->length_in_bytes() == 16);
  match(Set dst (XorV dst src));
  format %{ "pxor $dst,$src\t! xor vectors (16 bytes)" %}
  ins_encode %{
    __ pxor($dst$$XMMRegister, $src$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}

// AVX 3-operand XOR of 16-byte vectors.
instruct vxor16B_reg(vecX dst, vecX src1, vecX src2) %{
  predicate(UseAVX > 0 && n->as_Vector()->length_in_bytes() == 16);
  match(Set dst (XorV src1 src2));
  format %{ "vpxor $dst,$src1,$src2\t! xor vectors (16 bytes)" %}
  ins_encode %{
    bool vector256 = false;
    __ vpxor($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256);
  %}
  ins_pipe( pipe_slow );
%}

// AVX XOR with the second operand folded from memory (LoadVector).
instruct vxor16B_mem(vecX dst, vecX src, memory mem) %{
  predicate(UseAVX > 0 && n->as_Vector()->length_in_bytes() == 16);
  match(Set dst (XorV src (LoadVector mem)));
  format %{ "vpxor $dst,$src,$mem\t! xor vectors (16 bytes)" %}
  ins_encode %{
    bool vector256 = false;
    __ vpxor($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector256);
  %}
  ins_pipe( pipe_slow );
%}

// AVX2-only (UseAVX > 1) 256-bit XOR of 32-byte vectors.
instruct vxor32B_reg(vecY dst, vecY src1, vecY src2) %{
  predicate(UseAVX > 1 && n->as_Vector()->length_in_bytes() == 32);
  match(Set dst (XorV src1 src2));
  format %{ "vpxor $dst,$src1,$src2\t! xor vectors (32 bytes)" %}
  ins_encode %{
    bool vector256 = true;
    __ vpxor($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256);
  %}
  ins_pipe( pipe_slow );
%}

// AVX2-only 256-bit XOR with a memory operand.
instruct vxor32B_mem(vecY dst, vecY src, memory mem) %{
  predicate(UseAVX > 1 && n->as_Vector()->length_in_bytes() == 32);
  match(Set dst (XorV src (LoadVector mem)));
  format %{ "vpxor $dst,$src,$mem\t! xor vectors (32 bytes)" %}
  ins_encode %{
    bool vector256 = true;
    __ vpxor($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector256);
  %}
  ins_pipe( pipe_slow );
%}
kvn@4001 | 4984 |