Sat, 29 Sep 2012 06:40:00 -0400
8000213: NPG: Should have renamed arrayKlass and typeArrayKlass
Summary: Capitalize these metadata types (and objArrayKlass)
Reviewed-by: stefank, twisti, kvn
kvn@3390 | 1 | // |
kvn@3577 | 2 | // Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved. |
kvn@3390 | 3 | // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
kvn@3390 | 4 | // |
kvn@3390 | 5 | // This code is free software; you can redistribute it and/or modify it |
kvn@3390 | 6 | // under the terms of the GNU General Public License version 2 only, as |
kvn@3390 | 7 | // published by the Free Software Foundation. |
kvn@3390 | 8 | // |
kvn@3390 | 9 | // This code is distributed in the hope that it will be useful, but WITHOUT |
kvn@3390 | 10 | // ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
kvn@3390 | 11 | // FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
kvn@3390 | 12 | // version 2 for more details (a copy is included in the LICENSE file that |
kvn@3390 | 13 | // accompanied this code). |
kvn@3390 | 14 | // |
kvn@3390 | 15 | // You should have received a copy of the GNU General Public License version |
kvn@3390 | 16 | // 2 along with this work; if not, write to the Free Software Foundation, |
kvn@3390 | 17 | // Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. |
kvn@3390 | 18 | // |
kvn@3390 | 19 | // Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
kvn@3390 | 20 | // or visit www.oracle.com if you need additional information or have any |
kvn@3390 | 21 | // questions. |
kvn@3390 | 22 | // |
kvn@3390 | 23 | // |
kvn@3390 | 24 | |
kvn@3390 | 25 | // X86 Common Architecture Description File |
kvn@3390 | 26 | |
kvn@3882 | 27 | //----------REGISTER DEFINITION BLOCK------------------------------------------ |
kvn@3882 | 28 | // This information is used by the matcher and the register allocator to |
kvn@3882 | 29 | // describe individual registers and classes of registers within the target |
kvn@3882 |  30 | // architecture. |
kvn@3882 | 31 | |
kvn@3882 | 32 | register %{ |
kvn@3882 | 33 | //----------Architecture Description Register Definitions---------------------- |
kvn@3882 | 34 | // General Registers |
kvn@3882 | 35 | // "reg_def" name ( register save type, C convention save type, |
kvn@3882 | 36 | // ideal register type, encoding ); |
kvn@3882 | 37 | // Register Save Types: |
kvn@3882 | 38 | // |
kvn@3882 | 39 | // NS = No-Save: The register allocator assumes that these registers |
kvn@3882 | 40 | // can be used without saving upon entry to the method, & |
kvn@3882 | 41 | // that they do not need to be saved at call sites. |
kvn@3882 | 42 | // |
kvn@3882 | 43 | // SOC = Save-On-Call: The register allocator assumes that these registers |
kvn@3882 | 44 | // can be used without saving upon entry to the method, |
kvn@3882 | 45 | // but that they must be saved at call sites. |
kvn@3882 | 46 | // |
kvn@3882 | 47 | // SOE = Save-On-Entry: The register allocator assumes that these registers |
kvn@3882 | 48 | // must be saved before using them upon entry to the |
kvn@3882 | 49 | // method, but they do not need to be saved at call |
kvn@3882 | 50 | // sites. |
kvn@3882 | 51 | // |
kvn@3882 | 52 | // AS = Always-Save: The register allocator assumes that these registers |
kvn@3882 | 53 | // must be saved before using them upon entry to the |
kvn@3882 | 54 | // method, & that they must be saved at call sites. |
kvn@3882 | 55 | // |
kvn@3882 | 56 | // Ideal Register Type is used to determine how to save & restore a |
kvn@3882 | 57 | // register. Op_RegI will get spilled with LoadI/StoreI, Op_RegP will get |
kvn@3882 | 58 | // spilled with LoadP/StoreP. If the register supports both, use Op_RegI. |
kvn@3882 | 59 | // |
kvn@3882 | 60 | // The encoding number is the actual bit-pattern placed into the opcodes. |
kvn@3882 | 61 | |
kvn@3882 | 62 | // XMM registers. 256-bit registers or 8 words each, labeled (a)-h. |
kvn@3882 | 63 | // Word a in each register holds a Float, words ab hold a Double. |
kvn@3882 | 64 | // The whole registers are used in SSE4.2 version intrinsics, |
kvn@3882 | 65 | // array copy stubs and superword operations (see UseSSE42Intrinsics, |
kvn@3882 | 66 | // UseXMMForArrayCopy and UseSuperword flags). |
kvn@3882 | 67 | // XMM8-XMM15 must be encoded with REX (VEX for UseAVX). |
kvn@3882 | 68 | // Linux ABI: No register preserved across function calls |
kvn@3882 | 69 | // XMM0-XMM7 might hold parameters |
kvn@3882 | 70 | // Windows ABI: XMM6-XMM15 preserved across function calls |
kvn@3882 | 71 | // XMM0-XMM3 might hold parameters |
kvn@3882 | 72 | |
kvn@3882 | 73 | reg_def XMM0 ( SOC, SOC, Op_RegF, 0, xmm0->as_VMReg()); |
kvn@3929 | 74 | reg_def XMM0b( SOC, SOC, Op_RegF, 0, xmm0->as_VMReg()->next(1)); |
kvn@3929 | 75 | reg_def XMM0c( SOC, SOC, Op_RegF, 0, xmm0->as_VMReg()->next(2)); |
kvn@3929 | 76 | reg_def XMM0d( SOC, SOC, Op_RegF, 0, xmm0->as_VMReg()->next(3)); |
kvn@3929 | 77 | reg_def XMM0e( SOC, SOC, Op_RegF, 0, xmm0->as_VMReg()->next(4)); |
kvn@3929 | 78 | reg_def XMM0f( SOC, SOC, Op_RegF, 0, xmm0->as_VMReg()->next(5)); |
kvn@3929 | 79 | reg_def XMM0g( SOC, SOC, Op_RegF, 0, xmm0->as_VMReg()->next(6)); |
kvn@3929 | 80 | reg_def XMM0h( SOC, SOC, Op_RegF, 0, xmm0->as_VMReg()->next(7)); |
kvn@3882 | 81 | |
kvn@3882 | 82 | reg_def XMM1 ( SOC, SOC, Op_RegF, 1, xmm1->as_VMReg()); |
kvn@3929 | 83 | reg_def XMM1b( SOC, SOC, Op_RegF, 1, xmm1->as_VMReg()->next(1)); |
kvn@3929 | 84 | reg_def XMM1c( SOC, SOC, Op_RegF, 1, xmm1->as_VMReg()->next(2)); |
kvn@3929 | 85 | reg_def XMM1d( SOC, SOC, Op_RegF, 1, xmm1->as_VMReg()->next(3)); |
kvn@3929 | 86 | reg_def XMM1e( SOC, SOC, Op_RegF, 1, xmm1->as_VMReg()->next(4)); |
kvn@3929 | 87 | reg_def XMM1f( SOC, SOC, Op_RegF, 1, xmm1->as_VMReg()->next(5)); |
kvn@3929 | 88 | reg_def XMM1g( SOC, SOC, Op_RegF, 1, xmm1->as_VMReg()->next(6)); |
kvn@3929 | 89 | reg_def XMM1h( SOC, SOC, Op_RegF, 1, xmm1->as_VMReg()->next(7)); |
kvn@3882 | 90 | |
kvn@3882 | 91 | reg_def XMM2 ( SOC, SOC, Op_RegF, 2, xmm2->as_VMReg()); |
kvn@3929 | 92 | reg_def XMM2b( SOC, SOC, Op_RegF, 2, xmm2->as_VMReg()->next(1)); |
kvn@3929 | 93 | reg_def XMM2c( SOC, SOC, Op_RegF, 2, xmm2->as_VMReg()->next(2)); |
kvn@3929 | 94 | reg_def XMM2d( SOC, SOC, Op_RegF, 2, xmm2->as_VMReg()->next(3)); |
kvn@3929 | 95 | reg_def XMM2e( SOC, SOC, Op_RegF, 2, xmm2->as_VMReg()->next(4)); |
kvn@3929 | 96 | reg_def XMM2f( SOC, SOC, Op_RegF, 2, xmm2->as_VMReg()->next(5)); |
kvn@3929 | 97 | reg_def XMM2g( SOC, SOC, Op_RegF, 2, xmm2->as_VMReg()->next(6)); |
kvn@3929 | 98 | reg_def XMM2h( SOC, SOC, Op_RegF, 2, xmm2->as_VMReg()->next(7)); |
kvn@3882 | 99 | |
kvn@3882 | 100 | reg_def XMM3 ( SOC, SOC, Op_RegF, 3, xmm3->as_VMReg()); |
kvn@3929 | 101 | reg_def XMM3b( SOC, SOC, Op_RegF, 3, xmm3->as_VMReg()->next(1)); |
kvn@3929 | 102 | reg_def XMM3c( SOC, SOC, Op_RegF, 3, xmm3->as_VMReg()->next(2)); |
kvn@3929 | 103 | reg_def XMM3d( SOC, SOC, Op_RegF, 3, xmm3->as_VMReg()->next(3)); |
kvn@3929 | 104 | reg_def XMM3e( SOC, SOC, Op_RegF, 3, xmm3->as_VMReg()->next(4)); |
kvn@3929 | 105 | reg_def XMM3f( SOC, SOC, Op_RegF, 3, xmm3->as_VMReg()->next(5)); |
kvn@3929 | 106 | reg_def XMM3g( SOC, SOC, Op_RegF, 3, xmm3->as_VMReg()->next(6)); |
kvn@3929 | 107 | reg_def XMM3h( SOC, SOC, Op_RegF, 3, xmm3->as_VMReg()->next(7)); |
kvn@3882 | 108 | |
kvn@3882 | 109 | reg_def XMM4 ( SOC, SOC, Op_RegF, 4, xmm4->as_VMReg()); |
kvn@3929 | 110 | reg_def XMM4b( SOC, SOC, Op_RegF, 4, xmm4->as_VMReg()->next(1)); |
kvn@3929 | 111 | reg_def XMM4c( SOC, SOC, Op_RegF, 4, xmm4->as_VMReg()->next(2)); |
kvn@3929 | 112 | reg_def XMM4d( SOC, SOC, Op_RegF, 4, xmm4->as_VMReg()->next(3)); |
kvn@3929 | 113 | reg_def XMM4e( SOC, SOC, Op_RegF, 4, xmm4->as_VMReg()->next(4)); |
kvn@3929 | 114 | reg_def XMM4f( SOC, SOC, Op_RegF, 4, xmm4->as_VMReg()->next(5)); |
kvn@3929 | 115 | reg_def XMM4g( SOC, SOC, Op_RegF, 4, xmm4->as_VMReg()->next(6)); |
kvn@3929 | 116 | reg_def XMM4h( SOC, SOC, Op_RegF, 4, xmm4->as_VMReg()->next(7)); |
kvn@3882 | 117 | |
kvn@3882 | 118 | reg_def XMM5 ( SOC, SOC, Op_RegF, 5, xmm5->as_VMReg()); |
kvn@3929 | 119 | reg_def XMM5b( SOC, SOC, Op_RegF, 5, xmm5->as_VMReg()->next(1)); |
kvn@3929 | 120 | reg_def XMM5c( SOC, SOC, Op_RegF, 5, xmm5->as_VMReg()->next(2)); |
kvn@3929 | 121 | reg_def XMM5d( SOC, SOC, Op_RegF, 5, xmm5->as_VMReg()->next(3)); |
kvn@3929 | 122 | reg_def XMM5e( SOC, SOC, Op_RegF, 5, xmm5->as_VMReg()->next(4)); |
kvn@3929 | 123 | reg_def XMM5f( SOC, SOC, Op_RegF, 5, xmm5->as_VMReg()->next(5)); |
kvn@3929 | 124 | reg_def XMM5g( SOC, SOC, Op_RegF, 5, xmm5->as_VMReg()->next(6)); |
kvn@3929 | 125 | reg_def XMM5h( SOC, SOC, Op_RegF, 5, xmm5->as_VMReg()->next(7)); |
kvn@3882 | 126 | |
kvn@3882 | 127 | #ifdef _WIN64 |
kvn@3882 | 128 | |
kvn@3882 | 129 | reg_def XMM6 ( SOC, SOE, Op_RegF, 6, xmm6->as_VMReg()); |
kvn@3929 | 130 | reg_def XMM6b( SOC, SOE, Op_RegF, 6, xmm6->as_VMReg()->next(1)); |
kvn@3929 | 131 | reg_def XMM6c( SOC, SOE, Op_RegF, 6, xmm6->as_VMReg()->next(2)); |
kvn@3929 | 132 | reg_def XMM6d( SOC, SOE, Op_RegF, 6, xmm6->as_VMReg()->next(3)); |
kvn@3929 | 133 | reg_def XMM6e( SOC, SOE, Op_RegF, 6, xmm6->as_VMReg()->next(4)); |
kvn@3929 | 134 | reg_def XMM6f( SOC, SOE, Op_RegF, 6, xmm6->as_VMReg()->next(5)); |
kvn@3929 | 135 | reg_def XMM6g( SOC, SOE, Op_RegF, 6, xmm6->as_VMReg()->next(6)); |
kvn@3929 | 136 | reg_def XMM6h( SOC, SOE, Op_RegF, 6, xmm6->as_VMReg()->next(7)); |
kvn@3882 | 137 | |
kvn@3882 | 138 | reg_def XMM7 ( SOC, SOE, Op_RegF, 7, xmm7->as_VMReg()); |
kvn@3929 | 139 | reg_def XMM7b( SOC, SOE, Op_RegF, 7, xmm7->as_VMReg()->next(1)); |
kvn@3929 | 140 | reg_def XMM7c( SOC, SOE, Op_RegF, 7, xmm7->as_VMReg()->next(2)); |
kvn@3929 | 141 | reg_def XMM7d( SOC, SOE, Op_RegF, 7, xmm7->as_VMReg()->next(3)); |
kvn@3929 | 142 | reg_def XMM7e( SOC, SOE, Op_RegF, 7, xmm7->as_VMReg()->next(4)); |
kvn@3929 | 143 | reg_def XMM7f( SOC, SOE, Op_RegF, 7, xmm7->as_VMReg()->next(5)); |
kvn@3929 | 144 | reg_def XMM7g( SOC, SOE, Op_RegF, 7, xmm7->as_VMReg()->next(6)); |
kvn@3929 | 145 | reg_def XMM7h( SOC, SOE, Op_RegF, 7, xmm7->as_VMReg()->next(7)); |
kvn@3882 | 146 | |
kvn@3882 | 147 | reg_def XMM8 ( SOC, SOE, Op_RegF, 8, xmm8->as_VMReg()); |
kvn@3929 | 148 | reg_def XMM8b( SOC, SOE, Op_RegF, 8, xmm8->as_VMReg()->next(1)); |
kvn@3929 | 149 | reg_def XMM8c( SOC, SOE, Op_RegF, 8, xmm8->as_VMReg()->next(2)); |
kvn@3929 | 150 | reg_def XMM8d( SOC, SOE, Op_RegF, 8, xmm8->as_VMReg()->next(3)); |
kvn@3929 | 151 | reg_def XMM8e( SOC, SOE, Op_RegF, 8, xmm8->as_VMReg()->next(4)); |
kvn@3929 | 152 | reg_def XMM8f( SOC, SOE, Op_RegF, 8, xmm8->as_VMReg()->next(5)); |
kvn@3929 | 153 | reg_def XMM8g( SOC, SOE, Op_RegF, 8, xmm8->as_VMReg()->next(6)); |
kvn@3929 | 154 | reg_def XMM8h( SOC, SOE, Op_RegF, 8, xmm8->as_VMReg()->next(7)); |
kvn@3882 | 155 | |
kvn@3882 | 156 | reg_def XMM9 ( SOC, SOE, Op_RegF, 9, xmm9->as_VMReg()); |
kvn@3929 | 157 | reg_def XMM9b( SOC, SOE, Op_RegF, 9, xmm9->as_VMReg()->next(1)); |
kvn@3929 | 158 | reg_def XMM9c( SOC, SOE, Op_RegF, 9, xmm9->as_VMReg()->next(2)); |
kvn@3929 | 159 | reg_def XMM9d( SOC, SOE, Op_RegF, 9, xmm9->as_VMReg()->next(3)); |
kvn@3929 | 160 | reg_def XMM9e( SOC, SOE, Op_RegF, 9, xmm9->as_VMReg()->next(4)); |
kvn@3929 | 161 | reg_def XMM9f( SOC, SOE, Op_RegF, 9, xmm9->as_VMReg()->next(5)); |
kvn@3929 | 162 | reg_def XMM9g( SOC, SOE, Op_RegF, 9, xmm9->as_VMReg()->next(6)); |
kvn@3929 | 163 | reg_def XMM9h( SOC, SOE, Op_RegF, 9, xmm9->as_VMReg()->next(7)); |
kvn@3882 | 164 | |
kvn@3882 | 165 | reg_def XMM10 ( SOC, SOE, Op_RegF, 10, xmm10->as_VMReg()); |
kvn@3929 | 166 | reg_def XMM10b( SOC, SOE, Op_RegF, 10, xmm10->as_VMReg()->next(1)); |
kvn@3929 | 167 | reg_def XMM10c( SOC, SOE, Op_RegF, 10, xmm10->as_VMReg()->next(2)); |
kvn@3929 | 168 | reg_def XMM10d( SOC, SOE, Op_RegF, 10, xmm10->as_VMReg()->next(3)); |
kvn@3929 | 169 | reg_def XMM10e( SOC, SOE, Op_RegF, 10, xmm10->as_VMReg()->next(4)); |
kvn@3929 | 170 | reg_def XMM10f( SOC, SOE, Op_RegF, 10, xmm10->as_VMReg()->next(5)); |
kvn@3929 | 171 | reg_def XMM10g( SOC, SOE, Op_RegF, 10, xmm10->as_VMReg()->next(6)); |
kvn@3929 | 172 | reg_def XMM10h( SOC, SOE, Op_RegF, 10, xmm10->as_VMReg()->next(7)); |
kvn@3882 | 173 | |
kvn@3882 | 174 | reg_def XMM11 ( SOC, SOE, Op_RegF, 11, xmm11->as_VMReg()); |
kvn@3929 | 175 | reg_def XMM11b( SOC, SOE, Op_RegF, 11, xmm11->as_VMReg()->next(1)); |
kvn@3929 | 176 | reg_def XMM11c( SOC, SOE, Op_RegF, 11, xmm11->as_VMReg()->next(2)); |
kvn@3929 | 177 | reg_def XMM11d( SOC, SOE, Op_RegF, 11, xmm11->as_VMReg()->next(3)); |
kvn@3929 | 178 | reg_def XMM11e( SOC, SOE, Op_RegF, 11, xmm11->as_VMReg()->next(4)); |
kvn@3929 | 179 | reg_def XMM11f( SOC, SOE, Op_RegF, 11, xmm11->as_VMReg()->next(5)); |
kvn@3929 | 180 | reg_def XMM11g( SOC, SOE, Op_RegF, 11, xmm11->as_VMReg()->next(6)); |
kvn@3929 | 181 | reg_def XMM11h( SOC, SOE, Op_RegF, 11, xmm11->as_VMReg()->next(7)); |
kvn@3882 | 182 | |
kvn@3882 | 183 | reg_def XMM12 ( SOC, SOE, Op_RegF, 12, xmm12->as_VMReg()); |
kvn@3929 | 184 | reg_def XMM12b( SOC, SOE, Op_RegF, 12, xmm12->as_VMReg()->next(1)); |
kvn@3929 | 185 | reg_def XMM12c( SOC, SOE, Op_RegF, 12, xmm12->as_VMReg()->next(2)); |
kvn@3929 | 186 | reg_def XMM12d( SOC, SOE, Op_RegF, 12, xmm12->as_VMReg()->next(3)); |
kvn@3929 | 187 | reg_def XMM12e( SOC, SOE, Op_RegF, 12, xmm12->as_VMReg()->next(4)); |
kvn@3929 | 188 | reg_def XMM12f( SOC, SOE, Op_RegF, 12, xmm12->as_VMReg()->next(5)); |
kvn@3929 | 189 | reg_def XMM12g( SOC, SOE, Op_RegF, 12, xmm12->as_VMReg()->next(6)); |
kvn@3929 | 190 | reg_def XMM12h( SOC, SOE, Op_RegF, 12, xmm12->as_VMReg()->next(7)); |
kvn@3882 | 191 | |
kvn@3882 | 192 | reg_def XMM13 ( SOC, SOE, Op_RegF, 13, xmm13->as_VMReg()); |
kvn@3929 | 193 | reg_def XMM13b( SOC, SOE, Op_RegF, 13, xmm13->as_VMReg()->next(1)); |
kvn@3929 | 194 | reg_def XMM13c( SOC, SOE, Op_RegF, 13, xmm13->as_VMReg()->next(2)); |
kvn@3929 | 195 | reg_def XMM13d( SOC, SOE, Op_RegF, 13, xmm13->as_VMReg()->next(3)); |
kvn@3929 | 196 | reg_def XMM13e( SOC, SOE, Op_RegF, 13, xmm13->as_VMReg()->next(4)); |
kvn@3929 | 197 | reg_def XMM13f( SOC, SOE, Op_RegF, 13, xmm13->as_VMReg()->next(5)); |
kvn@3929 | 198 | reg_def XMM13g( SOC, SOE, Op_RegF, 13, xmm13->as_VMReg()->next(6)); |
kvn@3929 | 199 | reg_def XMM13h( SOC, SOE, Op_RegF, 13, xmm13->as_VMReg()->next(7)); |
kvn@3882 | 200 | |
kvn@3882 | 201 | reg_def XMM14 ( SOC, SOE, Op_RegF, 14, xmm14->as_VMReg()); |
kvn@3929 | 202 | reg_def XMM14b( SOC, SOE, Op_RegF, 14, xmm14->as_VMReg()->next(1)); |
kvn@3929 | 203 | reg_def XMM14c( SOC, SOE, Op_RegF, 14, xmm14->as_VMReg()->next(2)); |
kvn@3929 | 204 | reg_def XMM14d( SOC, SOE, Op_RegF, 14, xmm14->as_VMReg()->next(3)); |
kvn@3929 | 205 | reg_def XMM14e( SOC, SOE, Op_RegF, 14, xmm14->as_VMReg()->next(4)); |
kvn@3929 | 206 | reg_def XMM14f( SOC, SOE, Op_RegF, 14, xmm14->as_VMReg()->next(5)); |
kvn@3929 | 207 | reg_def XMM14g( SOC, SOE, Op_RegF, 14, xmm14->as_VMReg()->next(6)); |
kvn@3929 | 208 | reg_def XMM14h( SOC, SOE, Op_RegF, 14, xmm14->as_VMReg()->next(7)); |
kvn@3882 | 209 | |
kvn@3882 | 210 | reg_def XMM15 ( SOC, SOE, Op_RegF, 15, xmm15->as_VMReg()); |
kvn@3929 | 211 | reg_def XMM15b( SOC, SOE, Op_RegF, 15, xmm15->as_VMReg()->next(1)); |
kvn@3929 | 212 | reg_def XMM15c( SOC, SOE, Op_RegF, 15, xmm15->as_VMReg()->next(2)); |
kvn@3929 | 213 | reg_def XMM15d( SOC, SOE, Op_RegF, 15, xmm15->as_VMReg()->next(3)); |
kvn@3929 | 214 | reg_def XMM15e( SOC, SOE, Op_RegF, 15, xmm15->as_VMReg()->next(4)); |
kvn@3929 | 215 | reg_def XMM15f( SOC, SOE, Op_RegF, 15, xmm15->as_VMReg()->next(5)); |
kvn@3929 | 216 | reg_def XMM15g( SOC, SOE, Op_RegF, 15, xmm15->as_VMReg()->next(6)); |
kvn@3929 | 217 | reg_def XMM15h( SOC, SOE, Op_RegF, 15, xmm15->as_VMReg()->next(7)); |
kvn@3882 | 218 | |
kvn@3882 | 219 | #else // _WIN64 |
kvn@3882 | 220 | |
kvn@3882 | 221 | reg_def XMM6 ( SOC, SOC, Op_RegF, 6, xmm6->as_VMReg()); |
kvn@3929 | 222 | reg_def XMM6b( SOC, SOC, Op_RegF, 6, xmm6->as_VMReg()->next(1)); |
kvn@3929 | 223 | reg_def XMM6c( SOC, SOC, Op_RegF, 6, xmm6->as_VMReg()->next(2)); |
kvn@3929 | 224 | reg_def XMM6d( SOC, SOC, Op_RegF, 6, xmm6->as_VMReg()->next(3)); |
kvn@3929 | 225 | reg_def XMM6e( SOC, SOC, Op_RegF, 6, xmm6->as_VMReg()->next(4)); |
kvn@3929 | 226 | reg_def XMM6f( SOC, SOC, Op_RegF, 6, xmm6->as_VMReg()->next(5)); |
kvn@3929 | 227 | reg_def XMM6g( SOC, SOC, Op_RegF, 6, xmm6->as_VMReg()->next(6)); |
kvn@3929 | 228 | reg_def XMM6h( SOC, SOC, Op_RegF, 6, xmm6->as_VMReg()->next(7)); |
kvn@3882 | 229 | |
kvn@3882 | 230 | reg_def XMM7 ( SOC, SOC, Op_RegF, 7, xmm7->as_VMReg()); |
kvn@3929 | 231 | reg_def XMM7b( SOC, SOC, Op_RegF, 7, xmm7->as_VMReg()->next(1)); |
kvn@3929 | 232 | reg_def XMM7c( SOC, SOC, Op_RegF, 7, xmm7->as_VMReg()->next(2)); |
kvn@3929 | 233 | reg_def XMM7d( SOC, SOC, Op_RegF, 7, xmm7->as_VMReg()->next(3)); |
kvn@3929 | 234 | reg_def XMM7e( SOC, SOC, Op_RegF, 7, xmm7->as_VMReg()->next(4)); |
kvn@3929 | 235 | reg_def XMM7f( SOC, SOC, Op_RegF, 7, xmm7->as_VMReg()->next(5)); |
kvn@3929 | 236 | reg_def XMM7g( SOC, SOC, Op_RegF, 7, xmm7->as_VMReg()->next(6)); |
kvn@3929 | 237 | reg_def XMM7h( SOC, SOC, Op_RegF, 7, xmm7->as_VMReg()->next(7)); |
kvn@3882 | 238 | |
kvn@3882 | 239 | #ifdef _LP64 |
kvn@3882 | 240 | |
kvn@3882 | 241 | reg_def XMM8 ( SOC, SOC, Op_RegF, 8, xmm8->as_VMReg()); |
kvn@3929 | 242 | reg_def XMM8b( SOC, SOC, Op_RegF, 8, xmm8->as_VMReg()->next(1)); |
kvn@3929 | 243 | reg_def XMM8c( SOC, SOC, Op_RegF, 8, xmm8->as_VMReg()->next(2)); |
kvn@3929 | 244 | reg_def XMM8d( SOC, SOC, Op_RegF, 8, xmm8->as_VMReg()->next(3)); |
kvn@3929 | 245 | reg_def XMM8e( SOC, SOC, Op_RegF, 8, xmm8->as_VMReg()->next(4)); |
kvn@3929 | 246 | reg_def XMM8f( SOC, SOC, Op_RegF, 8, xmm8->as_VMReg()->next(5)); |
kvn@3929 | 247 | reg_def XMM8g( SOC, SOC, Op_RegF, 8, xmm8->as_VMReg()->next(6)); |
kvn@3929 | 248 | reg_def XMM8h( SOC, SOC, Op_RegF, 8, xmm8->as_VMReg()->next(7)); |
kvn@3882 | 249 | |
kvn@3882 | 250 | reg_def XMM9 ( SOC, SOC, Op_RegF, 9, xmm9->as_VMReg()); |
kvn@3929 | 251 | reg_def XMM9b( SOC, SOC, Op_RegF, 9, xmm9->as_VMReg()->next(1)); |
kvn@3929 | 252 | reg_def XMM9c( SOC, SOC, Op_RegF, 9, xmm9->as_VMReg()->next(2)); |
kvn@3929 | 253 | reg_def XMM9d( SOC, SOC, Op_RegF, 9, xmm9->as_VMReg()->next(3)); |
kvn@3929 | 254 | reg_def XMM9e( SOC, SOC, Op_RegF, 9, xmm9->as_VMReg()->next(4)); |
kvn@3929 | 255 | reg_def XMM9f( SOC, SOC, Op_RegF, 9, xmm9->as_VMReg()->next(5)); |
kvn@3929 | 256 | reg_def XMM9g( SOC, SOC, Op_RegF, 9, xmm9->as_VMReg()->next(6)); |
kvn@3929 | 257 | reg_def XMM9h( SOC, SOC, Op_RegF, 9, xmm9->as_VMReg()->next(7)); |
kvn@3882 | 258 | |
kvn@3882 | 259 | reg_def XMM10 ( SOC, SOC, Op_RegF, 10, xmm10->as_VMReg()); |
kvn@3929 | 260 | reg_def XMM10b( SOC, SOC, Op_RegF, 10, xmm10->as_VMReg()->next(1)); |
kvn@3929 | 261 | reg_def XMM10c( SOC, SOC, Op_RegF, 10, xmm10->as_VMReg()->next(2)); |
kvn@3929 | 262 | reg_def XMM10d( SOC, SOC, Op_RegF, 10, xmm10->as_VMReg()->next(3)); |
kvn@3929 | 263 | reg_def XMM10e( SOC, SOC, Op_RegF, 10, xmm10->as_VMReg()->next(4)); |
kvn@3929 | 264 | reg_def XMM10f( SOC, SOC, Op_RegF, 10, xmm10->as_VMReg()->next(5)); |
kvn@3929 | 265 | reg_def XMM10g( SOC, SOC, Op_RegF, 10, xmm10->as_VMReg()->next(6)); |
kvn@3929 | 266 | reg_def XMM10h( SOC, SOC, Op_RegF, 10, xmm10->as_VMReg()->next(7)); |
kvn@3882 | 267 | |
kvn@3882 | 268 | reg_def XMM11 ( SOC, SOC, Op_RegF, 11, xmm11->as_VMReg()); |
kvn@3929 | 269 | reg_def XMM11b( SOC, SOC, Op_RegF, 11, xmm11->as_VMReg()->next(1)); |
kvn@3929 | 270 | reg_def XMM11c( SOC, SOC, Op_RegF, 11, xmm11->as_VMReg()->next(2)); |
kvn@3929 | 271 | reg_def XMM11d( SOC, SOC, Op_RegF, 11, xmm11->as_VMReg()->next(3)); |
kvn@3929 | 272 | reg_def XMM11e( SOC, SOC, Op_RegF, 11, xmm11->as_VMReg()->next(4)); |
kvn@3929 | 273 | reg_def XMM11f( SOC, SOC, Op_RegF, 11, xmm11->as_VMReg()->next(5)); |
kvn@3929 | 274 | reg_def XMM11g( SOC, SOC, Op_RegF, 11, xmm11->as_VMReg()->next(6)); |
kvn@3929 | 275 | reg_def XMM11h( SOC, SOC, Op_RegF, 11, xmm11->as_VMReg()->next(7)); |
kvn@3882 | 276 | |
kvn@3882 | 277 | reg_def XMM12 ( SOC, SOC, Op_RegF, 12, xmm12->as_VMReg()); |
kvn@3929 | 278 | reg_def XMM12b( SOC, SOC, Op_RegF, 12, xmm12->as_VMReg()->next(1)); |
kvn@3929 | 279 | reg_def XMM12c( SOC, SOC, Op_RegF, 12, xmm12->as_VMReg()->next(2)); |
kvn@3929 | 280 | reg_def XMM12d( SOC, SOC, Op_RegF, 12, xmm12->as_VMReg()->next(3)); |
kvn@3929 | 281 | reg_def XMM12e( SOC, SOC, Op_RegF, 12, xmm12->as_VMReg()->next(4)); |
kvn@3929 | 282 | reg_def XMM12f( SOC, SOC, Op_RegF, 12, xmm12->as_VMReg()->next(5)); |
kvn@3929 | 283 | reg_def XMM12g( SOC, SOC, Op_RegF, 12, xmm12->as_VMReg()->next(6)); |
kvn@3929 | 284 | reg_def XMM12h( SOC, SOC, Op_RegF, 12, xmm12->as_VMReg()->next(7)); |
kvn@3882 | 285 | |
kvn@3882 | 286 | reg_def XMM13 ( SOC, SOC, Op_RegF, 13, xmm13->as_VMReg()); |
kvn@3929 | 287 | reg_def XMM13b( SOC, SOC, Op_RegF, 13, xmm13->as_VMReg()->next(1)); |
kvn@3929 | 288 | reg_def XMM13c( SOC, SOC, Op_RegF, 13, xmm13->as_VMReg()->next(2)); |
kvn@3929 | 289 | reg_def XMM13d( SOC, SOC, Op_RegF, 13, xmm13->as_VMReg()->next(3)); |
kvn@3929 | 290 | reg_def XMM13e( SOC, SOC, Op_RegF, 13, xmm13->as_VMReg()->next(4)); |
kvn@3929 | 291 | reg_def XMM13f( SOC, SOC, Op_RegF, 13, xmm13->as_VMReg()->next(5)); |
kvn@3929 | 292 | reg_def XMM13g( SOC, SOC, Op_RegF, 13, xmm13->as_VMReg()->next(6)); |
kvn@3929 | 293 | reg_def XMM13h( SOC, SOC, Op_RegF, 13, xmm13->as_VMReg()->next(7)); |
kvn@3882 | 294 | |
kvn@3882 | 295 | reg_def XMM14 ( SOC, SOC, Op_RegF, 14, xmm14->as_VMReg()); |
kvn@3929 | 296 | reg_def XMM14b( SOC, SOC, Op_RegF, 14, xmm14->as_VMReg()->next(1)); |
kvn@3929 | 297 | reg_def XMM14c( SOC, SOC, Op_RegF, 14, xmm14->as_VMReg()->next(2)); |
kvn@3929 | 298 | reg_def XMM14d( SOC, SOC, Op_RegF, 14, xmm14->as_VMReg()->next(3)); |
kvn@3929 | 299 | reg_def XMM14e( SOC, SOC, Op_RegF, 14, xmm14->as_VMReg()->next(4)); |
kvn@3929 | 300 | reg_def XMM14f( SOC, SOC, Op_RegF, 14, xmm14->as_VMReg()->next(5)); |
kvn@3929 | 301 | reg_def XMM14g( SOC, SOC, Op_RegF, 14, xmm14->as_VMReg()->next(6)); |
kvn@3929 | 302 | reg_def XMM14h( SOC, SOC, Op_RegF, 14, xmm14->as_VMReg()->next(7)); |
kvn@3882 | 303 | |
kvn@3882 | 304 | reg_def XMM15 ( SOC, SOC, Op_RegF, 15, xmm15->as_VMReg()); |
kvn@3929 | 305 | reg_def XMM15b( SOC, SOC, Op_RegF, 15, xmm15->as_VMReg()->next(1)); |
kvn@3929 | 306 | reg_def XMM15c( SOC, SOC, Op_RegF, 15, xmm15->as_VMReg()->next(2)); |
kvn@3929 | 307 | reg_def XMM15d( SOC, SOC, Op_RegF, 15, xmm15->as_VMReg()->next(3)); |
kvn@3929 | 308 | reg_def XMM15e( SOC, SOC, Op_RegF, 15, xmm15->as_VMReg()->next(4)); |
kvn@3929 | 309 | reg_def XMM15f( SOC, SOC, Op_RegF, 15, xmm15->as_VMReg()->next(5)); |
kvn@3929 | 310 | reg_def XMM15g( SOC, SOC, Op_RegF, 15, xmm15->as_VMReg()->next(6)); |
kvn@3929 | 311 | reg_def XMM15h( SOC, SOC, Op_RegF, 15, xmm15->as_VMReg()->next(7)); |
kvn@3882 | 312 | |
kvn@3882 | 313 | #endif // _LP64 |
kvn@3882 | 314 | |
kvn@3882 | 315 | #endif // _WIN64 |
kvn@3882 | 316 | |
kvn@3882 | 317 | #ifdef _LP64 |
kvn@3882 | 318 | reg_def RFLAGS(SOC, SOC, 0, 16, VMRegImpl::Bad()); |
kvn@3882 | 319 | #else |
kvn@3882 | 320 | reg_def RFLAGS(SOC, SOC, 0, 8, VMRegImpl::Bad()); |
kvn@3882 | 321 | #endif // _LP64 |
kvn@3882 | 322 | |
kvn@3882 | 323 | alloc_class chunk1(XMM0, XMM0b, XMM0c, XMM0d, XMM0e, XMM0f, XMM0g, XMM0h, |
kvn@3882 | 324 | XMM1, XMM1b, XMM1c, XMM1d, XMM1e, XMM1f, XMM1g, XMM1h, |
kvn@3882 | 325 | XMM2, XMM2b, XMM2c, XMM2d, XMM2e, XMM2f, XMM2g, XMM2h, |
kvn@3882 | 326 | XMM3, XMM3b, XMM3c, XMM3d, XMM3e, XMM3f, XMM3g, XMM3h, |
kvn@3882 | 327 | XMM4, XMM4b, XMM4c, XMM4d, XMM4e, XMM4f, XMM4g, XMM4h, |
kvn@3882 | 328 | XMM5, XMM5b, XMM5c, XMM5d, XMM5e, XMM5f, XMM5g, XMM5h, |
kvn@3882 | 329 | XMM6, XMM6b, XMM6c, XMM6d, XMM6e, XMM6f, XMM6g, XMM6h, |
kvn@3882 | 330 | XMM7, XMM7b, XMM7c, XMM7d, XMM7e, XMM7f, XMM7g, XMM7h |
kvn@3882 | 331 | #ifdef _LP64 |
kvn@3882 | 332 | ,XMM8, XMM8b, XMM8c, XMM8d, XMM8e, XMM8f, XMM8g, XMM8h, |
kvn@3882 | 333 | XMM9, XMM9b, XMM9c, XMM9d, XMM9e, XMM9f, XMM9g, XMM9h, |
kvn@3882 | 334 | XMM10, XMM10b, XMM10c, XMM10d, XMM10e, XMM10f, XMM10g, XMM10h, |
kvn@3882 | 335 | XMM11, XMM11b, XMM11c, XMM11d, XMM11e, XMM11f, XMM11g, XMM11h, |
kvn@3882 | 336 | XMM12, XMM12b, XMM12c, XMM12d, XMM12e, XMM12f, XMM12g, XMM12h, |
kvn@3882 | 337 | XMM13, XMM13b, XMM13c, XMM13d, XMM13e, XMM13f, XMM13g, XMM13h, |
kvn@3882 | 338 | XMM14, XMM14b, XMM14c, XMM14d, XMM14e, XMM14f, XMM14g, XMM14h, |
kvn@3882 | 339 | XMM15, XMM15b, XMM15c, XMM15d, XMM15e, XMM15f, XMM15g, XMM15h |
kvn@3882 | 340 | #endif |
kvn@3882 | 341 | ); |
kvn@3882 | 342 | |
kvn@3882 | 343 | // flags allocation class should be last. |
kvn@3882 | 344 | alloc_class chunk2(RFLAGS); |
kvn@3882 | 345 | |
kvn@3882 | 346 | // Singleton class for condition codes |
kvn@3882 | 347 | reg_class int_flags(RFLAGS); |
kvn@3882 | 348 | |
kvn@3882 | 349 | // Class for all float registers |
kvn@3882 | 350 | reg_class float_reg(XMM0, |
kvn@3882 | 351 | XMM1, |
kvn@3882 | 352 | XMM2, |
kvn@3882 | 353 | XMM3, |
kvn@3882 | 354 | XMM4, |
kvn@3882 | 355 | XMM5, |
kvn@3882 | 356 | XMM6, |
kvn@3882 | 357 | XMM7 |
kvn@3882 | 358 | #ifdef _LP64 |
kvn@3882 | 359 | ,XMM8, |
kvn@3882 | 360 | XMM9, |
kvn@3882 | 361 | XMM10, |
kvn@3882 | 362 | XMM11, |
kvn@3882 | 363 | XMM12, |
kvn@3882 | 364 | XMM13, |
kvn@3882 | 365 | XMM14, |
kvn@3882 | 366 | XMM15 |
kvn@3882 | 367 | #endif |
kvn@3882 | 368 | ); |
kvn@3882 | 369 | |
kvn@3882 | 370 | // Class for all double registers |
kvn@3882 | 371 | reg_class double_reg(XMM0, XMM0b, |
kvn@3882 | 372 | XMM1, XMM1b, |
kvn@3882 | 373 | XMM2, XMM2b, |
kvn@3882 | 374 | XMM3, XMM3b, |
kvn@3882 | 375 | XMM4, XMM4b, |
kvn@3882 | 376 | XMM5, XMM5b, |
kvn@3882 | 377 | XMM6, XMM6b, |
kvn@3882 | 378 | XMM7, XMM7b |
kvn@3882 | 379 | #ifdef _LP64 |
kvn@3882 | 380 | ,XMM8, XMM8b, |
kvn@3882 | 381 | XMM9, XMM9b, |
kvn@3882 | 382 | XMM10, XMM10b, |
kvn@3882 | 383 | XMM11, XMM11b, |
kvn@3882 | 384 | XMM12, XMM12b, |
kvn@3882 | 385 | XMM13, XMM13b, |
kvn@3882 | 386 | XMM14, XMM14b, |
kvn@3882 | 387 | XMM15, XMM15b |
kvn@3882 | 388 | #endif |
kvn@3882 | 389 | ); |
kvn@3882 | 390 | |
kvn@3882 | 391 | // Class for all 32bit vector registers |
kvn@3882 | 392 | reg_class vectors_reg(XMM0, |
kvn@3882 | 393 | XMM1, |
kvn@3882 | 394 | XMM2, |
kvn@3882 | 395 | XMM3, |
kvn@3882 | 396 | XMM4, |
kvn@3882 | 397 | XMM5, |
kvn@3882 | 398 | XMM6, |
kvn@3882 | 399 | XMM7 |
kvn@3882 | 400 | #ifdef _LP64 |
kvn@3882 | 401 | ,XMM8, |
kvn@3882 | 402 | XMM9, |
kvn@3882 | 403 | XMM10, |
kvn@3882 | 404 | XMM11, |
kvn@3882 | 405 | XMM12, |
kvn@3882 | 406 | XMM13, |
kvn@3882 | 407 | XMM14, |
kvn@3882 | 408 | XMM15 |
kvn@3882 | 409 | #endif |
kvn@3882 | 410 | ); |
kvn@3882 | 411 | |
kvn@3882 | 412 | // Class for all 64bit vector registers |
kvn@3882 | 413 | reg_class vectord_reg(XMM0, XMM0b, |
kvn@3882 | 414 | XMM1, XMM1b, |
kvn@3882 | 415 | XMM2, XMM2b, |
kvn@3882 | 416 | XMM3, XMM3b, |
kvn@3882 | 417 | XMM4, XMM4b, |
kvn@3882 | 418 | XMM5, XMM5b, |
kvn@3882 | 419 | XMM6, XMM6b, |
kvn@3882 | 420 | XMM7, XMM7b |
kvn@3882 | 421 | #ifdef _LP64 |
kvn@3882 | 422 | ,XMM8, XMM8b, |
kvn@3882 | 423 | XMM9, XMM9b, |
kvn@3882 | 424 | XMM10, XMM10b, |
kvn@3882 | 425 | XMM11, XMM11b, |
kvn@3882 | 426 | XMM12, XMM12b, |
kvn@3882 | 427 | XMM13, XMM13b, |
kvn@3882 | 428 | XMM14, XMM14b, |
kvn@3882 | 429 | XMM15, XMM15b |
kvn@3882 | 430 | #endif |
kvn@3882 | 431 | ); |
kvn@3882 | 432 | |
kvn@3882 | 433 | // Class for all 128bit vector registers |
kvn@3882 | 434 | reg_class vectorx_reg(XMM0, XMM0b, XMM0c, XMM0d, |
kvn@3882 | 435 | XMM1, XMM1b, XMM1c, XMM1d, |
kvn@3882 | 436 | XMM2, XMM2b, XMM2c, XMM2d, |
kvn@3882 | 437 | XMM3, XMM3b, XMM3c, XMM3d, |
kvn@3882 | 438 | XMM4, XMM4b, XMM4c, XMM4d, |
kvn@3882 | 439 | XMM5, XMM5b, XMM5c, XMM5d, |
kvn@3882 | 440 | XMM6, XMM6b, XMM6c, XMM6d, |
kvn@3882 | 441 | XMM7, XMM7b, XMM7c, XMM7d |
kvn@3882 | 442 | #ifdef _LP64 |
kvn@3882 | 443 | ,XMM8, XMM8b, XMM8c, XMM8d, |
kvn@3882 | 444 | XMM9, XMM9b, XMM9c, XMM9d, |
kvn@3882 | 445 | XMM10, XMM10b, XMM10c, XMM10d, |
kvn@3882 | 446 | XMM11, XMM11b, XMM11c, XMM11d, |
kvn@3882 | 447 | XMM12, XMM12b, XMM12c, XMM12d, |
kvn@3882 | 448 | XMM13, XMM13b, XMM13c, XMM13d, |
kvn@3882 | 449 | XMM14, XMM14b, XMM14c, XMM14d, |
kvn@3882 | 450 | XMM15, XMM15b, XMM15c, XMM15d |
kvn@3882 | 451 | #endif |
kvn@3882 | 452 | ); |
kvn@3882 | 453 | |
kvn@3882 | 454 | // Class for all 256bit vector registers |
kvn@3882 | 455 | reg_class vectory_reg(XMM0, XMM0b, XMM0c, XMM0d, XMM0e, XMM0f, XMM0g, XMM0h, |
kvn@3882 | 456 | XMM1, XMM1b, XMM1c, XMM1d, XMM1e, XMM1f, XMM1g, XMM1h, |
kvn@3882 | 457 | XMM2, XMM2b, XMM2c, XMM2d, XMM2e, XMM2f, XMM2g, XMM2h, |
kvn@3882 | 458 | XMM3, XMM3b, XMM3c, XMM3d, XMM3e, XMM3f, XMM3g, XMM3h, |
kvn@3882 | 459 | XMM4, XMM4b, XMM4c, XMM4d, XMM4e, XMM4f, XMM4g, XMM4h, |
kvn@3882 | 460 | XMM5, XMM5b, XMM5c, XMM5d, XMM5e, XMM5f, XMM5g, XMM5h, |
kvn@3882 | 461 | XMM6, XMM6b, XMM6c, XMM6d, XMM6e, XMM6f, XMM6g, XMM6h, |
kvn@3882 | 462 | XMM7, XMM7b, XMM7c, XMM7d, XMM7e, XMM7f, XMM7g, XMM7h |
kvn@3882 | 463 | #ifdef _LP64 |
kvn@3882 | 464 | ,XMM8, XMM8b, XMM8c, XMM8d, XMM8e, XMM8f, XMM8g, XMM8h, |
kvn@3882 | 465 | XMM9, XMM9b, XMM9c, XMM9d, XMM9e, XMM9f, XMM9g, XMM9h, |
kvn@3882 | 466 | XMM10, XMM10b, XMM10c, XMM10d, XMM10e, XMM10f, XMM10g, XMM10h, |
kvn@3882 | 467 | XMM11, XMM11b, XMM11c, XMM11d, XMM11e, XMM11f, XMM11g, XMM11h, |
kvn@3882 | 468 | XMM12, XMM12b, XMM12c, XMM12d, XMM12e, XMM12f, XMM12g, XMM12h, |
kvn@3882 | 469 | XMM13, XMM13b, XMM13c, XMM13d, XMM13e, XMM13f, XMM13g, XMM13h, |
kvn@3882 | 470 | XMM14, XMM14b, XMM14c, XMM14d, XMM14e, XMM14f, XMM14g, XMM14h, |
kvn@3882 | 471 | XMM15, XMM15b, XMM15c, XMM15d, XMM15e, XMM15f, XMM15g, XMM15h |
kvn@3882 | 472 | #endif |
kvn@3882 | 473 | ); |
kvn@3882 | 474 | |
kvn@3882 | 475 | %} |
kvn@3882 | 476 | |
kvn@3390 | 477 | source %{ |
kvn@3390 | 478 | // Float masks come from different places depending on platform. |
kvn@3390 | 479 | #ifdef _LP64 |
kvn@3390 | 480 | static address float_signmask() { return StubRoutines::x86::float_sign_mask(); } |
kvn@3390 | 481 | static address float_signflip() { return StubRoutines::x86::float_sign_flip(); } |
kvn@3390 | 482 | static address double_signmask() { return StubRoutines::x86::double_sign_mask(); } |
kvn@3390 | 483 | static address double_signflip() { return StubRoutines::x86::double_sign_flip(); } |
kvn@3390 | 484 | #else |
kvn@3390 | 485 | static address float_signmask() { return (address)float_signmask_pool; } |
kvn@3390 | 486 | static address float_signflip() { return (address)float_signflip_pool; } |
kvn@3390 | 487 | static address double_signmask() { return (address)double_signmask_pool; } |
kvn@3390 | 488 | static address double_signflip() { return (address)double_signflip_pool; } |
kvn@3390 | 489 | #endif |
kvn@3577 | 490 | |
kvn@3882 | 491 | |
kvn@4001 | 492 | const bool Matcher::match_rule_supported(int opcode) { |
kvn@4001 | 493 | if (!has_match_rule(opcode)) |
kvn@4001 | 494 | return false; |
kvn@4001 | 495 | |
kvn@4001 | 496 | switch (opcode) { |
kvn@4001 | 497 | case Op_PopCountI: |
kvn@4001 | 498 | case Op_PopCountL: |
kvn@4001 | 499 | if (!UsePopCountInstruction) |
kvn@4001 | 500 | return false; |
kvn@4103 | 501 | break; |
kvn@4001 | 502 | case Op_MulVI: |
kvn@4001 | 503 | if ((UseSSE < 4) && (UseAVX < 1)) // only with SSE4_1 or AVX |
kvn@4001 | 504 | return false; |
kvn@4001 | 505 | break; |
roland@4106 | 506 | case Op_CompareAndSwapL: |
roland@4106 | 507 | #ifdef _LP64 |
roland@4106 | 508 | case Op_CompareAndSwapP: |
roland@4106 | 509 | #endif |
roland@4106 | 510 | if (!VM_Version::supports_cx8()) |
roland@4106 | 511 | return false; |
roland@4106 | 512 | break; |
kvn@4001 | 513 | } |
kvn@4001 | 514 | |
kvn@4001 | 515 | return true; // Per default match rules are supported. |
kvn@4001 | 516 | } |
kvn@4001 | 517 | |
// Max vector size in bytes. 0 if not supported.
// The result is capped by the CPU capability (SSE2 = 16 bytes, AVX2 = 32,
// AVX1 = 32 only for FP types) and by the MaxVectorSize flag, then checked
// against the minimum lane count for the element type.
const int Matcher::vector_width_in_bytes(BasicType bt) {
  assert(is_java_primitive(bt), "only primitive type vectors");
  if (UseSSE < 2) return 0;
  // SSE2 supports 128bit vectors for all types.
  // AVX2 supports 256bit vectors for all types.
  int size = (UseAVX > 1) ? 32 : 16;
  // AVX1 supports 256bit vectors only for FLOAT and DOUBLE.
  if (UseAVX > 0 && (bt == T_FLOAT || bt == T_DOUBLE))
    size = 32;
  // Use flag to limit vector size.
  size = MIN2(size,(int)MaxVectorSize);
  // Minimum 2 values in vector (or 4 for bytes).
  // NOTE: the cases below fall through intentionally — each element width
  // adds its own lower bound: 8-byte lanes need size >= 16 for 2 lanes,
  // 4-byte lanes need size >= 8, and 1/2-byte lanes need size >= 4.
  switch (bt) {
  case T_DOUBLE:
  case T_LONG:
    if (size < 16) return 0;
    // fall through
  case T_FLOAT:
  case T_INT:
    if (size < 8) return 0;
    // fall through
  case T_BOOLEAN:
  case T_BYTE:
  case T_CHAR:
  case T_SHORT:
    if (size < 4) return 0;
    break;
  default:
    ShouldNotReachHere();
  }
  return size;
}
kvn@3882 | 549 | |
kvn@3882 | 550 | // Limits on vector size (number of elements) loaded into vector. |
kvn@3882 | 551 | const int Matcher::max_vector_size(const BasicType bt) { |
kvn@3882 | 552 | return vector_width_in_bytes(bt)/type2aelembytes(bt); |
kvn@3882 | 553 | } |
kvn@3882 | 554 | const int Matcher::min_vector_size(const BasicType bt) { |
kvn@3882 | 555 | int max_size = max_vector_size(bt); |
kvn@3882 | 556 | // Min size which can be loaded into vector is 4 bytes. |
kvn@3882 | 557 | int size = (type2aelembytes(bt) == 1) ? 4 : 2; |
kvn@3882 | 558 | return MIN2(size,max_size); |
kvn@3882 | 559 | } |
kvn@3882 | 560 | |
kvn@3882 | 561 | // Vector ideal reg corresponding to specidied size in bytes |
kvn@3882 | 562 | const int Matcher::vector_ideal_reg(int size) { |
kvn@3882 | 563 | assert(MaxVectorSize >= size, ""); |
kvn@3882 | 564 | switch(size) { |
kvn@3882 | 565 | case 4: return Op_VecS; |
kvn@3882 | 566 | case 8: return Op_VecD; |
kvn@3882 | 567 | case 16: return Op_VecX; |
kvn@3882 | 568 | case 32: return Op_VecY; |
kvn@3882 | 569 | } |
kvn@3882 | 570 | ShouldNotReachHere(); |
kvn@3882 | 571 | return 0; |
kvn@3882 | 572 | } |
kvn@3882 | 573 | |
kvn@3882 | 574 | // x86 supports misaligned vectors store/load. |
kvn@3882 | 575 | const bool Matcher::misaligned_vectors_ok() { |
kvn@3882 | 576 | return !AlignVector; // can be changed by flag |
kvn@3882 | 577 | } |
kvn@3882 | 578 | |
kvn@3882 | 579 | // Helper methods for MachSpillCopyNode::implementation(). |
kvn@3882 | 580 | static int vec_mov_helper(CodeBuffer *cbuf, bool do_size, int src_lo, int dst_lo, |
kvn@3882 | 581 | int src_hi, int dst_hi, uint ireg, outputStream* st) { |
kvn@3882 | 582 | // In 64-bit VM size calculation is very complex. Emitting instructions |
kvn@3882 | 583 | // into scratch buffer is used to get size in 64-bit VM. |
kvn@3882 | 584 | LP64_ONLY( assert(!do_size, "this method calculates size only for 32-bit VM"); ) |
kvn@3882 | 585 | assert(ireg == Op_VecS || // 32bit vector |
kvn@3882 | 586 | (src_lo & 1) == 0 && (src_lo + 1) == src_hi && |
kvn@3882 | 587 | (dst_lo & 1) == 0 && (dst_lo + 1) == dst_hi, |
kvn@3882 | 588 | "no non-adjacent vector moves" ); |
kvn@3882 | 589 | if (cbuf) { |
kvn@3882 | 590 | MacroAssembler _masm(cbuf); |
kvn@3882 | 591 | int offset = __ offset(); |
kvn@3882 | 592 | switch (ireg) { |
kvn@3882 | 593 | case Op_VecS: // copy whole register |
kvn@3882 | 594 | case Op_VecD: |
kvn@3882 | 595 | case Op_VecX: |
kvn@3882 | 596 | __ movdqu(as_XMMRegister(Matcher::_regEncode[dst_lo]), as_XMMRegister(Matcher::_regEncode[src_lo])); |
kvn@3882 | 597 | break; |
kvn@3882 | 598 | case Op_VecY: |
kvn@3882 | 599 | __ vmovdqu(as_XMMRegister(Matcher::_regEncode[dst_lo]), as_XMMRegister(Matcher::_regEncode[src_lo])); |
kvn@3882 | 600 | break; |
kvn@3882 | 601 | default: |
kvn@3882 | 602 | ShouldNotReachHere(); |
kvn@3882 | 603 | } |
kvn@3882 | 604 | int size = __ offset() - offset; |
kvn@3882 | 605 | #ifdef ASSERT |
kvn@3882 | 606 | // VEX_2bytes prefix is used if UseAVX > 0, so it takes the same 2 bytes as SIMD prefix. |
kvn@3882 | 607 | assert(!do_size || size == 4, "incorrect size calculattion"); |
kvn@3882 | 608 | #endif |
kvn@3882 | 609 | return size; |
kvn@3882 | 610 | #ifndef PRODUCT |
kvn@3882 | 611 | } else if (!do_size) { |
kvn@3882 | 612 | switch (ireg) { |
kvn@3882 | 613 | case Op_VecS: |
kvn@3882 | 614 | case Op_VecD: |
kvn@3882 | 615 | case Op_VecX: |
kvn@3882 | 616 | st->print("movdqu %s,%s\t# spill",Matcher::regName[dst_lo],Matcher::regName[src_lo]); |
kvn@3882 | 617 | break; |
kvn@3882 | 618 | case Op_VecY: |
kvn@3882 | 619 | st->print("vmovdqu %s,%s\t# spill",Matcher::regName[dst_lo],Matcher::regName[src_lo]); |
kvn@3882 | 620 | break; |
kvn@3882 | 621 | default: |
kvn@3882 | 622 | ShouldNotReachHere(); |
kvn@3882 | 623 | } |
kvn@3882 | 624 | #endif |
kvn@3882 | 625 | } |
kvn@3882 | 626 | // VEX_2bytes prefix is used if UseAVX > 0, and it takes the same 2 bytes as SIMD prefix. |
kvn@3882 | 627 | return 4; |
kvn@3882 | 628 | } |
kvn@3882 | 629 | |
kvn@3882 | 630 | static int vec_spill_helper(CodeBuffer *cbuf, bool do_size, bool is_load, |
kvn@3882 | 631 | int stack_offset, int reg, uint ireg, outputStream* st) { |
kvn@3882 | 632 | // In 64-bit VM size calculation is very complex. Emitting instructions |
kvn@3882 | 633 | // into scratch buffer is used to get size in 64-bit VM. |
kvn@3882 | 634 | LP64_ONLY( assert(!do_size, "this method calculates size only for 32-bit VM"); ) |
kvn@3882 | 635 | if (cbuf) { |
kvn@3882 | 636 | MacroAssembler _masm(cbuf); |
kvn@3882 | 637 | int offset = __ offset(); |
kvn@3882 | 638 | if (is_load) { |
kvn@3882 | 639 | switch (ireg) { |
kvn@3882 | 640 | case Op_VecS: |
kvn@3882 | 641 | __ movdl(as_XMMRegister(Matcher::_regEncode[reg]), Address(rsp, stack_offset)); |
kvn@3882 | 642 | break; |
kvn@3882 | 643 | case Op_VecD: |
kvn@3882 | 644 | __ movq(as_XMMRegister(Matcher::_regEncode[reg]), Address(rsp, stack_offset)); |
kvn@3882 | 645 | break; |
kvn@3882 | 646 | case Op_VecX: |
kvn@3882 | 647 | __ movdqu(as_XMMRegister(Matcher::_regEncode[reg]), Address(rsp, stack_offset)); |
kvn@3882 | 648 | break; |
kvn@3882 | 649 | case Op_VecY: |
kvn@3882 | 650 | __ vmovdqu(as_XMMRegister(Matcher::_regEncode[reg]), Address(rsp, stack_offset)); |
kvn@3882 | 651 | break; |
kvn@3882 | 652 | default: |
kvn@3882 | 653 | ShouldNotReachHere(); |
kvn@3882 | 654 | } |
kvn@3882 | 655 | } else { // store |
kvn@3882 | 656 | switch (ireg) { |
kvn@3882 | 657 | case Op_VecS: |
kvn@3882 | 658 | __ movdl(Address(rsp, stack_offset), as_XMMRegister(Matcher::_regEncode[reg])); |
kvn@3882 | 659 | break; |
kvn@3882 | 660 | case Op_VecD: |
kvn@3882 | 661 | __ movq(Address(rsp, stack_offset), as_XMMRegister(Matcher::_regEncode[reg])); |
kvn@3882 | 662 | break; |
kvn@3882 | 663 | case Op_VecX: |
kvn@3882 | 664 | __ movdqu(Address(rsp, stack_offset), as_XMMRegister(Matcher::_regEncode[reg])); |
kvn@3882 | 665 | break; |
kvn@3882 | 666 | case Op_VecY: |
kvn@3882 | 667 | __ vmovdqu(Address(rsp, stack_offset), as_XMMRegister(Matcher::_regEncode[reg])); |
kvn@3882 | 668 | break; |
kvn@3882 | 669 | default: |
kvn@3882 | 670 | ShouldNotReachHere(); |
kvn@3882 | 671 | } |
kvn@3882 | 672 | } |
kvn@3882 | 673 | int size = __ offset() - offset; |
kvn@3882 | 674 | #ifdef ASSERT |
kvn@3882 | 675 | int offset_size = (stack_offset == 0) ? 0 : ((stack_offset < 0x80) ? 1 : 4); |
kvn@3882 | 676 | // VEX_2bytes prefix is used if UseAVX > 0, so it takes the same 2 bytes as SIMD prefix. |
kvn@3882 | 677 | assert(!do_size || size == (5+offset_size), "incorrect size calculattion"); |
kvn@3882 | 678 | #endif |
kvn@3882 | 679 | return size; |
kvn@3882 | 680 | #ifndef PRODUCT |
kvn@3882 | 681 | } else if (!do_size) { |
kvn@3882 | 682 | if (is_load) { |
kvn@3882 | 683 | switch (ireg) { |
kvn@3882 | 684 | case Op_VecS: |
kvn@3882 | 685 | st->print("movd %s,[rsp + %d]\t# spill", Matcher::regName[reg], stack_offset); |
kvn@3882 | 686 | break; |
kvn@3882 | 687 | case Op_VecD: |
kvn@3882 | 688 | st->print("movq %s,[rsp + %d]\t# spill", Matcher::regName[reg], stack_offset); |
kvn@3882 | 689 | break; |
kvn@3882 | 690 | case Op_VecX: |
kvn@3882 | 691 | st->print("movdqu %s,[rsp + %d]\t# spill", Matcher::regName[reg], stack_offset); |
kvn@3882 | 692 | break; |
kvn@3882 | 693 | case Op_VecY: |
kvn@3882 | 694 | st->print("vmovdqu %s,[rsp + %d]\t# spill", Matcher::regName[reg], stack_offset); |
kvn@3882 | 695 | break; |
kvn@3882 | 696 | default: |
kvn@3882 | 697 | ShouldNotReachHere(); |
kvn@3882 | 698 | } |
kvn@3882 | 699 | } else { // store |
kvn@3882 | 700 | switch (ireg) { |
kvn@3882 | 701 | case Op_VecS: |
kvn@3882 | 702 | st->print("movd [rsp + %d],%s\t# spill", stack_offset, Matcher::regName[reg]); |
kvn@3882 | 703 | break; |
kvn@3882 | 704 | case Op_VecD: |
kvn@3882 | 705 | st->print("movq [rsp + %d],%s\t# spill", stack_offset, Matcher::regName[reg]); |
kvn@3882 | 706 | break; |
kvn@3882 | 707 | case Op_VecX: |
kvn@3882 | 708 | st->print("movdqu [rsp + %d],%s\t# spill", stack_offset, Matcher::regName[reg]); |
kvn@3882 | 709 | break; |
kvn@3882 | 710 | case Op_VecY: |
kvn@3882 | 711 | st->print("vmovdqu [rsp + %d],%s\t# spill", stack_offset, Matcher::regName[reg]); |
kvn@3882 | 712 | break; |
kvn@3882 | 713 | default: |
kvn@3882 | 714 | ShouldNotReachHere(); |
kvn@3882 | 715 | } |
kvn@3882 | 716 | } |
kvn@3882 | 717 | #endif |
kvn@3882 | 718 | } |
kvn@3882 | 719 | int offset_size = (stack_offset == 0) ? 0 : ((stack_offset < 0x80) ? 1 : 4); |
kvn@3882 | 720 | // VEX_2bytes prefix is used if UseAVX > 0, so it takes the same 2 bytes as SIMD prefix. |
kvn@3882 | 721 | return 5+offset_size; |
kvn@3882 | 722 | } |
kvn@3882 | 723 | |
kvn@3882 | 724 | static inline jfloat replicate4_imm(int con, int width) { |
kvn@3882 | 725 | // Load a constant of "width" (in bytes) and replicate it to fill 32bit. |
kvn@3882 | 726 | assert(width == 1 || width == 2, "only byte or short types here"); |
kvn@3882 | 727 | int bit_width = width * 8; |
kvn@3882 | 728 | jint val = con; |
kvn@3882 | 729 | val &= (1 << bit_width) - 1; // mask off sign bits |
kvn@3882 | 730 | while(bit_width < 32) { |
kvn@3882 | 731 | val |= (val << bit_width); |
kvn@3882 | 732 | bit_width <<= 1; |
kvn@3882 | 733 | } |
kvn@3882 | 734 | jfloat fval = *((jfloat*) &val); // coerce to float type |
kvn@3882 | 735 | return fval; |
kvn@3882 | 736 | } |
kvn@3882 | 737 | |
kvn@3882 | 738 | static inline jdouble replicate8_imm(int con, int width) { |
kvn@3882 | 739 | // Load a constant of "width" (in bytes) and replicate it to fill 64bit. |
kvn@3882 | 740 | assert(width == 1 || width == 2 || width == 4, "only byte, short or int types here"); |
kvn@3882 | 741 | int bit_width = width * 8; |
kvn@3882 | 742 | jlong val = con; |
kvn@3882 | 743 | val &= (((jlong) 1) << bit_width) - 1; // mask off sign bits |
kvn@3882 | 744 | while(bit_width < 64) { |
kvn@3882 | 745 | val |= (val << bit_width); |
kvn@3882 | 746 | bit_width <<= 1; |
kvn@3882 | 747 | } |
kvn@3882 | 748 | jdouble dval = *((jdouble*) &val); // coerce to double type |
kvn@3882 | 749 | return dval; |
kvn@3882 | 750 | } |
kvn@3882 | 751 | |
#ifndef PRODUCT
// Debug printout of the nop-padding pseudo instruction (non-product only).
void MachNopNode::format(PhaseRegAlloc*, outputStream* st) const {
  st->print("nop \t# %d bytes pad for loops and calls", _count);
}
#endif
kvn@3577 | 757 | |
// Emit _count bytes of nop padding into the code buffer.
void MachNopNode::emit(CodeBuffer &cbuf, PhaseRegAlloc*) const {
  MacroAssembler _masm(&cbuf);
  __ nop(_count);
}
kvn@3577 | 762 | |
// Size of the emitted padding: one byte per nop.
uint MachNopNode::size(PhaseRegAlloc*) const {
  return _count;
}
kvn@3577 | 766 | |
#ifndef PRODUCT
// Debug printout of the breakpoint pseudo instruction (non-product only).
void MachBreakpointNode::format(PhaseRegAlloc*, outputStream* st) const {
  st->print("# breakpoint");
}
#endif
kvn@3577 | 772 | |
// Emit an int3 software breakpoint instruction.
void MachBreakpointNode::emit(CodeBuffer &cbuf, PhaseRegAlloc* ra_) const {
  MacroAssembler _masm(&cbuf);
  __ int3();
}
kvn@3577 | 777 | |
// Size is obtained from the generic MachNode size computation.
uint MachBreakpointNode::size(PhaseRegAlloc* ra_) const {
  return MachNode::size(ra_);
}
kvn@3577 | 781 | |
kvn@3577 | 782 | %} |
kvn@3577 | 783 | |
encode %{

  // Save RSP into RBP (rbp_mh_SP_save) ahead of a call whose callee may
  // change the stack pointer.
  enc_class preserve_SP %{
    debug_only(int off0 = cbuf.insts_size());
    MacroAssembler _masm(&cbuf);
    // RBP is preserved across all calls, even compiled calls.
    // Use it to preserve RSP in places where the callee might change the SP.
    __ movptr(rbp_mh_SP_save, rsp);
    debug_only(int off1 = cbuf.insts_size());
    assert(off1 - off0 == preserve_SP_size(), "correct size prediction");
  %}

  // Restore RSP from the value saved by preserve_SP.
  enc_class restore_SP %{
    MacroAssembler _masm(&cbuf);
    __ movptr(rsp, rbp_mh_SP_save);
  %}

  // Post-call check (only when VerifyStackAtCalls): verify the stack depth
  // is unchanged by looking for the 0xbadb100d cookie at the expected frame
  // offset; trap with int3 on mismatch.
  enc_class call_epilog %{
    if (VerifyStackAtCalls) {
      // Check that stack depth is unchanged: find majik cookie on stack
      int framesize = ra_->reg2offset_unchecked(OptoReg::add(ra_->_matcher._old_SP, -3*VMRegImpl::slots_per_word));
      MacroAssembler _masm(&cbuf);
      Label L;
      __ cmpptr(Address(rsp, framesize), (int32_t)0xbadb100d);
      __ jccb(Assembler::equal, L);
      // Die if stack mismatch
      __ int3();
      __ bind(L);
    }
  %}

%}
kvn@3390 | 816 | |
kvn@3882 | 817 | |
//----------OPERANDS-----------------------------------------------------------
// Operand definitions must precede instruction definitions for correct parsing
// in the ADLC because operands constitute user defined types which are used in
// instruction definitions.

// Vectors
// 32-bit (4-byte) vector operand, allocated from the vectors_reg class.
operand vecS() %{
  constraint(ALLOC_IN_RC(vectors_reg));
  match(VecS);

  format %{ %}
  interface(REG_INTER);
%}

// 64-bit (8-byte) vector operand, allocated from the vectord_reg class.
operand vecD() %{
  constraint(ALLOC_IN_RC(vectord_reg));
  match(VecD);

  format %{ %}
  interface(REG_INTER);
%}

// 128-bit (16-byte) vector operand, allocated from the vectorx_reg class.
operand vecX() %{
  constraint(ALLOC_IN_RC(vectorx_reg));
  match(VecX);

  format %{ %}
  interface(REG_INTER);
%}

// 256-bit (32-byte) vector operand, allocated from the vectory_reg class.
operand vecY() %{
  constraint(ALLOC_IN_RC(vectory_reg));
  match(VecY);

  format %{ %}
  interface(REG_INTER);
%}
kvn@3882 | 855 | |
kvn@3882 | 856 | |
kvn@3390 | 857 | // INSTRUCTIONS -- Platform independent definitions (same for 32- and 64-bit) |
kvn@3390 | 858 | |
kvn@3577 | 859 | // ============================================================================ |
kvn@3577 | 860 | |
// Unreachable-code trap: matches the ideal Halt node and emits an int3
// breakpoint so execution stops immediately if it ever gets here.
instruct ShouldNotReachHere() %{
  match(Halt);
  format %{ "int3\t# ShouldNotReachHere" %}
  ins_encode %{
    __ int3();
  %}
  ins_pipe(pipe_slow);
%}
kvn@3577 | 869 | |
kvn@3577 | 870 | // ============================================================================ |
kvn@3577 | 871 | |
// ---- AddF: scalar single-precision add ----
// SSE forms (UseAVX == 0) are destructive two-operand: dst = dst + src.
instruct addF_reg(regF dst, regF src) %{
  predicate((UseSSE>=1) && (UseAVX == 0));
  match(Set dst (AddF dst src));

  format %{ "addss $dst, $src" %}
  ins_cost(150);
  ins_encode %{
    __ addss($dst$$XMMRegister, $src$$XMMRegister);
  %}
  ins_pipe(pipe_slow);
%}

// dst = dst + memory operand.
instruct addF_mem(regF dst, memory src) %{
  predicate((UseSSE>=1) && (UseAVX == 0));
  match(Set dst (AddF dst (LoadF src)));

  format %{ "addss $dst, $src" %}
  ins_cost(150);
  ins_encode %{
    __ addss($dst$$XMMRegister, $src$$Address);
  %}
  ins_pipe(pipe_slow);
%}

// dst = dst + constant (loaded from the constant table).
instruct addF_imm(regF dst, immF con) %{
  predicate((UseSSE>=1) && (UseAVX == 0));
  match(Set dst (AddF dst con));
  format %{ "addss $dst, [$constantaddress]\t# load from constant table: float=$con" %}
  ins_cost(150);
  ins_encode %{
    __ addss($dst$$XMMRegister, $constantaddress($con));
  %}
  ins_pipe(pipe_slow);
%}

// AVX forms (UseAVX > 0) are non-destructive three-operand: dst = src1 + src2.
instruct addF_reg_reg(regF dst, regF src1, regF src2) %{
  predicate(UseAVX > 0);
  match(Set dst (AddF src1 src2));

  format %{ "vaddss $dst, $src1, $src2" %}
  ins_cost(150);
  ins_encode %{
    __ vaddss($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister);
  %}
  ins_pipe(pipe_slow);
%}

// dst = src1 + memory operand.
instruct addF_reg_mem(regF dst, regF src1, memory src2) %{
  predicate(UseAVX > 0);
  match(Set dst (AddF src1 (LoadF src2)));

  format %{ "vaddss $dst, $src1, $src2" %}
  ins_cost(150);
  ins_encode %{
    __ vaddss($dst$$XMMRegister, $src1$$XMMRegister, $src2$$Address);
  %}
  ins_pipe(pipe_slow);
%}

// dst = src + constant (loaded from the constant table).
instruct addF_reg_imm(regF dst, regF src, immF con) %{
  predicate(UseAVX > 0);
  match(Set dst (AddF src con));

  format %{ "vaddss $dst, $src, [$constantaddress]\t# load from constant table: float=$con" %}
  ins_cost(150);
  ins_encode %{
    __ vaddss($dst$$XMMRegister, $src$$XMMRegister, $constantaddress($con));
  %}
  ins_pipe(pipe_slow);
%}
kvn@3390 | 942 | |
// ---- AddD: scalar double-precision add ----
// SSE forms (UseAVX == 0) are destructive two-operand: dst = dst + src.
instruct addD_reg(regD dst, regD src) %{
  predicate((UseSSE>=2) && (UseAVX == 0));
  match(Set dst (AddD dst src));

  format %{ "addsd $dst, $src" %}
  ins_cost(150);
  ins_encode %{
    __ addsd($dst$$XMMRegister, $src$$XMMRegister);
  %}
  ins_pipe(pipe_slow);
%}

// dst = dst + memory operand.
instruct addD_mem(regD dst, memory src) %{
  predicate((UseSSE>=2) && (UseAVX == 0));
  match(Set dst (AddD dst (LoadD src)));

  format %{ "addsd $dst, $src" %}
  ins_cost(150);
  ins_encode %{
    __ addsd($dst$$XMMRegister, $src$$Address);
  %}
  ins_pipe(pipe_slow);
%}

// dst = dst + constant (loaded from the constant table).
instruct addD_imm(regD dst, immD con) %{
  predicate((UseSSE>=2) && (UseAVX == 0));
  match(Set dst (AddD dst con));
  format %{ "addsd $dst, [$constantaddress]\t# load from constant table: double=$con" %}
  ins_cost(150);
  ins_encode %{
    __ addsd($dst$$XMMRegister, $constantaddress($con));
  %}
  ins_pipe(pipe_slow);
%}

// AVX forms (UseAVX > 0) are non-destructive three-operand: dst = src1 + src2.
instruct addD_reg_reg(regD dst, regD src1, regD src2) %{
  predicate(UseAVX > 0);
  match(Set dst (AddD src1 src2));

  format %{ "vaddsd $dst, $src1, $src2" %}
  ins_cost(150);
  ins_encode %{
    __ vaddsd($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister);
  %}
  ins_pipe(pipe_slow);
%}

// dst = src1 + memory operand.
instruct addD_reg_mem(regD dst, regD src1, memory src2) %{
  predicate(UseAVX > 0);
  match(Set dst (AddD src1 (LoadD src2)));

  format %{ "vaddsd $dst, $src1, $src2" %}
  ins_cost(150);
  ins_encode %{
    __ vaddsd($dst$$XMMRegister, $src1$$XMMRegister, $src2$$Address);
  %}
  ins_pipe(pipe_slow);
%}

// dst = src + constant (loaded from the constant table).
instruct addD_reg_imm(regD dst, regD src, immD con) %{
  predicate(UseAVX > 0);
  match(Set dst (AddD src con));

  format %{ "vaddsd $dst, $src, [$constantaddress]\t# load from constant table: double=$con" %}
  ins_cost(150);
  ins_encode %{
    __ vaddsd($dst$$XMMRegister, $src$$XMMRegister, $constantaddress($con));
  %}
  ins_pipe(pipe_slow);
%}
kvn@3390 | 1013 | |
// ---- SubF: scalar single-precision subtract ----
// SSE forms (UseAVX == 0) are destructive two-operand: dst = dst - src.
instruct subF_reg(regF dst, regF src) %{
  predicate((UseSSE>=1) && (UseAVX == 0));
  match(Set dst (SubF dst src));

  format %{ "subss $dst, $src" %}
  ins_cost(150);
  ins_encode %{
    __ subss($dst$$XMMRegister, $src$$XMMRegister);
  %}
  ins_pipe(pipe_slow);
%}

// dst = dst - memory operand.
instruct subF_mem(regF dst, memory src) %{
  predicate((UseSSE>=1) && (UseAVX == 0));
  match(Set dst (SubF dst (LoadF src)));

  format %{ "subss $dst, $src" %}
  ins_cost(150);
  ins_encode %{
    __ subss($dst$$XMMRegister, $src$$Address);
  %}
  ins_pipe(pipe_slow);
%}

// dst = dst - constant (loaded from the constant table).
instruct subF_imm(regF dst, immF con) %{
  predicate((UseSSE>=1) && (UseAVX == 0));
  match(Set dst (SubF dst con));
  format %{ "subss $dst, [$constantaddress]\t# load from constant table: float=$con" %}
  ins_cost(150);
  ins_encode %{
    __ subss($dst$$XMMRegister, $constantaddress($con));
  %}
  ins_pipe(pipe_slow);
%}

// AVX forms (UseAVX > 0) are non-destructive three-operand: dst = src1 - src2.
instruct subF_reg_reg(regF dst, regF src1, regF src2) %{
  predicate(UseAVX > 0);
  match(Set dst (SubF src1 src2));

  format %{ "vsubss $dst, $src1, $src2" %}
  ins_cost(150);
  ins_encode %{
    __ vsubss($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister);
  %}
  ins_pipe(pipe_slow);
%}

// dst = src1 - memory operand.
instruct subF_reg_mem(regF dst, regF src1, memory src2) %{
  predicate(UseAVX > 0);
  match(Set dst (SubF src1 (LoadF src2)));

  format %{ "vsubss $dst, $src1, $src2" %}
  ins_cost(150);
  ins_encode %{
    __ vsubss($dst$$XMMRegister, $src1$$XMMRegister, $src2$$Address);
  %}
  ins_pipe(pipe_slow);
%}

// dst = src - constant (loaded from the constant table).
instruct subF_reg_imm(regF dst, regF src, immF con) %{
  predicate(UseAVX > 0);
  match(Set dst (SubF src con));

  format %{ "vsubss $dst, $src, [$constantaddress]\t# load from constant table: float=$con" %}
  ins_cost(150);
  ins_encode %{
    __ vsubss($dst$$XMMRegister, $src$$XMMRegister, $constantaddress($con));
  %}
  ins_pipe(pipe_slow);
%}
kvn@3390 | 1084 | |
// ---- SubD: scalar double-precision subtract ----
// SSE forms (UseAVX == 0) are destructive two-operand: dst = dst - src.
instruct subD_reg(regD dst, regD src) %{
  predicate((UseSSE>=2) && (UseAVX == 0));
  match(Set dst (SubD dst src));

  format %{ "subsd $dst, $src" %}
  ins_cost(150);
  ins_encode %{
    __ subsd($dst$$XMMRegister, $src$$XMMRegister);
  %}
  ins_pipe(pipe_slow);
%}

// dst = dst - memory operand.
instruct subD_mem(regD dst, memory src) %{
  predicate((UseSSE>=2) && (UseAVX == 0));
  match(Set dst (SubD dst (LoadD src)));

  format %{ "subsd $dst, $src" %}
  ins_cost(150);
  ins_encode %{
    __ subsd($dst$$XMMRegister, $src$$Address);
  %}
  ins_pipe(pipe_slow);
%}

// dst = dst - constant (loaded from the constant table).
instruct subD_imm(regD dst, immD con) %{
  predicate((UseSSE>=2) && (UseAVX == 0));
  match(Set dst (SubD dst con));
  format %{ "subsd $dst, [$constantaddress]\t# load from constant table: double=$con" %}
  ins_cost(150);
  ins_encode %{
    __ subsd($dst$$XMMRegister, $constantaddress($con));
  %}
  ins_pipe(pipe_slow);
%}

// AVX forms (UseAVX > 0) are non-destructive three-operand: dst = src1 - src2.
instruct subD_reg_reg(regD dst, regD src1, regD src2) %{
  predicate(UseAVX > 0);
  match(Set dst (SubD src1 src2));

  format %{ "vsubsd $dst, $src1, $src2" %}
  ins_cost(150);
  ins_encode %{
    __ vsubsd($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister);
  %}
  ins_pipe(pipe_slow);
%}

// dst = src1 - memory operand.
instruct subD_reg_mem(regD dst, regD src1, memory src2) %{
  predicate(UseAVX > 0);
  match(Set dst (SubD src1 (LoadD src2)));

  format %{ "vsubsd $dst, $src1, $src2" %}
  ins_cost(150);
  ins_encode %{
    __ vsubsd($dst$$XMMRegister, $src1$$XMMRegister, $src2$$Address);
  %}
  ins_pipe(pipe_slow);
%}

// dst = src - constant (loaded from the constant table).
instruct subD_reg_imm(regD dst, regD src, immD con) %{
  predicate(UseAVX > 0);
  match(Set dst (SubD src con));

  format %{ "vsubsd $dst, $src, [$constantaddress]\t# load from constant table: double=$con" %}
  ins_cost(150);
  ins_encode %{
    __ vsubsd($dst$$XMMRegister, $src$$XMMRegister, $constantaddress($con));
  %}
  ins_pipe(pipe_slow);
%}
kvn@3390 | 1155 | |
// ---- MulF: scalar single-precision multiply ----
// SSE forms (UseAVX == 0) are destructive two-operand: dst = dst * src.
instruct mulF_reg(regF dst, regF src) %{
  predicate((UseSSE>=1) && (UseAVX == 0));
  match(Set dst (MulF dst src));

  format %{ "mulss $dst, $src" %}
  ins_cost(150);
  ins_encode %{
    __ mulss($dst$$XMMRegister, $src$$XMMRegister);
  %}
  ins_pipe(pipe_slow);
%}

// dst = dst * memory operand.
instruct mulF_mem(regF dst, memory src) %{
  predicate((UseSSE>=1) && (UseAVX == 0));
  match(Set dst (MulF dst (LoadF src)));

  format %{ "mulss $dst, $src" %}
  ins_cost(150);
  ins_encode %{
    __ mulss($dst$$XMMRegister, $src$$Address);
  %}
  ins_pipe(pipe_slow);
%}

// dst = dst * constant (loaded from the constant table).
instruct mulF_imm(regF dst, immF con) %{
  predicate((UseSSE>=1) && (UseAVX == 0));
  match(Set dst (MulF dst con));
  format %{ "mulss $dst, [$constantaddress]\t# load from constant table: float=$con" %}
  ins_cost(150);
  ins_encode %{
    __ mulss($dst$$XMMRegister, $constantaddress($con));
  %}
  ins_pipe(pipe_slow);
%}

// AVX forms (UseAVX > 0) are non-destructive three-operand: dst = src1 * src2.
instruct mulF_reg_reg(regF dst, regF src1, regF src2) %{
  predicate(UseAVX > 0);
  match(Set dst (MulF src1 src2));

  format %{ "vmulss $dst, $src1, $src2" %}
  ins_cost(150);
  ins_encode %{
    __ vmulss($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister);
  %}
  ins_pipe(pipe_slow);
%}

// dst = src1 * memory operand.
instruct mulF_reg_mem(regF dst, regF src1, memory src2) %{
  predicate(UseAVX > 0);
  match(Set dst (MulF src1 (LoadF src2)));

  format %{ "vmulss $dst, $src1, $src2" %}
  ins_cost(150);
  ins_encode %{
    __ vmulss($dst$$XMMRegister, $src1$$XMMRegister, $src2$$Address);
  %}
  ins_pipe(pipe_slow);
%}

// dst = src * constant (loaded from the constant table).
instruct mulF_reg_imm(regF dst, regF src, immF con) %{
  predicate(UseAVX > 0);
  match(Set dst (MulF src con));

  format %{ "vmulss $dst, $src, [$constantaddress]\t# load from constant table: float=$con" %}
  ins_cost(150);
  ins_encode %{
    __ vmulss($dst$$XMMRegister, $src$$XMMRegister, $constantaddress($con));
  %}
  ins_pipe(pipe_slow);
%}
kvn@3390 | 1226 | |
// Multiply double-precision float, SSE2 two-operand (destructive) forms: dst *= src.
// Selected only when AVX is unavailable (UseAVX == 0).

// dst *= src, both in XMM registers.
instruct mulD_reg(regD dst, regD src) %{
  predicate((UseSSE>=2) && (UseAVX == 0));
  match(Set dst (MulD dst src));

  format %{ "mulsd $dst, $src" %}
  ins_cost(150);
  ins_encode %{
    __ mulsd($dst$$XMMRegister, $src$$XMMRegister);
  %}
  ins_pipe(pipe_slow);
%}

// dst *= [src]; folds the double load into the multiply.
instruct mulD_mem(regD dst, memory src) %{
  predicate((UseSSE>=2) && (UseAVX == 0));
  match(Set dst (MulD dst (LoadD src)));

  format %{ "mulsd $dst, $src" %}
  ins_cost(150);
  ins_encode %{
    __ mulsd($dst$$XMMRegister, $src$$Address);
  %}
  ins_pipe(pipe_slow);
%}

// dst *= double constant, read from the constant table.
instruct mulD_imm(regD dst, immD con) %{
  predicate((UseSSE>=2) && (UseAVX == 0));
  match(Set dst (MulD dst con));
  format %{ "mulsd $dst, [$constantaddress]\t# load from constant table: double=$con" %}
  ins_cost(150);
  ins_encode %{
    __ mulsd($dst$$XMMRegister, $constantaddress($con));
  %}
  ins_pipe(pipe_slow);
%}
kvn@3390 | 1261 | |
// Multiply double-precision float, AVX three-operand (non-destructive) forms.

// dst = src1 * src2, all operands in XMM registers.
instruct mulD_reg_reg(regD dst, regD src1, regD src2) %{
  predicate(UseAVX > 0);
  match(Set dst (MulD src1 src2));

  format %{ "vmulsd $dst, $src1, $src2" %}
  ins_cost(150);
  ins_encode %{
    __ vmulsd($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister);
  %}
  ins_pipe(pipe_slow);
%}

// dst = src1 * [src2]; folds the double load into the multiply.
instruct mulD_reg_mem(regD dst, regD src1, memory src2) %{
  predicate(UseAVX > 0);
  match(Set dst (MulD src1 (LoadD src2)));

  format %{ "vmulsd $dst, $src1, $src2" %}
  ins_cost(150);
  ins_encode %{
    __ vmulsd($dst$$XMMRegister, $src1$$XMMRegister, $src2$$Address);
  %}
  ins_pipe(pipe_slow);
%}

// dst = src * double constant, read from the constant table.
instruct mulD_reg_imm(regD dst, regD src, immD con) %{
  predicate(UseAVX > 0);
  match(Set dst (MulD src con));

  format %{ "vmulsd $dst, $src, [$constantaddress]\t# load from constant table: double=$con" %}
  ins_cost(150);
  ins_encode %{
    __ vmulsd($dst$$XMMRegister, $src$$XMMRegister, $constantaddress($con));
  %}
  ins_pipe(pipe_slow);
%}
kvn@3390 | 1297 | |
// Divide single-precision float, SSE two-operand (destructive) forms: dst /= src.
// Selected only when AVX is unavailable (UseAVX == 0).

// dst /= src, both in XMM registers.
instruct divF_reg(regF dst, regF src) %{
  predicate((UseSSE>=1) && (UseAVX == 0));
  match(Set dst (DivF dst src));

  format %{ "divss $dst, $src" %}
  ins_cost(150);
  ins_encode %{
    __ divss($dst$$XMMRegister, $src$$XMMRegister);
  %}
  ins_pipe(pipe_slow);
%}

// dst /= [src]; folds the float load into the divide.
instruct divF_mem(regF dst, memory src) %{
  predicate((UseSSE>=1) && (UseAVX == 0));
  match(Set dst (DivF dst (LoadF src)));

  format %{ "divss $dst, $src" %}
  ins_cost(150);
  ins_encode %{
    __ divss($dst$$XMMRegister, $src$$Address);
  %}
  ins_pipe(pipe_slow);
%}

// dst /= float constant, read from the constant table.
instruct divF_imm(regF dst, immF con) %{
  predicate((UseSSE>=1) && (UseAVX == 0));
  match(Set dst (DivF dst con));
  format %{ "divss $dst, [$constantaddress]\t# load from constant table: float=$con" %}
  ins_cost(150);
  ins_encode %{
    __ divss($dst$$XMMRegister, $constantaddress($con));
  %}
  ins_pipe(pipe_slow);
%}
kvn@3390 | 1332 | |
// Divide single-precision float, AVX three-operand (non-destructive) forms.

// dst = src1 / src2, all operands in XMM registers.
instruct divF_reg_reg(regF dst, regF src1, regF src2) %{
  predicate(UseAVX > 0);
  match(Set dst (DivF src1 src2));

  format %{ "vdivss $dst, $src1, $src2" %}
  ins_cost(150);
  ins_encode %{
    __ vdivss($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister);
  %}
  ins_pipe(pipe_slow);
%}

// dst = src1 / [src2]; folds the float load into the divide.
instruct divF_reg_mem(regF dst, regF src1, memory src2) %{
  predicate(UseAVX > 0);
  match(Set dst (DivF src1 (LoadF src2)));

  format %{ "vdivss $dst, $src1, $src2" %}
  ins_cost(150);
  ins_encode %{
    __ vdivss($dst$$XMMRegister, $src1$$XMMRegister, $src2$$Address);
  %}
  ins_pipe(pipe_slow);
%}

// dst = src / float constant, read from the constant table.
instruct divF_reg_imm(regF dst, regF src, immF con) %{
  predicate(UseAVX > 0);
  match(Set dst (DivF src con));

  format %{ "vdivss $dst, $src, [$constantaddress]\t# load from constant table: float=$con" %}
  ins_cost(150);
  ins_encode %{
    __ vdivss($dst$$XMMRegister, $src$$XMMRegister, $constantaddress($con));
  %}
  ins_pipe(pipe_slow);
%}
kvn@3390 | 1368 | |
// Divide double-precision float, SSE2 two-operand (destructive) forms: dst /= src.
// Selected only when AVX is unavailable (UseAVX == 0).

// dst /= src, both in XMM registers.
instruct divD_reg(regD dst, regD src) %{
  predicate((UseSSE>=2) && (UseAVX == 0));
  match(Set dst (DivD dst src));

  format %{ "divsd $dst, $src" %}
  ins_cost(150);
  ins_encode %{
    __ divsd($dst$$XMMRegister, $src$$XMMRegister);
  %}
  ins_pipe(pipe_slow);
%}

// dst /= [src]; folds the double load into the divide.
instruct divD_mem(regD dst, memory src) %{
  predicate((UseSSE>=2) && (UseAVX == 0));
  match(Set dst (DivD dst (LoadD src)));

  format %{ "divsd $dst, $src" %}
  ins_cost(150);
  ins_encode %{
    __ divsd($dst$$XMMRegister, $src$$Address);
  %}
  ins_pipe(pipe_slow);
%}

// dst /= double constant, read from the constant table.
instruct divD_imm(regD dst, immD con) %{
  predicate((UseSSE>=2) && (UseAVX == 0));
  match(Set dst (DivD dst con));
  format %{ "divsd $dst, [$constantaddress]\t# load from constant table: double=$con" %}
  ins_cost(150);
  ins_encode %{
    __ divsd($dst$$XMMRegister, $constantaddress($con));
  %}
  ins_pipe(pipe_slow);
%}
kvn@3390 | 1403 | |
// Divide double-precision float, AVX three-operand (non-destructive) forms.

// dst = src1 / src2, all operands in XMM registers.
instruct divD_reg_reg(regD dst, regD src1, regD src2) %{
  predicate(UseAVX > 0);
  match(Set dst (DivD src1 src2));

  format %{ "vdivsd $dst, $src1, $src2" %}
  ins_cost(150);
  ins_encode %{
    __ vdivsd($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister);
  %}
  ins_pipe(pipe_slow);
%}

// dst = src1 / [src2]; folds the double load into the divide.
instruct divD_reg_mem(regD dst, regD src1, memory src2) %{
  predicate(UseAVX > 0);
  match(Set dst (DivD src1 (LoadD src2)));

  format %{ "vdivsd $dst, $src1, $src2" %}
  ins_cost(150);
  ins_encode %{
    __ vdivsd($dst$$XMMRegister, $src1$$XMMRegister, $src2$$Address);
  %}
  ins_pipe(pipe_slow);
%}

// dst = src / double constant, read from the constant table.
instruct divD_reg_imm(regD dst, regD src, immD con) %{
  predicate(UseAVX > 0);
  match(Set dst (DivD src con));

  format %{ "vdivsd $dst, $src, [$constantaddress]\t# load from constant table: double=$con" %}
  ins_cost(150);
  ins_encode %{
    __ vdivsd($dst$$XMMRegister, $src$$XMMRegister, $constantaddress($con));
  %}
  ins_pipe(pipe_slow);
%}
kvn@3390 | 1439 | |
// Absolute value of a float: clear the sign bit by AND-ing with the
// float_signmask constant (0x7fffffff).

// SSE destructive form: dst = |dst|.
instruct absF_reg(regF dst) %{
  predicate((UseSSE>=1) && (UseAVX == 0));
  match(Set dst (AbsF dst));
  ins_cost(150);
  format %{ "andps $dst, [0x7fffffff]\t# abs float by sign masking" %}
  ins_encode %{
    __ andps($dst$$XMMRegister, ExternalAddress(float_signmask()));
  %}
  ins_pipe(pipe_slow);
%}

// AVX non-destructive form: dst = |src|.
instruct absF_reg_reg(regF dst, regF src) %{
  predicate(UseAVX > 0);
  match(Set dst (AbsF src));
  ins_cost(150);
  format %{ "vandps $dst, $src, [0x7fffffff]\t# abs float by sign masking" %}
  ins_encode %{
    bool vector256 = false;  // scalar op: use the 128-bit encoding
    __ vandps($dst$$XMMRegister, $src$$XMMRegister,
              ExternalAddress(float_signmask()), vector256);
  %}
  ins_pipe(pipe_slow);
%}
kvn@3390 | 1463 | |
// Absolute value of a double: clear the sign bit by AND-ing with the
// double_signmask constant (0x7fffffffffffffff).

// SSE2 destructive form: dst = |dst|.
instruct absD_reg(regD dst) %{
  predicate((UseSSE>=2) && (UseAVX == 0));
  match(Set dst (AbsD dst));
  ins_cost(150);
  format %{ "andpd $dst, [0x7fffffffffffffff]\t"
            "# abs double by sign masking" %}
  ins_encode %{
    __ andpd($dst$$XMMRegister, ExternalAddress(double_signmask()));
  %}
  ins_pipe(pipe_slow);
%}

// AVX non-destructive form: dst = |src|.
instruct absD_reg_reg(regD dst, regD src) %{
  predicate(UseAVX > 0);
  match(Set dst (AbsD src));
  ins_cost(150);
  format %{ "vandpd $dst, $src, [0x7fffffffffffffff]\t"
            "# abs double by sign masking" %}
  ins_encode %{
    bool vector256 = false;  // scalar op: use the 128-bit encoding
    __ vandpd($dst$$XMMRegister, $src$$XMMRegister,
              ExternalAddress(double_signmask()), vector256);
  %}
  ins_pipe(pipe_slow);
%}
kvn@3390 | 1489 | |
// Negate a float: flip the sign bit by XOR-ing with the float_signflip
// constant (0x80000000).

// SSE destructive form: dst = -dst.
instruct negF_reg(regF dst) %{
  predicate((UseSSE>=1) && (UseAVX == 0));
  match(Set dst (NegF dst));
  ins_cost(150);
  format %{ "xorps $dst, [0x80000000]\t# neg float by sign flipping" %}
  ins_encode %{
    __ xorps($dst$$XMMRegister, ExternalAddress(float_signflip()));
  %}
  ins_pipe(pipe_slow);
%}

// AVX non-destructive form: dst = -src.
instruct negF_reg_reg(regF dst, regF src) %{
  predicate(UseAVX > 0);
  match(Set dst (NegF src));
  ins_cost(150);
  format %{ "vxorps $dst, $src, [0x80000000]\t# neg float by sign flipping" %}
  ins_encode %{
    bool vector256 = false;  // scalar op: use the 128-bit encoding
    __ vxorps($dst$$XMMRegister, $src$$XMMRegister,
              ExternalAddress(float_signflip()), vector256);
  %}
  ins_pipe(pipe_slow);
%}
kvn@3390 | 1513 | |
// Negate a double: flip the sign bit by XOR-ing with the double_signflip
// constant (0x8000000000000000).

// SSE2 destructive form: dst = -dst.
instruct negD_reg(regD dst) %{
  predicate((UseSSE>=2) && (UseAVX == 0));
  match(Set dst (NegD dst));
  ins_cost(150);
  format %{ "xorpd $dst, [0x8000000000000000]\t"
            "# neg double by sign flipping" %}
  ins_encode %{
    __ xorpd($dst$$XMMRegister, ExternalAddress(double_signflip()));
  %}
  ins_pipe(pipe_slow);
%}

// AVX non-destructive form: dst = -src.
instruct negD_reg_reg(regD dst, regD src) %{
  predicate(UseAVX > 0);
  match(Set dst (NegD src));
  ins_cost(150);
  format %{ "vxorpd $dst, $src, [0x8000000000000000]\t"
            "# neg double by sign flipping" %}
  ins_encode %{
    bool vector256 = false;  // scalar op: use the 128-bit encoding
    __ vxorpd($dst$$XMMRegister, $src$$XMMRegister,
              ExternalAddress(double_signflip()), vector256);
  %}
  ins_pipe(pipe_slow);
%}
kvn@3390 | 1539 | |
// Float square root. Matched as ConvD2F(SqrtD(ConvF2D x)) — the pattern a
// float sqrt takes in the ideal graph — and emitted as a single sqrtss.

// dst = sqrt(src), both in XMM registers.
instruct sqrtF_reg(regF dst, regF src) %{
  predicate(UseSSE>=1);
  match(Set dst (ConvD2F (SqrtD (ConvF2D src))));

  format %{ "sqrtss $dst, $src" %}
  ins_cost(150);
  ins_encode %{
    __ sqrtss($dst$$XMMRegister, $src$$XMMRegister);
  %}
  ins_pipe(pipe_slow);
%}

// dst = sqrt([src]); folds the float load into the sqrt.
instruct sqrtF_mem(regF dst, memory src) %{
  predicate(UseSSE>=1);
  match(Set dst (ConvD2F (SqrtD (ConvF2D (LoadF src)))));

  format %{ "sqrtss $dst, $src" %}
  ins_cost(150);
  ins_encode %{
    __ sqrtss($dst$$XMMRegister, $src$$Address);
  %}
  ins_pipe(pipe_slow);
%}

// dst = sqrt(float constant), read from the constant table.
instruct sqrtF_imm(regF dst, immF con) %{
  predicate(UseSSE>=1);
  match(Set dst (ConvD2F (SqrtD (ConvF2D con))));
  format %{ "sqrtss $dst, [$constantaddress]\t# load from constant table: float=$con" %}
  ins_cost(150);
  ins_encode %{
    __ sqrtss($dst$$XMMRegister, $constantaddress($con));
  %}
  ins_pipe(pipe_slow);
%}
kvn@3390 | 1574 | |
// Double square root, emitted as sqrtsd.

// dst = sqrt(src), both in XMM registers.
instruct sqrtD_reg(regD dst, regD src) %{
  predicate(UseSSE>=2);
  match(Set dst (SqrtD src));

  format %{ "sqrtsd $dst, $src" %}
  ins_cost(150);
  ins_encode %{
    __ sqrtsd($dst$$XMMRegister, $src$$XMMRegister);
  %}
  ins_pipe(pipe_slow);
%}

// dst = sqrt([src]); folds the double load into the sqrt.
instruct sqrtD_mem(regD dst, memory src) %{
  predicate(UseSSE>=2);
  match(Set dst (SqrtD (LoadD src)));

  format %{ "sqrtsd $dst, $src" %}
  ins_cost(150);
  ins_encode %{
    __ sqrtsd($dst$$XMMRegister, $src$$Address);
  %}
  ins_pipe(pipe_slow);
%}

// dst = sqrt(double constant), read from the constant table.
instruct sqrtD_imm(regD dst, immD con) %{
  predicate(UseSSE>=2);
  match(Set dst (SqrtD con));
  format %{ "sqrtsd $dst, [$constantaddress]\t# load from constant table: double=$con" %}
  ins_cost(150);
  ins_encode %{
    __ sqrtsd($dst$$XMMRegister, $constantaddress($con));
  %}
  ins_pipe(pipe_slow);
%}
kvn@3390 | 1609 | |
kvn@3882 | 1610 | |
kvn@3882 | 1611 | // ====================VECTOR INSTRUCTIONS===================================== |
kvn@3882 | 1612 | |
// Load vectors from memory; one instruct per vector width, selected on the
// LoadVector node's memory_size().

// Load vectors (4 bytes long)
instruct loadV4(vecS dst, memory mem) %{
  predicate(n->as_LoadVector()->memory_size() == 4);
  match(Set dst (LoadVector mem));
  ins_cost(125);
  format %{ "movd $dst,$mem\t! load vector (4 bytes)" %}
  ins_encode %{
    __ movdl($dst$$XMMRegister, $mem$$Address);
  %}
  ins_pipe( pipe_slow );
%}

// Load vectors (8 bytes long)
instruct loadV8(vecD dst, memory mem) %{
  predicate(n->as_LoadVector()->memory_size() == 8);
  match(Set dst (LoadVector mem));
  ins_cost(125);
  format %{ "movq $dst,$mem\t! load vector (8 bytes)" %}
  ins_encode %{
    __ movq($dst$$XMMRegister, $mem$$Address);
  %}
  ins_pipe( pipe_slow );
%}

// Load vectors (16 bytes long); movdqu tolerates unaligned addresses.
instruct loadV16(vecX dst, memory mem) %{
  predicate(n->as_LoadVector()->memory_size() == 16);
  match(Set dst (LoadVector mem));
  ins_cost(125);
  format %{ "movdqu $dst,$mem\t! load vector (16 bytes)" %}
  ins_encode %{
    __ movdqu($dst$$XMMRegister, $mem$$Address);
  %}
  ins_pipe( pipe_slow );
%}

// Load vectors (32 bytes long); AVX unaligned 256-bit load.
instruct loadV32(vecY dst, memory mem) %{
  predicate(n->as_LoadVector()->memory_size() == 32);
  match(Set dst (LoadVector mem));
  ins_cost(125);
  format %{ "vmovdqu $dst,$mem\t! load vector (32 bytes)" %}
  ins_encode %{
    __ vmovdqu($dst$$XMMRegister, $mem$$Address);
  %}
  ins_pipe( pipe_slow );
%}
kvn@3882 | 1660 | |
// Store vectors to memory; one instruct per vector width, selected on the
// StoreVector node's memory_size().

// Store vector (4 bytes long).
instruct storeV4(memory mem, vecS src) %{
  predicate(n->as_StoreVector()->memory_size() == 4);
  match(Set mem (StoreVector mem src));
  ins_cost(145);
  format %{ "movd $mem,$src\t! store vector (4 bytes)" %}
  ins_encode %{
    __ movdl($mem$$Address, $src$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}

// Store vector (8 bytes long).
instruct storeV8(memory mem, vecD src) %{
  predicate(n->as_StoreVector()->memory_size() == 8);
  match(Set mem (StoreVector mem src));
  ins_cost(145);
  format %{ "movq $mem,$src\t! store vector (8 bytes)" %}
  ins_encode %{
    __ movq($mem$$Address, $src$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}

// Store vector (16 bytes long); movdqu tolerates unaligned addresses.
instruct storeV16(memory mem, vecX src) %{
  predicate(n->as_StoreVector()->memory_size() == 16);
  match(Set mem (StoreVector mem src));
  ins_cost(145);
  format %{ "movdqu $mem,$src\t! store vector (16 bytes)" %}
  ins_encode %{
    __ movdqu($mem$$Address, $src$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}

// Store vector (32 bytes long); AVX unaligned 256-bit store.
instruct storeV32(memory mem, vecY src) %{
  predicate(n->as_StoreVector()->memory_size() == 32);
  match(Set mem (StoreVector mem src));
  ins_cost(145);
  format %{ "vmovdqu $mem,$src\t! store vector (32 bytes)" %}
  ins_encode %{
    __ vmovdqu($mem$$Address, $src$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}
kvn@3882 | 1705 | |
// Replicate byte scalar to be vector.
// Sequence: movd puts the GPR value in the low dword, punpcklbw duplicates
// each byte into a word, pshuflw broadcasts word 0 across the low 64 bits;
// wider vectors then widen with punpcklqdq and (for 256-bit) vinserti128h.

// 4-byte vector: four copies of the low byte of src.
instruct Repl4B(vecS dst, rRegI src) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateB src));
  format %{ "movd $dst,$src\n\t"
            "punpcklbw $dst,$dst\n\t"
            "pshuflw $dst,$dst,0x00\t! replicate4B" %}
  ins_encode %{
    __ movdl($dst$$XMMRegister, $src$$Register);
    __ punpcklbw($dst$$XMMRegister, $dst$$XMMRegister);
    __ pshuflw($dst$$XMMRegister, $dst$$XMMRegister, 0x00);
  %}
  ins_pipe( pipe_slow );
%}

// 8-byte vector: eight copies of the low byte of src.
instruct Repl8B(vecD dst, rRegI src) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateB src));
  format %{ "movd $dst,$src\n\t"
            "punpcklbw $dst,$dst\n\t"
            "pshuflw $dst,$dst,0x00\t! replicate8B" %}
  ins_encode %{
    __ movdl($dst$$XMMRegister, $src$$Register);
    __ punpcklbw($dst$$XMMRegister, $dst$$XMMRegister);
    __ pshuflw($dst$$XMMRegister, $dst$$XMMRegister, 0x00);
  %}
  ins_pipe( pipe_slow );
%}

// 16-byte vector: broadcast low 64 bits to the full 128 bits with punpcklqdq.
instruct Repl16B(vecX dst, rRegI src) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (ReplicateB src));
  format %{ "movd $dst,$src\n\t"
            "punpcklbw $dst,$dst\n\t"
            "pshuflw $dst,$dst,0x00\n\t"
            "punpcklqdq $dst,$dst\t! replicate16B" %}
  ins_encode %{
    __ movdl($dst$$XMMRegister, $src$$Register);
    __ punpcklbw($dst$$XMMRegister, $dst$$XMMRegister);
    __ pshuflw($dst$$XMMRegister, $dst$$XMMRegister, 0x00);
    __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}

// 32-byte vector: mirror the low 128-bit lane into the high lane.
instruct Repl32B(vecY dst, rRegI src) %{
  predicate(n->as_Vector()->length() == 32);
  match(Set dst (ReplicateB src));
  format %{ "movd $dst,$src\n\t"
            "punpcklbw $dst,$dst\n\t"
            "pshuflw $dst,$dst,0x00\n\t"
            "punpcklqdq $dst,$dst\n\t"
            "vinserti128h $dst,$dst,$dst\t! replicate32B" %}
  ins_encode %{
    __ movdl($dst$$XMMRegister, $src$$Register);
    __ punpcklbw($dst$$XMMRegister, $dst$$XMMRegister);
    __ pshuflw($dst$$XMMRegister, $dst$$XMMRegister, 0x00);
    __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister);
    __ vinserti128h($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}
kvn@3882 | 1768 | |
// Replicate byte scalar immediate to be vector by loading from const table.
// replicate4_imm/replicate8_imm build a 4/8-byte constant holding repeated
// copies of the immediate (element size 1 byte here).

// 4-byte vector of the immediate.
instruct Repl4B_imm(vecS dst, immI con) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateB con));
  format %{ "movdl $dst,[$constantaddress]\t! replicate4B($con)" %}
  ins_encode %{
    __ movdl($dst$$XMMRegister, $constantaddress(replicate4_imm($con$$constant, 1)));
  %}
  ins_pipe( pipe_slow );
%}

// 8-byte vector of the immediate.
instruct Repl8B_imm(vecD dst, immI con) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateB con));
  format %{ "movq $dst,[$constantaddress]\t! replicate8B($con)" %}
  ins_encode %{
    __ movq($dst$$XMMRegister, $constantaddress(replicate8_imm($con$$constant, 1)));
  %}
  ins_pipe( pipe_slow );
%}

// 16-byte vector: load 8 copies, then duplicate into the high 64 bits.
instruct Repl16B_imm(vecX dst, immI con) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (ReplicateB con));
  format %{ "movq $dst,[$constantaddress]\n\t"
            "punpcklqdq $dst,$dst\t! replicate16B($con)" %}
  ins_encode %{
    __ movq($dst$$XMMRegister, $constantaddress(replicate8_imm($con$$constant, 1)));
    __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}
kvn@3882 | 1801 | |
// Replicate a byte immediate into a 32-byte (256-bit) vector: load eight
// copies from the constant table, widen to 128 bits with punpcklqdq, then
// mirror the low 128-bit lane into the high lane with vinserti128h.
// Fix: format tag read "lreplicate32B" — a typo; all sibling instructs
// (Repl16B_imm, Repl32B, ...) tag their disassembly "replicateNN".
instruct Repl32B_imm(vecY dst, immI con) %{
  predicate(n->as_Vector()->length() == 32);
  match(Set dst (ReplicateB con));
  format %{ "movq $dst,[$constantaddress]\n\t"
            "punpcklqdq $dst,$dst\n\t"
            "vinserti128h $dst,$dst,$dst\t! replicate32B($con)" %}
  ins_encode %{
    __ movq($dst$$XMMRegister, $constantaddress(replicate8_imm($con$$constant, 1)));
    __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister);
    __ vinserti128h($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}
kvn@3882 | 1815 | |
// Replicate byte scalar zero to be vector: pxor dst,dst zeroes the register
// regardless of vector length, so one encoding serves 4/8/16-byte forms.

// 4-byte zero vector.
instruct Repl4B_zero(vecS dst, immI0 zero) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateB zero));
  format %{ "pxor $dst,$dst\t! replicate4B zero" %}
  ins_encode %{
    __ pxor($dst$$XMMRegister, $dst$$XMMRegister);
  %}
  ins_pipe( fpu_reg_reg );
%}

// 8-byte zero vector.
instruct Repl8B_zero(vecD dst, immI0 zero) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateB zero));
  format %{ "pxor $dst,$dst\t! replicate8B zero" %}
  ins_encode %{
    __ pxor($dst$$XMMRegister, $dst$$XMMRegister);
  %}
  ins_pipe( fpu_reg_reg );
%}

// 16-byte zero vector.
instruct Repl16B_zero(vecX dst, immI0 zero) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (ReplicateB zero));
  format %{ "pxor $dst,$dst\t! replicate16B zero" %}
  ins_encode %{
    __ pxor($dst$$XMMRegister, $dst$$XMMRegister);
  %}
  ins_pipe( fpu_reg_reg );
%}
kvn@3882 | 1846 | |
// 32-byte (256-bit) zero vector.
instruct Repl32B_zero(vecY dst, immI0 zero) %{
  predicate(n->as_Vector()->length() == 32);
  match(Set dst (ReplicateB zero));
  format %{ "vpxor $dst,$dst,$dst\t! replicate32B zero" %}
  ins_encode %{
    // 256-bit vpxor is AVX2-only (original AVX had only vxorpd/vxorps for
    // 256-bit operands); vector256 == true selects the 256-bit encoding.
    bool vector256 = true;
    __ vpxor($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister, vector256);
  %}
  ins_pipe( fpu_reg_reg );
%}
kvn@3882 | 1858 | |
// Replicate char/short (2 byte) scalar to be vector.
// pshuflw broadcasts word 0 across the low 64 bits; wider vectors widen
// with punpcklqdq and (for 256-bit) vinserti128h.

// 2-element (4-byte) vector.
instruct Repl2S(vecS dst, rRegI src) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateS src));
  format %{ "movd $dst,$src\n\t"
            "pshuflw $dst,$dst,0x00\t! replicate2S" %}
  ins_encode %{
    __ movdl($dst$$XMMRegister, $src$$Register);
    __ pshuflw($dst$$XMMRegister, $dst$$XMMRegister, 0x00);
  %}
  ins_pipe( fpu_reg_reg );
%}

// 4-element (8-byte) vector.
instruct Repl4S(vecD dst, rRegI src) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateS src));
  format %{ "movd $dst,$src\n\t"
            "pshuflw $dst,$dst,0x00\t! replicate4S" %}
  ins_encode %{
    __ movdl($dst$$XMMRegister, $src$$Register);
    __ pshuflw($dst$$XMMRegister, $dst$$XMMRegister, 0x00);
  %}
  ins_pipe( fpu_reg_reg );
%}

// 8-element (16-byte) vector: broadcast low 64 bits with punpcklqdq.
instruct Repl8S(vecX dst, rRegI src) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateS src));
  format %{ "movd $dst,$src\n\t"
            "pshuflw $dst,$dst,0x00\n\t"
            "punpcklqdq $dst,$dst\t! replicate8S" %}
  ins_encode %{
    __ movdl($dst$$XMMRegister, $src$$Register);
    __ pshuflw($dst$$XMMRegister, $dst$$XMMRegister, 0x00);
    __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}

// 16-element (32-byte) vector: mirror the low 128-bit lane into the high lane.
instruct Repl16S(vecY dst, rRegI src) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (ReplicateS src));
  format %{ "movd $dst,$src\n\t"
            "pshuflw $dst,$dst,0x00\n\t"
            "punpcklqdq $dst,$dst\n\t"
            "vinserti128h $dst,$dst,$dst\t! replicate16S" %}
  ins_encode %{
    __ movdl($dst$$XMMRegister, $src$$Register);
    __ pshuflw($dst$$XMMRegister, $dst$$XMMRegister, 0x00);
    __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister);
    __ vinserti128h($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}
kvn@3882 | 1913 | |
// Replicate char/short (2 byte) scalar immediate to be vector by loading from const table.
// replicate4_imm/replicate8_imm pre-splat the immediate into a 4/8 byte
// constant, so a single load fills all lanes covered by that constant.
instruct Repl2S_imm(vecS dst, immI con) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateS con));
  format %{ "movdl $dst,[$constantaddress]\t! replicate2S($con)" %}
  ins_encode %{
    // 4-byte constant = the 2-byte immediate repeated twice.
    __ movdl($dst$$XMMRegister, $constantaddress(replicate4_imm($con$$constant, 2)));
  %}
  ins_pipe( fpu_reg_reg );
%}

instruct Repl4S_imm(vecD dst, immI con) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateS con));
  format %{ "movq $dst,[$constantaddress]\t! replicate4S($con)" %}
  ins_encode %{
    // 8-byte constant = the 2-byte immediate repeated four times.
    __ movq($dst$$XMMRegister, $constantaddress(replicate8_imm($con$$constant, 2)));
  %}
  ins_pipe( fpu_reg_reg );
%}

instruct Repl8S_imm(vecX dst, immI con) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateS con));
  format %{ "movq $dst,[$constantaddress]\n\t"
            "punpcklqdq $dst,$dst\t! replicate8S($con)" %}
  ins_encode %{
    // Load the 8-byte pattern, then duplicate it into the high qword.
    __ movq($dst$$XMMRegister, $constantaddress(replicate8_imm($con$$constant, 2)));
    __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}

instruct Repl16S_imm(vecY dst, immI con) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (ReplicateS con));
  format %{ "movq $dst,[$constantaddress]\n\t"
            "punpcklqdq $dst,$dst\n\t"
            "vinserti128h $dst,$dst,$dst\t! replicate16S($con)" %}
  ins_encode %{
    // Fill the low 128 bits, then mirror into the high 128-bit half.
    __ movq($dst$$XMMRegister, $constantaddress(replicate8_imm($con$$constant, 2)));
    __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister);
    __ vinserti128h($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}
kvn@3882 | 1960 | |
// Replicate char/short (2 byte) scalar zero to be vector
// Zeroing ignores lane width: an xmm/ymm xor-with-self clears all lanes.
instruct Repl2S_zero(vecS dst, immI0 zero) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateS zero));
  format %{ "pxor $dst,$dst\t! replicate2S zero" %}
  ins_encode %{
    __ pxor($dst$$XMMRegister, $dst$$XMMRegister);
  %}
  ins_pipe( fpu_reg_reg );
%}

instruct Repl4S_zero(vecD dst, immI0 zero) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateS zero));
  format %{ "pxor $dst,$dst\t! replicate4S zero" %}
  ins_encode %{
    __ pxor($dst$$XMMRegister, $dst$$XMMRegister);
  %}
  ins_pipe( fpu_reg_reg );
%}

instruct Repl8S_zero(vecX dst, immI0 zero) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateS zero));
  format %{ "pxor $dst,$dst\t! replicate8S zero" %}
  ins_encode %{
    __ pxor($dst$$XMMRegister, $dst$$XMMRegister);
  %}
  ins_pipe( fpu_reg_reg );
%}

instruct Repl16S_zero(vecY dst, immI0 zero) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (ReplicateS zero));
  format %{ "vpxor $dst,$dst,$dst\t! replicate16S zero" %}
  ins_encode %{
    // Use vxorpd since AVX does not have vpxor for 256-bit (AVX2 will have it).
    bool vector256 = true;
    __ vpxor($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister, vector256);
  %}
  ins_pipe( fpu_reg_reg );
%}
kvn@3882 | 2003 | |
// Replicate integer (4 byte) scalar to be vector
instruct Repl2I(vecD dst, rRegI src) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI src));
  format %{ "movd $dst,$src\n\t"
            "pshufd $dst,$dst,0x00\t! replicate2I" %}
  ins_encode %{
    // Move GPR to xmm, then splat dword 0 into every dword lane.
    __ movdl($dst$$XMMRegister, $src$$Register);
    __ pshufd($dst$$XMMRegister, $dst$$XMMRegister, 0x00);
  %}
  ins_pipe( fpu_reg_reg );
%}

instruct Repl4I(vecX dst, rRegI src) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateI src));
  format %{ "movd $dst,$src\n\t"
            "pshufd $dst,$dst,0x00\t! replicate4I" %}
  ins_encode %{
    __ movdl($dst$$XMMRegister, $src$$Register);
    __ pshufd($dst$$XMMRegister, $dst$$XMMRegister, 0x00);
  %}
  ins_pipe( pipe_slow );
%}

instruct Repl8I(vecY dst, rRegI src) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateI src));
  format %{ "movd $dst,$src\n\t"
            "pshufd $dst,$dst,0x00\n\t"
            "vinserti128h $dst,$dst,$dst\t! replicate8I" %}
  ins_encode %{
    // Splat into the low 128 bits, then mirror into the high half.
    __ movdl($dst$$XMMRegister, $src$$Register);
    __ pshufd($dst$$XMMRegister, $dst$$XMMRegister, 0x00);
    __ vinserti128h($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}
kvn@3882 | 2042 | |
// Replicate integer (4 byte) scalar immediate to be vector by loading from const table.
instruct Repl2I_imm(vecD dst, immI con) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI con));
  format %{ "movq $dst,[$constantaddress]\t! replicate2I($con)" %}
  ins_encode %{
    // 8-byte constant = the 4-byte immediate repeated twice.
    __ movq($dst$$XMMRegister, $constantaddress(replicate8_imm($con$$constant, 4)));
  %}
  ins_pipe( fpu_reg_reg );
%}

instruct Repl4I_imm(vecX dst, immI con) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateI con));
  format %{ "movq $dst,[$constantaddress]\t! replicate4I($con)\n\t"
            "punpcklqdq $dst,$dst" %}
  ins_encode %{
    // Load the 8-byte pattern, then duplicate it into the high qword.
    __ movq($dst$$XMMRegister, $constantaddress(replicate8_imm($con$$constant, 4)));
    __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}

instruct Repl8I_imm(vecY dst, immI con) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateI con));
  format %{ "movq $dst,[$constantaddress]\t! replicate8I($con)\n\t"
            "punpcklqdq $dst,$dst\n\t"
            "vinserti128h $dst,$dst,$dst" %}
  ins_encode %{
    // Fill the low 128 bits, then mirror into the high 128-bit half.
    __ movq($dst$$XMMRegister, $constantaddress(replicate8_imm($con$$constant, 4)));
    __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister);
    __ vinserti128h($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}
kvn@3882 | 2079 | |
// Integer could be loaded into xmm register directly from memory.
instruct Repl2I_mem(vecD dst, memory mem) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI (LoadI mem)));
  format %{ "movd $dst,$mem\n\t"
            "pshufd $dst,$dst,0x00\t! replicate2I" %}
  ins_encode %{
    // Load the dword straight into xmm, then splat it.
    __ movdl($dst$$XMMRegister, $mem$$Address);
    __ pshufd($dst$$XMMRegister, $dst$$XMMRegister, 0x00);
  %}
  ins_pipe( fpu_reg_reg );
%}

instruct Repl4I_mem(vecX dst, memory mem) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateI (LoadI mem)));
  format %{ "movd $dst,$mem\n\t"
            "pshufd $dst,$dst,0x00\t! replicate4I" %}
  ins_encode %{
    __ movdl($dst$$XMMRegister, $mem$$Address);
    __ pshufd($dst$$XMMRegister, $dst$$XMMRegister, 0x00);
  %}
  ins_pipe( pipe_slow );
%}

instruct Repl8I_mem(vecY dst, memory mem) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateI (LoadI mem)));
  format %{ "movd $dst,$mem\n\t"
            "pshufd $dst,$dst,0x00\n\t"
            "vinserti128h $dst,$dst,$dst\t! replicate8I" %}
  ins_encode %{
    // Splat into the low 128 bits, then mirror into the high half.
    __ movdl($dst$$XMMRegister, $mem$$Address);
    __ pshufd($dst$$XMMRegister, $dst$$XMMRegister, 0x00);
    __ vinserti128h($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}
kvn@3882 | 2118 | |
// Replicate integer (4 byte) scalar zero to be vector
instruct Repl2I_zero(vecD dst, immI0 zero) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI zero));
  // Added the missing " zero" tag so the printed form matches every
  // sibling replicate*_zero instruction.
  format %{ "pxor $dst,$dst\t! replicate2I zero" %}
  ins_encode %{
    __ pxor($dst$$XMMRegister, $dst$$XMMRegister);
  %}
  ins_pipe( fpu_reg_reg );
%}

instruct Repl4I_zero(vecX dst, immI0 zero) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateI zero));
  // Removed a stray ')' that had crept into the format string.
  format %{ "pxor $dst,$dst\t! replicate4I zero" %}
  ins_encode %{
    __ pxor($dst$$XMMRegister, $dst$$XMMRegister);
  %}
  ins_pipe( fpu_reg_reg );
%}

instruct Repl8I_zero(vecY dst, immI0 zero) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateI zero));
  format %{ "vpxor $dst,$dst,$dst\t! replicate8I zero" %}
  ins_encode %{
    // Use vxorpd since AVX does not have vpxor for 256-bit (AVX2 will have it).
    bool vector256 = true;
    __ vpxor($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister, vector256);
  %}
  ins_pipe( fpu_reg_reg );
%}
kvn@3882 | 2151 | |
// Replicate long (8 byte) scalar to be vector
#ifdef _LP64
instruct Repl2L(vecX dst, rRegL src) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateL src));
  format %{ "movdq $dst,$src\n\t"
            "punpcklqdq $dst,$dst\t! replicate2L" %}
  ins_encode %{
    // 64-bit: move the long GPR into xmm, then duplicate the low qword.
    __ movdq($dst$$XMMRegister, $src$$Register);
    __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}

instruct Repl4L(vecY dst, rRegL src) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateL src));
  format %{ "movdq $dst,$src\n\t"
            "punpcklqdq $dst,$dst\n\t"
            "vinserti128h $dst,$dst,$dst\t! replicate4L" %}
  ins_encode %{
    // Fill the low 128 bits, then mirror into the high 128-bit half.
    __ movdq($dst$$XMMRegister, $src$$Register);
    __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister);
    __ vinserti128h($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}
#else // _LP64
// 32-bit: a long lives in a GPR pair, so assemble it from the lo/hi halves
// via a scratch xmm (tmp) before broadcasting.
instruct Repl2L(vecX dst, eRegL src, regD tmp) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateL src));
  effect(TEMP dst, USE src, TEMP tmp);
  format %{ "movdl $dst,$src.lo\n\t"
            "movdl $tmp,$src.hi\n\t"
            "punpckldq $dst,$tmp\n\t"
            "punpcklqdq $dst,$dst\t! replicate2L"%}
  ins_encode %{
    __ movdl($dst$$XMMRegister, $src$$Register);
    __ movdl($tmp$$XMMRegister, HIGH_FROM_LOW($src$$Register));
    __ punpckldq($dst$$XMMRegister, $tmp$$XMMRegister);
    __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}

instruct Repl4L(vecY dst, eRegL src, regD tmp) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateL src));
  effect(TEMP dst, USE src, TEMP tmp);
  format %{ "movdl $dst,$src.lo\n\t"
            "movdl $tmp,$src.hi\n\t"
            "punpckldq $dst,$tmp\n\t"
            "punpcklqdq $dst,$dst\n\t"
            "vinserti128h $dst,$dst,$dst\t! replicate4L" %}
  ins_encode %{
    __ movdl($dst$$XMMRegister, $src$$Register);
    __ movdl($tmp$$XMMRegister, HIGH_FROM_LOW($src$$Register));
    __ punpckldq($dst$$XMMRegister, $tmp$$XMMRegister);
    __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister);
    __ vinserti128h($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}
#endif // _LP64
kvn@3882 | 2216 | |
// Replicate long (8 byte) scalar immediate to be vector by loading from const table.
instruct Repl2L_imm(vecX dst, immL con) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateL con));
  format %{ "movq $dst,[$constantaddress]\n\t"
            "punpcklqdq $dst,$dst\t! replicate2L($con)" %}
  ins_encode %{
    // The 8-byte immediate is the lane value itself; load and duplicate.
    __ movq($dst$$XMMRegister, $constantaddress($con));
    __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}

instruct Repl4L_imm(vecY dst, immL con) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateL con));
  format %{ "movq $dst,[$constantaddress]\n\t"
            "punpcklqdq $dst,$dst\n\t"
            "vinserti128h $dst,$dst,$dst\t! replicate4L($con)" %}
  ins_encode %{
    // Fill the low 128 bits, then mirror into the high 128-bit half.
    __ movq($dst$$XMMRegister, $constantaddress($con));
    __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister);
    __ vinserti128h($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}
kvn@3882 | 2243 | |
// Long could be loaded into xmm register directly from memory.
instruct Repl2L_mem(vecX dst, memory mem) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateL (LoadL mem)));
  format %{ "movq $dst,$mem\n\t"
            "punpcklqdq $dst,$dst\t! replicate2L" %}
  ins_encode %{
    // Load the qword straight into xmm, then duplicate it.
    __ movq($dst$$XMMRegister, $mem$$Address);
    __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}

instruct Repl4L_mem(vecY dst, memory mem) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateL (LoadL mem)));
  format %{ "movq $dst,$mem\n\t"
            "punpcklqdq $dst,$dst\n\t"
            "vinserti128h $dst,$dst,$dst\t! replicate4L" %}
  ins_encode %{
    // Fill the low 128 bits, then mirror into the high 128-bit half.
    __ movq($dst$$XMMRegister, $mem$$Address);
    __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister);
    __ vinserti128h($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}
kvn@3882 | 2270 | |
// Replicate long (8 byte) scalar zero to be vector
instruct Repl2L_zero(vecX dst, immL0 zero) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateL zero));
  format %{ "pxor $dst,$dst\t! replicate2L zero" %}
  ins_encode %{
    __ pxor($dst$$XMMRegister, $dst$$XMMRegister);
  %}
  ins_pipe( fpu_reg_reg );
%}

instruct Repl4L_zero(vecY dst, immL0 zero) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateL zero));
  format %{ "vpxor $dst,$dst,$dst\t! replicate4L zero" %}
  ins_encode %{
    // Use vxorpd since AVX does not have vpxor for 256-bit (AVX2 will have it).
    bool vector256 = true;
    __ vpxor($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister, vector256);
  %}
  ins_pipe( fpu_reg_reg );
%}
kvn@3882 | 2293 | |
// Replicate float (4 byte) scalar to be vector
instruct Repl2F(vecD dst, regF src) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateF src));
  // Format fixed to show the actual source operand: the encoding shuffles
  // from $src into $dst (it previously printed "$dst,$dst", matching
  // neither the emitted code nor sibling Repl8F).
  format %{ "pshufd $dst,$src,0x00\t! replicate2F" %}
  ins_encode %{
    __ pshufd($dst$$XMMRegister, $src$$XMMRegister, 0x00);
  %}
  ins_pipe( fpu_reg_reg );
%}

instruct Repl4F(vecX dst, regF src) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateF src));
  // Same format fix as Repl2F: the shuffle source is $src.
  format %{ "pshufd $dst,$src,0x00\t! replicate4F" %}
  ins_encode %{
    __ pshufd($dst$$XMMRegister, $src$$XMMRegister, 0x00);
  %}
  ins_pipe( pipe_slow );
%}

instruct Repl8F(vecY dst, regF src) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateF src));
  format %{ "pshufd $dst,$src,0x00\n\t"
            "vinsertf128h $dst,$dst,$dst\t! replicate8F" %}
  ins_encode %{
    // Splat into the low 128 bits, then mirror into the high half.
    __ pshufd($dst$$XMMRegister, $src$$XMMRegister, 0x00);
    __ vinsertf128h($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}
kvn@3882 | 2326 | |
// Replicate float (4 byte) scalar zero to be vector
instruct Repl2F_zero(vecD dst, immF0 zero) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateF zero));
  format %{ "xorps $dst,$dst\t! replicate2F zero" %}
  ins_encode %{
    __ xorps($dst$$XMMRegister, $dst$$XMMRegister);
  %}
  ins_pipe( fpu_reg_reg );
%}

instruct Repl4F_zero(vecX dst, immF0 zero) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateF zero));
  format %{ "xorps $dst,$dst\t! replicate4F zero" %}
  ins_encode %{
    __ xorps($dst$$XMMRegister, $dst$$XMMRegister);
  %}
  ins_pipe( fpu_reg_reg );
%}

instruct Repl8F_zero(vecY dst, immF0 zero) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateF zero));
  format %{ "vxorps $dst,$dst,$dst\t! replicate8F zero" %}
  ins_encode %{
    // 256-bit form: three-operand AVX xor clears the full ymm register.
    bool vector256 = true;
    __ vxorps($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister, vector256);
  %}
  ins_pipe( fpu_reg_reg );
%}
kvn@3882 | 2358 | |
// Replicate double (8 bytes) scalar to be vector
instruct Repl2D(vecX dst, regD src) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateD src));
  // pshufd imm 0x44 selects dwords {0,1,0,1}, i.e. duplicates qword 0.
  format %{ "pshufd $dst,$src,0x44\t! replicate2D" %}
  ins_encode %{
    __ pshufd($dst$$XMMRegister, $src$$XMMRegister, 0x44);
  %}
  ins_pipe( pipe_slow );
%}

instruct Repl4D(vecY dst, regD src) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateD src));
  format %{ "pshufd $dst,$src,0x44\n\t"
            "vinsertf128h $dst,$dst,$dst\t! replicate4D" %}
  ins_encode %{
    // Duplicate the qword in the low 128 bits, then mirror into the high half.
    __ pshufd($dst$$XMMRegister, $src$$XMMRegister, 0x44);
    __ vinsertf128h($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}
kvn@3882 | 2381 | |
// Replicate double (8 byte) scalar zero to be vector
instruct Repl2D_zero(vecX dst, immD0 zero) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateD zero));
  format %{ "xorpd $dst,$dst\t! replicate2D zero" %}
  ins_encode %{
    __ xorpd($dst$$XMMRegister, $dst$$XMMRegister);
  %}
  ins_pipe( fpu_reg_reg );
%}

instruct Repl4D_zero(vecY dst, immD0 zero) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateD zero));
  // Dropped the stray ",vect256" token from the format string for
  // consistency with the other 256-bit *_zero formats (e.g. replicate8F zero).
  format %{ "vxorpd $dst,$dst,$dst\t! replicate4D zero" %}
  ins_encode %{
    bool vector256 = true;
    __ vxorpd($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister, vector256);
  %}
  ins_pipe( fpu_reg_reg );
%}
kvn@3882 | 2403 | |
kvn@4001 | 2404 | // ====================VECTOR ARITHMETIC======================================= |
kvn@4001 | 2405 | |
kvn@4001 | 2406 | // --------------------------------- ADD -------------------------------------- |
kvn@4001 | 2407 | |
// Bytes vector add
// Two-operand (SSE, dst op= src) form.
instruct vadd4B(vecS dst, vecS src) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (AddVB dst src));
  format %{ "paddb $dst,$src\t! add packed4B" %}
  ins_encode %{
    __ paddb($dst$$XMMRegister, $src$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}

// Three-operand (AVX, dst = src1 op src2) form.
instruct vadd4B_reg(vecS dst, vecS src1, vecS src2) %{
  predicate(UseAVX > 0 && n->as_Vector()->length() == 4);
  match(Set dst (AddVB src1 src2));
  format %{ "vpaddb $dst,$src1,$src2\t! add packed4B" %}
  ins_encode %{
    bool vector256 = false;
    __ vpaddb($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256);
  %}
  ins_pipe( pipe_slow );
%}
kvn@4001 | 2429 | |
// 8-byte vector add, two-operand SSE form.
instruct vadd8B(vecD dst, vecD src) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (AddVB dst src));
  format %{ "paddb $dst,$src\t! add packed8B" %}
  ins_encode %{
    __ paddb($dst$$XMMRegister, $src$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}

// 8-byte vector add, three-operand AVX form.
instruct vadd8B_reg(vecD dst, vecD src1, vecD src2) %{
  predicate(UseAVX > 0 && n->as_Vector()->length() == 8);
  match(Set dst (AddVB src1 src2));
  format %{ "vpaddb $dst,$src1,$src2\t! add packed8B" %}
  ins_encode %{
    bool vector256 = false;
    __ vpaddb($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256);
  %}
  ins_pipe( pipe_slow );
%}
kvn@4001 | 2450 | |
// 16-byte vector add, two-operand SSE form.
instruct vadd16B(vecX dst, vecX src) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (AddVB dst src));
  format %{ "paddb $dst,$src\t! add packed16B" %}
  ins_encode %{
    __ paddb($dst$$XMMRegister, $src$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}

// 16-byte vector add, three-operand AVX register form.
instruct vadd16B_reg(vecX dst, vecX src1, vecX src2) %{
  predicate(UseAVX > 0 && n->as_Vector()->length() == 16);
  match(Set dst (AddVB src1 src2));
  format %{ "vpaddb $dst,$src1,$src2\t! add packed16B" %}
  ins_encode %{
    bool vector256 = false;
    __ vpaddb($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256);
  %}
  ins_pipe( pipe_slow );
%}

// 16-byte vector add with a memory operand folded in (AVX).
instruct vadd16B_mem(vecX dst, vecX src, memory mem) %{
  predicate(UseAVX > 0 && n->as_Vector()->length() == 16);
  match(Set dst (AddVB src (LoadVector mem)));
  format %{ "vpaddb $dst,$src,$mem\t! add packed16B" %}
  ins_encode %{
    bool vector256 = false;
    __ vpaddb($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector256);
  %}
  ins_pipe( pipe_slow );
%}

// 32-byte vector add, 256-bit form; requires UseAVX > 1.
instruct vadd32B_reg(vecY dst, vecY src1, vecY src2) %{
  predicate(UseAVX > 1 && n->as_Vector()->length() == 32);
  match(Set dst (AddVB src1 src2));
  format %{ "vpaddb $dst,$src1,$src2\t! add packed32B" %}
  ins_encode %{
    bool vector256 = true;
    __ vpaddb($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256);
  %}
  ins_pipe( pipe_slow );
%}

// 32-byte vector add with a memory operand folded in; requires UseAVX > 1.
instruct vadd32B_mem(vecY dst, vecY src, memory mem) %{
  predicate(UseAVX > 1 && n->as_Vector()->length() == 32);
  match(Set dst (AddVB src (LoadVector mem)));
  format %{ "vpaddb $dst,$src,$mem\t! add packed32B" %}
  ins_encode %{
    bool vector256 = true;
    __ vpaddb($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector256);
  %}
  ins_pipe( pipe_slow );
%}
kvn@4001 | 2504 | |
// Shorts/Chars vector add
// Two-operand (SSE) form.
instruct vadd2S(vecS dst, vecS src) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVS dst src));
  format %{ "paddw $dst,$src\t! add packed2S" %}
  ins_encode %{
    __ paddw($dst$$XMMRegister, $src$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}

// Three-operand (AVX) form.
instruct vadd2S_reg(vecS dst, vecS src1, vecS src2) %{
  predicate(UseAVX > 0 && n->as_Vector()->length() == 2);
  match(Set dst (AddVS src1 src2));
  format %{ "vpaddw $dst,$src1,$src2\t! add packed2S" %}
  ins_encode %{
    bool vector256 = false;
    __ vpaddw($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256);
  %}
  ins_pipe( pipe_slow );
%}

instruct vadd4S(vecD dst, vecD src) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (AddVS dst src));
  format %{ "paddw $dst,$src\t! add packed4S" %}
  ins_encode %{
    __ paddw($dst$$XMMRegister, $src$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}

instruct vadd4S_reg(vecD dst, vecD src1, vecD src2) %{
  predicate(UseAVX > 0 && n->as_Vector()->length() == 4);
  match(Set dst (AddVS src1 src2));
  format %{ "vpaddw $dst,$src1,$src2\t! add packed4S" %}
  ins_encode %{
    bool vector256 = false;
    __ vpaddw($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256);
  %}
  ins_pipe( pipe_slow );
%}
kvn@4001 | 2547 | |
// 8-short vector add, two-operand SSE form.
instruct vadd8S(vecX dst, vecX src) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (AddVS dst src));
  format %{ "paddw $dst,$src\t! add packed8S" %}
  ins_encode %{
    __ paddw($dst$$XMMRegister, $src$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}

// 8-short vector add, three-operand AVX register form.
instruct vadd8S_reg(vecX dst, vecX src1, vecX src2) %{
  predicate(UseAVX > 0 && n->as_Vector()->length() == 8);
  match(Set dst (AddVS src1 src2));
  format %{ "vpaddw $dst,$src1,$src2\t! add packed8S" %}
  ins_encode %{
    bool vector256 = false;
    __ vpaddw($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256);
  %}
  ins_pipe( pipe_slow );
%}

// 8-short vector add with a memory operand folded in (AVX).
instruct vadd8S_mem(vecX dst, vecX src, memory mem) %{
  predicate(UseAVX > 0 && n->as_Vector()->length() == 8);
  match(Set dst (AddVS src (LoadVector mem)));
  format %{ "vpaddw $dst,$src,$mem\t! add packed8S" %}
  ins_encode %{
    bool vector256 = false;
    __ vpaddw($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector256);
  %}
  ins_pipe( pipe_slow );
%}

// 16-short vector add, 256-bit form; requires UseAVX > 1.
instruct vadd16S_reg(vecY dst, vecY src1, vecY src2) %{
  predicate(UseAVX > 1 && n->as_Vector()->length() == 16);
  match(Set dst (AddVS src1 src2));
  format %{ "vpaddw $dst,$src1,$src2\t! add packed16S" %}
  ins_encode %{
    bool vector256 = true;
    __ vpaddw($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256);
  %}
  ins_pipe( pipe_slow );
%}
kvn@4001 | 2590 | |
kvn@4001 | 2591 | instruct vadd16S_mem(vecY dst, vecY src, memory mem) %{ |
kvn@4001 | 2592 | predicate(UseAVX > 1 && n->as_Vector()->length() == 16); |
kvn@4001 | 2593 | match(Set dst (AddVS src (LoadVector mem))); |
kvn@4001 | 2594 | format %{ "vpaddw $dst,$src,$mem\t! add packed16S" %} |
kvn@4001 | 2595 | ins_encode %{ |
kvn@4001 | 2596 | bool vector256 = true; |
kvn@4001 | 2597 | __ vpaddw($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector256); |
kvn@4001 | 2598 | %} |
kvn@4001 | 2599 | ins_pipe( pipe_slow ); |
kvn@4001 | 2600 | %} |
kvn@4001 | 2601 | |
// Integers vector add.
// SSE forms use destructive paddd (dst = dst + src); AVX forms use
// three-operand vpaddd.  vector256 selects 128- vs 256-bit encoding.

// Add 2 packed ints in place (SSE paddd).
instruct vadd2I(vecD dst, vecD src) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVI dst src));
  format %{ "paddd $dst,$src\t! add packed2I" %}
  ins_encode %{
    __ paddd($dst$$XMMRegister, $src$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}

// Add 2 packed ints, AVX non-destructive: dst = src1 + src2.
instruct vadd2I_reg(vecD dst, vecD src1, vecD src2) %{
  predicate(UseAVX > 0 && n->as_Vector()->length() == 2);
  match(Set dst (AddVI src1 src2));
  format %{ "vpaddd $dst,$src1,$src2\t! add packed2I" %}
  ins_encode %{
    bool vector256 = false;
    __ vpaddd($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256);
  %}
  ins_pipe( pipe_slow );
%}

// Add 4 packed ints in place (SSE paddd, full 128-bit register).
instruct vadd4I(vecX dst, vecX src) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (AddVI dst src));
  format %{ "paddd $dst,$src\t! add packed4I" %}
  ins_encode %{
    __ paddd($dst$$XMMRegister, $src$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}

// Add 4 packed ints, AVX non-destructive: dst = src1 + src2.
instruct vadd4I_reg(vecX dst, vecX src1, vecX src2) %{
  predicate(UseAVX > 0 && n->as_Vector()->length() == 4);
  match(Set dst (AddVI src1 src2));
  format %{ "vpaddd $dst,$src1,$src2\t! add packed4I" %}
  ins_encode %{
    bool vector256 = false;
    __ vpaddd($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256);
  %}
  ins_pipe( pipe_slow );
%}

// Add 4 packed ints with a folded memory operand: dst = src + [mem].
instruct vadd4I_mem(vecX dst, vecX src, memory mem) %{
  predicate(UseAVX > 0 && n->as_Vector()->length() == 4);
  match(Set dst (AddVI src (LoadVector mem)));
  format %{ "vpaddd $dst,$src,$mem\t! add packed4I" %}
  ins_encode %{
    bool vector256 = false;
    __ vpaddd($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector256);
  %}
  ins_pipe( pipe_slow );
%}

// Add 8 packed ints (256-bit); integer 256-bit ops need UseAVX > 1.
instruct vadd8I_reg(vecY dst, vecY src1, vecY src2) %{
  predicate(UseAVX > 1 && n->as_Vector()->length() == 8);
  match(Set dst (AddVI src1 src2));
  format %{ "vpaddd $dst,$src1,$src2\t! add packed8I" %}
  ins_encode %{
    bool vector256 = true;
    __ vpaddd($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256);
  %}
  ins_pipe( pipe_slow );
%}

// Add 8 packed ints (256-bit) with a folded memory operand.
instruct vadd8I_mem(vecY dst, vecY src, memory mem) %{
  predicate(UseAVX > 1 && n->as_Vector()->length() == 8);
  match(Set dst (AddVI src (LoadVector mem)));
  format %{ "vpaddd $dst,$src,$mem\t! add packed8I" %}
  ins_encode %{
    bool vector256 = true;
    __ vpaddd($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector256);
  %}
  ins_pipe( pipe_slow );
%}
kvn@4001 | 2677 | |
// Longs vector add.
// SSE form uses destructive paddq (dst = dst + src); AVX forms use
// three-operand vpaddq.  vector256 selects 128- vs 256-bit encoding.

// Add 2 packed longs in place (SSE paddq).
instruct vadd2L(vecX dst, vecX src) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVL dst src));
  format %{ "paddq $dst,$src\t! add packed2L" %}
  ins_encode %{
    __ paddq($dst$$XMMRegister, $src$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}

// Add 2 packed longs, AVX non-destructive: dst = src1 + src2.
instruct vadd2L_reg(vecX dst, vecX src1, vecX src2) %{
  predicate(UseAVX > 0 && n->as_Vector()->length() == 2);
  match(Set dst (AddVL src1 src2));
  format %{ "vpaddq $dst,$src1,$src2\t! add packed2L" %}
  ins_encode %{
    bool vector256 = false;
    __ vpaddq($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256);
  %}
  ins_pipe( pipe_slow );
%}

// Add 2 packed longs with a folded memory operand: dst = src + [mem].
instruct vadd2L_mem(vecX dst, vecX src, memory mem) %{
  predicate(UseAVX > 0 && n->as_Vector()->length() == 2);
  match(Set dst (AddVL src (LoadVector mem)));
  format %{ "vpaddq $dst,$src,$mem\t! add packed2L" %}
  ins_encode %{
    bool vector256 = false;
    __ vpaddq($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector256);
  %}
  ins_pipe( pipe_slow );
%}

// Add 4 packed longs (256-bit); integer 256-bit ops need UseAVX > 1.
instruct vadd4L_reg(vecY dst, vecY src1, vecY src2) %{
  predicate(UseAVX > 1 && n->as_Vector()->length() == 4);
  match(Set dst (AddVL src1 src2));
  format %{ "vpaddq $dst,$src1,$src2\t! add packed4L" %}
  ins_encode %{
    bool vector256 = true;
    __ vpaddq($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256);
  %}
  ins_pipe( pipe_slow );
%}

// Add 4 packed longs (256-bit) with a folded memory operand.
instruct vadd4L_mem(vecY dst, vecY src, memory mem) %{
  predicate(UseAVX > 1 && n->as_Vector()->length() == 4);
  match(Set dst (AddVL src (LoadVector mem)));
  format %{ "vpaddq $dst,$src,$mem\t! add packed4L" %}
  ins_encode %{
    bool vector256 = true;
    __ vpaddq($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector256);
  %}
  ins_pipe( pipe_slow );
%}
kvn@4001 | 2732 | |
// Floats vector add.
// SSE forms use destructive addps (dst = dst + src); AVX forms use
// three-operand vaddps.  Note: the 256-bit float forms only require
// UseAVX > 0 (unlike the integer forms above, which require UseAVX > 1).

// Add 2 packed floats in place (SSE addps).
instruct vadd2F(vecD dst, vecD src) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVF dst src));
  format %{ "addps $dst,$src\t! add packed2F" %}
  ins_encode %{
    __ addps($dst$$XMMRegister, $src$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}

// Add 2 packed floats, AVX non-destructive: dst = src1 + src2.
instruct vadd2F_reg(vecD dst, vecD src1, vecD src2) %{
  predicate(UseAVX > 0 && n->as_Vector()->length() == 2);
  match(Set dst (AddVF src1 src2));
  format %{ "vaddps $dst,$src1,$src2\t! add packed2F" %}
  ins_encode %{
    bool vector256 = false;
    __ vaddps($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256);
  %}
  ins_pipe( pipe_slow );
%}

// Add 4 packed floats in place (SSE addps, full 128-bit register).
instruct vadd4F(vecX dst, vecX src) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (AddVF dst src));
  format %{ "addps $dst,$src\t! add packed4F" %}
  ins_encode %{
    __ addps($dst$$XMMRegister, $src$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}

// Add 4 packed floats, AVX non-destructive: dst = src1 + src2.
instruct vadd4F_reg(vecX dst, vecX src1, vecX src2) %{
  predicate(UseAVX > 0 && n->as_Vector()->length() == 4);
  match(Set dst (AddVF src1 src2));
  format %{ "vaddps $dst,$src1,$src2\t! add packed4F" %}
  ins_encode %{
    bool vector256 = false;
    __ vaddps($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256);
  %}
  ins_pipe( pipe_slow );
%}

// Add 4 packed floats with a folded memory operand: dst = src + [mem].
instruct vadd4F_mem(vecX dst, vecX src, memory mem) %{
  predicate(UseAVX > 0 && n->as_Vector()->length() == 4);
  match(Set dst (AddVF src (LoadVector mem)));
  format %{ "vaddps $dst,$src,$mem\t! add packed4F" %}
  ins_encode %{
    bool vector256 = false;
    __ vaddps($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector256);
  %}
  ins_pipe( pipe_slow );
%}

// Add 8 packed floats (256-bit); float 256-bit ops need only UseAVX > 0.
instruct vadd8F_reg(vecY dst, vecY src1, vecY src2) %{
  predicate(UseAVX > 0 && n->as_Vector()->length() == 8);
  match(Set dst (AddVF src1 src2));
  format %{ "vaddps $dst,$src1,$src2\t! add packed8F" %}
  ins_encode %{
    bool vector256 = true;
    __ vaddps($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256);
  %}
  ins_pipe( pipe_slow );
%}

// Add 8 packed floats (256-bit) with a folded memory operand.
instruct vadd8F_mem(vecY dst, vecY src, memory mem) %{
  predicate(UseAVX > 0 && n->as_Vector()->length() == 8);
  match(Set dst (AddVF src (LoadVector mem)));
  format %{ "vaddps $dst,$src,$mem\t! add packed8F" %}
  ins_encode %{
    bool vector256 = true;
    __ vaddps($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector256);
  %}
  ins_pipe( pipe_slow );
%}
kvn@4001 | 2808 | |
// Doubles vector add.
// SSE form uses destructive addpd (dst = dst + src); AVX forms use
// three-operand vaddpd.  256-bit double forms need only UseAVX > 0.

// Add 2 packed doubles in place (SSE addpd).
instruct vadd2D(vecX dst, vecX src) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVD dst src));
  format %{ "addpd $dst,$src\t! add packed2D" %}
  ins_encode %{
    __ addpd($dst$$XMMRegister, $src$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}

// Add 2 packed doubles, AVX non-destructive: dst = src1 + src2.
instruct vadd2D_reg(vecX dst, vecX src1, vecX src2) %{
  predicate(UseAVX > 0 && n->as_Vector()->length() == 2);
  match(Set dst (AddVD src1 src2));
  format %{ "vaddpd $dst,$src1,$src2\t! add packed2D" %}
  ins_encode %{
    bool vector256 = false;
    __ vaddpd($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256);
  %}
  ins_pipe( pipe_slow );
%}

// Add 2 packed doubles with a folded memory operand: dst = src + [mem].
instruct vadd2D_mem(vecX dst, vecX src, memory mem) %{
  predicate(UseAVX > 0 && n->as_Vector()->length() == 2);
  match(Set dst (AddVD src (LoadVector mem)));
  format %{ "vaddpd $dst,$src,$mem\t! add packed2D" %}
  ins_encode %{
    bool vector256 = false;
    __ vaddpd($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector256);
  %}
  ins_pipe( pipe_slow );
%}

// Add 4 packed doubles (256-bit).
instruct vadd4D_reg(vecY dst, vecY src1, vecY src2) %{
  predicate(UseAVX > 0 && n->as_Vector()->length() == 4);
  match(Set dst (AddVD src1 src2));
  format %{ "vaddpd $dst,$src1,$src2\t! add packed4D" %}
  ins_encode %{
    bool vector256 = true;
    __ vaddpd($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256);
  %}
  ins_pipe( pipe_slow );
%}

// Add 4 packed doubles (256-bit) with a folded memory operand.
instruct vadd4D_mem(vecY dst, vecY src, memory mem) %{
  predicate(UseAVX > 0 && n->as_Vector()->length() == 4);
  match(Set dst (AddVD src (LoadVector mem)));
  format %{ "vaddpd $dst,$src,$mem\t! add packed4D" %}
  ins_encode %{
    bool vector256 = true;
    __ vaddpd($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector256);
  %}
  ins_pipe( pipe_slow );
%}
kvn@4001 | 2863 | |
// --------------------------------- SUB --------------------------------------

// Bytes vector sub.
// SSE forms use destructive psubb (dst = dst - src); AVX forms use
// three-operand vpsubb.  vector256 selects 128- vs 256-bit encoding.

// Subtract 4 packed bytes in place (SSE psubb).
instruct vsub4B(vecS dst, vecS src) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (SubVB dst src));
  format %{ "psubb $dst,$src\t! sub packed4B" %}
  ins_encode %{
    __ psubb($dst$$XMMRegister, $src$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}

// Subtract 4 packed bytes, AVX non-destructive: dst = src1 - src2.
instruct vsub4B_reg(vecS dst, vecS src1, vecS src2) %{
  predicate(UseAVX > 0 && n->as_Vector()->length() == 4);
  match(Set dst (SubVB src1 src2));
  format %{ "vpsubb $dst,$src1,$src2\t! sub packed4B" %}
  ins_encode %{
    bool vector256 = false;
    __ vpsubb($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256);
  %}
  ins_pipe( pipe_slow );
%}

// Subtract 8 packed bytes in place (SSE psubb).
instruct vsub8B(vecD dst, vecD src) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (SubVB dst src));
  format %{ "psubb $dst,$src\t! sub packed8B" %}
  ins_encode %{
    __ psubb($dst$$XMMRegister, $src$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}

// Subtract 8 packed bytes, AVX non-destructive: dst = src1 - src2.
instruct vsub8B_reg(vecD dst, vecD src1, vecD src2) %{
  predicate(UseAVX > 0 && n->as_Vector()->length() == 8);
  match(Set dst (SubVB src1 src2));
  format %{ "vpsubb $dst,$src1,$src2\t! sub packed8B" %}
  ins_encode %{
    bool vector256 = false;
    __ vpsubb($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256);
  %}
  ins_pipe( pipe_slow );
%}

// Subtract 16 packed bytes in place (SSE psubb, full 128-bit register).
instruct vsub16B(vecX dst, vecX src) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (SubVB dst src));
  format %{ "psubb $dst,$src\t! sub packed16B" %}
  ins_encode %{
    __ psubb($dst$$XMMRegister, $src$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}

// Subtract 16 packed bytes, AVX non-destructive: dst = src1 - src2.
instruct vsub16B_reg(vecX dst, vecX src1, vecX src2) %{
  predicate(UseAVX > 0 && n->as_Vector()->length() == 16);
  match(Set dst (SubVB src1 src2));
  format %{ "vpsubb $dst,$src1,$src2\t! sub packed16B" %}
  ins_encode %{
    bool vector256 = false;
    __ vpsubb($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256);
  %}
  ins_pipe( pipe_slow );
%}

// Subtract 16 packed bytes with a folded memory operand: dst = src - [mem].
instruct vsub16B_mem(vecX dst, vecX src, memory mem) %{
  predicate(UseAVX > 0 && n->as_Vector()->length() == 16);
  match(Set dst (SubVB src (LoadVector mem)));
  format %{ "vpsubb $dst,$src,$mem\t! sub packed16B" %}
  ins_encode %{
    bool vector256 = false;
    __ vpsubb($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector256);
  %}
  ins_pipe( pipe_slow );
%}

// Subtract 32 packed bytes (256-bit); integer 256-bit ops need UseAVX > 1.
instruct vsub32B_reg(vecY dst, vecY src1, vecY src2) %{
  predicate(UseAVX > 1 && n->as_Vector()->length() == 32);
  match(Set dst (SubVB src1 src2));
  format %{ "vpsubb $dst,$src1,$src2\t! sub packed32B" %}
  ins_encode %{
    bool vector256 = true;
    __ vpsubb($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256);
  %}
  ins_pipe( pipe_slow );
%}

// Subtract 32 packed bytes (256-bit) with a folded memory operand.
instruct vsub32B_mem(vecY dst, vecY src, memory mem) %{
  predicate(UseAVX > 1 && n->as_Vector()->length() == 32);
  match(Set dst (SubVB src (LoadVector mem)));
  format %{ "vpsubb $dst,$src,$mem\t! sub packed32B" %}
  ins_encode %{
    bool vector256 = true;
    __ vpsubb($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector256);
  %}
  ins_pipe( pipe_slow );
%}
kvn@4001 | 2962 | |
// Shorts/Chars vector sub.
// SSE forms use destructive psubw (dst = dst - src); AVX forms use
// three-operand vpsubw.  vector256 selects 128- vs 256-bit encoding.

// Subtract 2 packed shorts in place (SSE psubw).
instruct vsub2S(vecS dst, vecS src) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SubVS dst src));
  format %{ "psubw $dst,$src\t! sub packed2S" %}
  ins_encode %{
    __ psubw($dst$$XMMRegister, $src$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}

// Subtract 2 packed shorts, AVX non-destructive: dst = src1 - src2.
instruct vsub2S_reg(vecS dst, vecS src1, vecS src2) %{
  predicate(UseAVX > 0 && n->as_Vector()->length() == 2);
  match(Set dst (SubVS src1 src2));
  format %{ "vpsubw $dst,$src1,$src2\t! sub packed2S" %}
  ins_encode %{
    bool vector256 = false;
    __ vpsubw($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256);
  %}
  ins_pipe( pipe_slow );
%}

// Subtract 4 packed shorts in place (SSE psubw).
instruct vsub4S(vecD dst, vecD src) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (SubVS dst src));
  format %{ "psubw $dst,$src\t! sub packed4S" %}
  ins_encode %{
    __ psubw($dst$$XMMRegister, $src$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}

// Subtract 4 packed shorts, AVX non-destructive: dst = src1 - src2.
instruct vsub4S_reg(vecD dst, vecD src1, vecD src2) %{
  predicate(UseAVX > 0 && n->as_Vector()->length() == 4);
  match(Set dst (SubVS src1 src2));
  format %{ "vpsubw $dst,$src1,$src2\t! sub packed4S" %}
  ins_encode %{
    bool vector256 = false;
    __ vpsubw($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256);
  %}
  ins_pipe( pipe_slow );
%}

// Subtract 8 packed shorts in place (SSE psubw, full 128-bit register).
instruct vsub8S(vecX dst, vecX src) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (SubVS dst src));
  format %{ "psubw $dst,$src\t! sub packed8S" %}
  ins_encode %{
    __ psubw($dst$$XMMRegister, $src$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}

// Subtract 8 packed shorts, AVX non-destructive: dst = src1 - src2.
instruct vsub8S_reg(vecX dst, vecX src1, vecX src2) %{
  predicate(UseAVX > 0 && n->as_Vector()->length() == 8);
  match(Set dst (SubVS src1 src2));
  format %{ "vpsubw $dst,$src1,$src2\t! sub packed8S" %}
  ins_encode %{
    bool vector256 = false;
    __ vpsubw($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256);
  %}
  ins_pipe( pipe_slow );
%}

// Subtract 8 packed shorts with a folded memory operand: dst = src - [mem].
instruct vsub8S_mem(vecX dst, vecX src, memory mem) %{
  predicate(UseAVX > 0 && n->as_Vector()->length() == 8);
  match(Set dst (SubVS src (LoadVector mem)));
  format %{ "vpsubw $dst,$src,$mem\t! sub packed8S" %}
  ins_encode %{
    bool vector256 = false;
    __ vpsubw($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector256);
  %}
  ins_pipe( pipe_slow );
%}

// Subtract 16 packed shorts (256-bit); integer 256-bit ops need UseAVX > 1.
instruct vsub16S_reg(vecY dst, vecY src1, vecY src2) %{
  predicate(UseAVX > 1 && n->as_Vector()->length() == 16);
  match(Set dst (SubVS src1 src2));
  format %{ "vpsubw $dst,$src1,$src2\t! sub packed16S" %}
  ins_encode %{
    bool vector256 = true;
    __ vpsubw($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256);
  %}
  ins_pipe( pipe_slow );
%}

// Subtract 16 packed shorts (256-bit) with a folded memory operand.
instruct vsub16S_mem(vecY dst, vecY src, memory mem) %{
  predicate(UseAVX > 1 && n->as_Vector()->length() == 16);
  match(Set dst (SubVS src (LoadVector mem)));
  format %{ "vpsubw $dst,$src,$mem\t! sub packed16S" %}
  ins_encode %{
    bool vector256 = true;
    __ vpsubw($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector256);
  %}
  ins_pipe( pipe_slow );
%}
kvn@4001 | 3059 | |
// Integers vector sub.
// SSE forms use destructive psubd (dst = dst - src); AVX forms use
// three-operand vpsubd.  vector256 selects 128- vs 256-bit encoding.

// Subtract 2 packed ints in place (SSE psubd).
instruct vsub2I(vecD dst, vecD src) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SubVI dst src));
  format %{ "psubd $dst,$src\t! sub packed2I" %}
  ins_encode %{
    __ psubd($dst$$XMMRegister, $src$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}

// Subtract 2 packed ints, AVX non-destructive: dst = src1 - src2.
instruct vsub2I_reg(vecD dst, vecD src1, vecD src2) %{
  predicate(UseAVX > 0 && n->as_Vector()->length() == 2);
  match(Set dst (SubVI src1 src2));
  format %{ "vpsubd $dst,$src1,$src2\t! sub packed2I" %}
  ins_encode %{
    bool vector256 = false;
    __ vpsubd($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256);
  %}
  ins_pipe( pipe_slow );
%}

// Subtract 4 packed ints in place (SSE psubd, full 128-bit register).
instruct vsub4I(vecX dst, vecX src) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (SubVI dst src));
  format %{ "psubd $dst,$src\t! sub packed4I" %}
  ins_encode %{
    __ psubd($dst$$XMMRegister, $src$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}

// Subtract 4 packed ints, AVX non-destructive: dst = src1 - src2.
instruct vsub4I_reg(vecX dst, vecX src1, vecX src2) %{
  predicate(UseAVX > 0 && n->as_Vector()->length() == 4);
  match(Set dst (SubVI src1 src2));
  format %{ "vpsubd $dst,$src1,$src2\t! sub packed4I" %}
  ins_encode %{
    bool vector256 = false;
    __ vpsubd($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256);
  %}
  ins_pipe( pipe_slow );
%}

// Subtract 4 packed ints with a folded memory operand: dst = src - [mem].
instruct vsub4I_mem(vecX dst, vecX src, memory mem) %{
  predicate(UseAVX > 0 && n->as_Vector()->length() == 4);
  match(Set dst (SubVI src (LoadVector mem)));
  format %{ "vpsubd $dst,$src,$mem\t! sub packed4I" %}
  ins_encode %{
    bool vector256 = false;
    __ vpsubd($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector256);
  %}
  ins_pipe( pipe_slow );
%}

// Subtract 8 packed ints (256-bit); integer 256-bit ops need UseAVX > 1.
instruct vsub8I_reg(vecY dst, vecY src1, vecY src2) %{
  predicate(UseAVX > 1 && n->as_Vector()->length() == 8);
  match(Set dst (SubVI src1 src2));
  format %{ "vpsubd $dst,$src1,$src2\t! sub packed8I" %}
  ins_encode %{
    bool vector256 = true;
    __ vpsubd($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256);
  %}
  ins_pipe( pipe_slow );
%}

// Subtract 8 packed ints (256-bit) with a folded memory operand.
instruct vsub8I_mem(vecY dst, vecY src, memory mem) %{
  predicate(UseAVX > 1 && n->as_Vector()->length() == 8);
  match(Set dst (SubVI src (LoadVector mem)));
  format %{ "vpsubd $dst,$src,$mem\t! sub packed8I" %}
  ins_encode %{
    bool vector256 = true;
    __ vpsubd($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector256);
  %}
  ins_pipe( pipe_slow );
%}
kvn@4001 | 3135 | |
// Longs vector sub.
// SSE form uses destructive psubq (dst = dst - src); AVX forms use
// three-operand vpsubq.  vector256 selects 128- vs 256-bit encoding.

// Subtract 2 packed longs in place (SSE psubq).
instruct vsub2L(vecX dst, vecX src) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SubVL dst src));
  format %{ "psubq $dst,$src\t! sub packed2L" %}
  ins_encode %{
    __ psubq($dst$$XMMRegister, $src$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}

// Subtract 2 packed longs, AVX non-destructive: dst = src1 - src2.
instruct vsub2L_reg(vecX dst, vecX src1, vecX src2) %{
  predicate(UseAVX > 0 && n->as_Vector()->length() == 2);
  match(Set dst (SubVL src1 src2));
  format %{ "vpsubq $dst,$src1,$src2\t! sub packed2L" %}
  ins_encode %{
    bool vector256 = false;
    __ vpsubq($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256);
  %}
  ins_pipe( pipe_slow );
%}

// Subtract 2 packed longs with a folded memory operand: dst = src - [mem].
instruct vsub2L_mem(vecX dst, vecX src, memory mem) %{
  predicate(UseAVX > 0 && n->as_Vector()->length() == 2);
  match(Set dst (SubVL src (LoadVector mem)));
  format %{ "vpsubq $dst,$src,$mem\t! sub packed2L" %}
  ins_encode %{
    bool vector256 = false;
    __ vpsubq($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector256);
  %}
  ins_pipe( pipe_slow );
%}

// Subtract 4 packed longs (256-bit); integer 256-bit ops need UseAVX > 1.
instruct vsub4L_reg(vecY dst, vecY src1, vecY src2) %{
  predicate(UseAVX > 1 && n->as_Vector()->length() == 4);
  match(Set dst (SubVL src1 src2));
  format %{ "vpsubq $dst,$src1,$src2\t! sub packed4L" %}
  ins_encode %{
    bool vector256 = true;
    __ vpsubq($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256);
  %}
  ins_pipe( pipe_slow );
%}

// Subtract 4 packed longs (256-bit) with a folded memory operand.
instruct vsub4L_mem(vecY dst, vecY src, memory mem) %{
  predicate(UseAVX > 1 && n->as_Vector()->length() == 4);
  match(Set dst (SubVL src (LoadVector mem)));
  format %{ "vpsubq $dst,$src,$mem\t! sub packed4L" %}
  ins_encode %{
    bool vector256 = true;
    __ vpsubq($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector256);
  %}
  ins_pipe( pipe_slow );
%}
kvn@4001 | 3190 | |
// Floats vector sub.
// SSE forms use destructive subps (dst = dst - src); AVX forms use
// three-operand vsubps.

// Subtract 2 packed floats in place (SSE subps).
instruct vsub2F(vecD dst, vecD src) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SubVF dst src));
  format %{ "subps $dst,$src\t! sub packed2F" %}
  ins_encode %{
    __ subps($dst$$XMMRegister, $src$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}

// Subtract 2 packed floats, AVX non-destructive: dst = src1 - src2.
instruct vsub2F_reg(vecD dst, vecD src1, vecD src2) %{
  predicate(UseAVX > 0 && n->as_Vector()->length() == 2);
  match(Set dst (SubVF src1 src2));
  format %{ "vsubps $dst,$src1,$src2\t! sub packed2F" %}
  ins_encode %{
    bool vector256 = false;
    __ vsubps($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256);
  %}
  ins_pipe( pipe_slow );
%}

// Subtract 4 packed floats in place (SSE subps, full 128-bit register).
instruct vsub4F(vecX dst, vecX src) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (SubVF dst src));
  format %{ "subps $dst,$src\t! sub packed4F" %}
  ins_encode %{
    __ subps($dst$$XMMRegister, $src$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}

// Subtract 4 packed floats, AVX non-destructive: dst = src1 - src2.
instruct vsub4F_reg(vecX dst, vecX src1, vecX src2) %{
  predicate(UseAVX > 0 && n->as_Vector()->length() == 4);
  match(Set dst (SubVF src1 src2));
  format %{ "vsubps $dst,$src1,$src2\t! sub packed4F" %}
  ins_encode %{
    bool vector256 = false;
    __ vsubps($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256);
  %}
  ins_pipe( pipe_slow );
%}
kvn@4001 | 3233 | |
kvn@4001 | 3234 | instruct vsub4F_mem(vecX dst, vecX src, memory mem) %{ |
kvn@4001 | 3235 | predicate(UseAVX > 0 && n->as_Vector()->length() == 4); |
kvn@4001 | 3236 | match(Set dst (SubVF src (LoadVector mem))); |
kvn@4001 | 3237 | format %{ "vsubps $dst,$src,$mem\t! sub packed4F" %} |
kvn@4001 | 3238 | ins_encode %{ |
kvn@4001 | 3239 | bool vector256 = false; |
kvn@4001 | 3240 | __ vsubps($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector256); |
kvn@4001 | 3241 | %} |
kvn@4001 | 3242 | ins_pipe( pipe_slow ); |
kvn@4001 | 3243 | %} |
kvn@4001 | 3244 | |
kvn@4001 | 3245 | instruct vsub8F_reg(vecY dst, vecY src1, vecY src2) %{ |
kvn@4001 | 3246 | predicate(UseAVX > 0 && n->as_Vector()->length() == 8); |
kvn@4001 | 3247 | match(Set dst (SubVF src1 src2)); |
kvn@4001 | 3248 | format %{ "vsubps $dst,$src1,$src2\t! sub packed8F" %} |
kvn@4001 | 3249 | ins_encode %{ |
kvn@4001 | 3250 | bool vector256 = true; |
kvn@4001 | 3251 | __ vsubps($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256); |
kvn@4001 | 3252 | %} |
kvn@4001 | 3253 | ins_pipe( pipe_slow ); |
kvn@4001 | 3254 | %} |
kvn@4001 | 3255 | |
kvn@4001 | 3256 | instruct vsub8F_mem(vecY dst, vecY src, memory mem) %{ |
kvn@4001 | 3257 | predicate(UseAVX > 0 && n->as_Vector()->length() == 8); |
kvn@4001 | 3258 | match(Set dst (SubVF src (LoadVector mem))); |
kvn@4001 | 3259 | format %{ "vsubps $dst,$src,$mem\t! sub packed8F" %} |
kvn@4001 | 3260 | ins_encode %{ |
kvn@4001 | 3261 | bool vector256 = true; |
kvn@4001 | 3262 | __ vsubps($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector256); |
kvn@4001 | 3263 | %} |
kvn@4001 | 3264 | ins_pipe( pipe_slow ); |
kvn@4001 | 3265 | %} |
kvn@4001 | 3266 | |
kvn@4001 | 3267 | // Doubles vector sub
// SubVD rules: vsub2D is the pre-AVX in-place form (subpd); the
// *_reg/*_mem variants use the three-operand AVX vsubpd, with
// 'vector256' selecting 128-bit (2D) vs 256-bit (4D) encoding.
kvn@4001 | 3268 | instruct vsub2D(vecX dst, vecX src) %{
kvn@4001 | 3269 | predicate(n->as_Vector()->length() == 2);
kvn@4001 | 3270 | match(Set dst (SubVD dst src));
kvn@4001 | 3271 | format %{ "subpd $dst,$src\t! sub packed2D" %}
kvn@4001 | 3272 | ins_encode %{
kvn@4001 | 3273 | __ subpd($dst$$XMMRegister, $src$$XMMRegister);
kvn@4001 | 3274 | %}
kvn@4001 | 3275 | ins_pipe( pipe_slow );
kvn@4001 | 3276 | %}
kvn@4001 | 3277 | 
kvn@4001 | 3278 | instruct vsub2D_reg(vecX dst, vecX src1, vecX src2) %{
kvn@4001 | 3279 | predicate(UseAVX > 0 && n->as_Vector()->length() == 2);
kvn@4001 | 3280 | match(Set dst (SubVD src1 src2));
kvn@4001 | 3281 | format %{ "vsubpd $dst,$src1,$src2\t! sub packed2D" %}
kvn@4001 | 3282 | ins_encode %{
kvn@4001 | 3283 | bool vector256 = false;
kvn@4001 | 3284 | __ vsubpd($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256);
kvn@4001 | 3285 | %}
kvn@4001 | 3286 | ins_pipe( pipe_slow );
kvn@4001 | 3287 | %}
kvn@4001 | 3288 | 
kvn@4001 | 3289 | instruct vsub2D_mem(vecX dst, vecX src, memory mem) %{
kvn@4001 | 3290 | predicate(UseAVX > 0 && n->as_Vector()->length() == 2);
kvn@4001 | 3291 | match(Set dst (SubVD src (LoadVector mem)));
kvn@4001 | 3292 | format %{ "vsubpd $dst,$src,$mem\t! sub packed2D" %}
kvn@4001 | 3293 | ins_encode %{
kvn@4001 | 3294 | bool vector256 = false;
kvn@4001 | 3295 | __ vsubpd($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector256);
kvn@4001 | 3296 | %}
kvn@4001 | 3297 | ins_pipe( pipe_slow );
kvn@4001 | 3298 | %}
kvn@4001 | 3299 | 
kvn@4001 | 3300 | instruct vsub4D_reg(vecY dst, vecY src1, vecY src2) %{
kvn@4001 | 3301 | predicate(UseAVX > 0 && n->as_Vector()->length() == 4);
kvn@4001 | 3302 | match(Set dst (SubVD src1 src2));
kvn@4001 | 3303 | format %{ "vsubpd $dst,$src1,$src2\t! sub packed4D" %}
kvn@4001 | 3304 | ins_encode %{
kvn@4001 | 3305 | bool vector256 = true;
kvn@4001 | 3306 | __ vsubpd($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256);
kvn@4001 | 3307 | %}
kvn@4001 | 3308 | ins_pipe( pipe_slow );
kvn@4001 | 3309 | %}
kvn@4001 | 3310 | 
kvn@4001 | 3311 | instruct vsub4D_mem(vecY dst, vecY src, memory mem) %{
kvn@4001 | 3312 | predicate(UseAVX > 0 && n->as_Vector()->length() == 4);
kvn@4001 | 3313 | match(Set dst (SubVD src (LoadVector mem)));
kvn@4001 | 3314 | format %{ "vsubpd $dst,$src,$mem\t! sub packed4D" %}
kvn@4001 | 3315 | ins_encode %{
kvn@4001 | 3316 | bool vector256 = true;
kvn@4001 | 3317 | __ vsubpd($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector256);
kvn@4001 | 3318 | %}
kvn@4001 | 3319 | ins_pipe( pipe_slow );
kvn@4001 | 3320 | %}
kvn@4001 | 3321 | 
kvn@4001 | 3322 | // --------------------------------- MUL --------------------------------------
kvn@4001 | 3323 | 
kvn@4001 | 3324 | // Shorts/Chars vector mul
// MulVS rules (pmullw/vpmullw). In-place forms (dst = dst * src) need
// no feature predicate beyond vector length; AVX three-operand forms
// require UseAVX > 0 for 128-bit and UseAVX > 1 (AVX2) for the 256-bit
// packed16S forms. *_mem rules fold a LoadVector into the operand.
kvn@4001 | 3325 | instruct vmul2S(vecS dst, vecS src) %{
kvn@4001 | 3326 | predicate(n->as_Vector()->length() == 2);
kvn@4001 | 3327 | match(Set dst (MulVS dst src));
kvn@4001 | 3328 | format %{ "pmullw $dst,$src\t! mul packed2S" %}
kvn@4001 | 3329 | ins_encode %{
kvn@4001 | 3330 | __ pmullw($dst$$XMMRegister, $src$$XMMRegister);
kvn@4001 | 3331 | %}
kvn@4001 | 3332 | ins_pipe( pipe_slow );
kvn@4001 | 3333 | %}
kvn@4001 | 3334 | 
kvn@4001 | 3335 | instruct vmul2S_reg(vecS dst, vecS src1, vecS src2) %{
kvn@4001 | 3336 | predicate(UseAVX > 0 && n->as_Vector()->length() == 2);
kvn@4001 | 3337 | match(Set dst (MulVS src1 src2));
kvn@4001 | 3338 | format %{ "vpmullw $dst,$src1,$src2\t! mul packed2S" %}
kvn@4001 | 3339 | ins_encode %{
kvn@4001 | 3340 | bool vector256 = false;
kvn@4001 | 3341 | __ vpmullw($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256);
kvn@4001 | 3342 | %}
kvn@4001 | 3343 | ins_pipe( pipe_slow );
kvn@4001 | 3344 | %}
kvn@4001 | 3345 | 
kvn@4001 | 3346 | instruct vmul4S(vecD dst, vecD src) %{
kvn@4001 | 3347 | predicate(n->as_Vector()->length() == 4);
kvn@4001 | 3348 | match(Set dst (MulVS dst src));
kvn@4001 | 3349 | format %{ "pmullw $dst,$src\t! mul packed4S" %}
kvn@4001 | 3350 | ins_encode %{
kvn@4001 | 3351 | __ pmullw($dst$$XMMRegister, $src$$XMMRegister);
kvn@4001 | 3352 | %}
kvn@4001 | 3353 | ins_pipe( pipe_slow );
kvn@4001 | 3354 | %}
kvn@4001 | 3355 | 
kvn@4001 | 3356 | instruct vmul4S_reg(vecD dst, vecD src1, vecD src2) %{
kvn@4001 | 3357 | predicate(UseAVX > 0 && n->as_Vector()->length() == 4);
kvn@4001 | 3358 | match(Set dst (MulVS src1 src2));
kvn@4001 | 3359 | format %{ "vpmullw $dst,$src1,$src2\t! mul packed4S" %}
kvn@4001 | 3360 | ins_encode %{
kvn@4001 | 3361 | bool vector256 = false;
kvn@4001 | 3362 | __ vpmullw($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256);
kvn@4001 | 3363 | %}
kvn@4001 | 3364 | ins_pipe( pipe_slow );
kvn@4001 | 3365 | %}
kvn@4001 | 3366 | 
kvn@4001 | 3367 | instruct vmul8S(vecX dst, vecX src) %{
kvn@4001 | 3368 | predicate(n->as_Vector()->length() == 8);
kvn@4001 | 3369 | match(Set dst (MulVS dst src));
kvn@4001 | 3370 | format %{ "pmullw $dst,$src\t! mul packed8S" %}
kvn@4001 | 3371 | ins_encode %{
kvn@4001 | 3372 | __ pmullw($dst$$XMMRegister, $src$$XMMRegister);
kvn@4001 | 3373 | %}
kvn@4001 | 3374 | ins_pipe( pipe_slow );
kvn@4001 | 3375 | %}
kvn@4001 | 3376 | 
kvn@4001 | 3377 | instruct vmul8S_reg(vecX dst, vecX src1, vecX src2) %{
kvn@4001 | 3378 | predicate(UseAVX > 0 && n->as_Vector()->length() == 8);
kvn@4001 | 3379 | match(Set dst (MulVS src1 src2));
kvn@4001 | 3380 | format %{ "vpmullw $dst,$src1,$src2\t! mul packed8S" %}
kvn@4001 | 3381 | ins_encode %{
kvn@4001 | 3382 | bool vector256 = false;
kvn@4001 | 3383 | __ vpmullw($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256);
kvn@4001 | 3384 | %}
kvn@4001 | 3385 | ins_pipe( pipe_slow );
kvn@4001 | 3386 | %}
kvn@4001 | 3387 | 
kvn@4001 | 3388 | instruct vmul8S_mem(vecX dst, vecX src, memory mem) %{
kvn@4001 | 3389 | predicate(UseAVX > 0 && n->as_Vector()->length() == 8);
kvn@4001 | 3390 | match(Set dst (MulVS src (LoadVector mem)));
kvn@4001 | 3391 | format %{ "vpmullw $dst,$src,$mem\t! mul packed8S" %}
kvn@4001 | 3392 | ins_encode %{
kvn@4001 | 3393 | bool vector256 = false;
kvn@4001 | 3394 | __ vpmullw($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector256);
kvn@4001 | 3395 | %}
kvn@4001 | 3396 | ins_pipe( pipe_slow );
kvn@4001 | 3397 | %}
kvn@4001 | 3398 | 
kvn@4001 | 3399 | instruct vmul16S_reg(vecY dst, vecY src1, vecY src2) %{
kvn@4001 | 3400 | predicate(UseAVX > 1 && n->as_Vector()->length() == 16);
kvn@4001 | 3401 | match(Set dst (MulVS src1 src2));
kvn@4001 | 3402 | format %{ "vpmullw $dst,$src1,$src2\t! mul packed16S" %}
kvn@4001 | 3403 | ins_encode %{
kvn@4001 | 3404 | bool vector256 = true;
kvn@4001 | 3405 | __ vpmullw($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256);
kvn@4001 | 3406 | %}
kvn@4001 | 3407 | ins_pipe( pipe_slow );
kvn@4001 | 3408 | %}
kvn@4001 | 3409 | 
kvn@4001 | 3410 | instruct vmul16S_mem(vecY dst, vecY src, memory mem) %{
kvn@4001 | 3411 | predicate(UseAVX > 1 && n->as_Vector()->length() == 16);
kvn@4001 | 3412 | match(Set dst (MulVS src (LoadVector mem)));
kvn@4001 | 3413 | format %{ "vpmullw $dst,$src,$mem\t! mul packed16S" %}
kvn@4001 | 3414 | ins_encode %{
kvn@4001 | 3415 | bool vector256 = true;
kvn@4001 | 3416 | __ vpmullw($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector256);
kvn@4001 | 3417 | %}
kvn@4001 | 3418 | ins_pipe( pipe_slow );
kvn@4001 | 3419 | %}
kvn@4001 | 3420 | 
kvn@4001 | 3421 | // Integers vector mul (sse4_1)
// MulVI rules use pmulld, so even the non-AVX in-place forms require
// UseSSE > 3 (SSE4.1). AVX forms need UseAVX > 0 (128-bit) or
// UseAVX > 1 / AVX2 (256-bit packed8I); 'vector256' picks the encoding.
kvn@4001 | 3422 | instruct vmul2I(vecD dst, vecD src) %{
kvn@4001 | 3423 | predicate(UseSSE > 3 && n->as_Vector()->length() == 2);
kvn@4001 | 3424 | match(Set dst (MulVI dst src));
kvn@4001 | 3425 | format %{ "pmulld $dst,$src\t! mul packed2I" %}
kvn@4001 | 3426 | ins_encode %{
kvn@4001 | 3427 | __ pmulld($dst$$XMMRegister, $src$$XMMRegister);
kvn@4001 | 3428 | %}
kvn@4001 | 3429 | ins_pipe( pipe_slow );
kvn@4001 | 3430 | %}
kvn@4001 | 3431 | 
kvn@4001 | 3432 | instruct vmul2I_reg(vecD dst, vecD src1, vecD src2) %{
kvn@4001 | 3433 | predicate(UseAVX > 0 && n->as_Vector()->length() == 2);
kvn@4001 | 3434 | match(Set dst (MulVI src1 src2));
kvn@4001 | 3435 | format %{ "vpmulld $dst,$src1,$src2\t! mul packed2I" %}
kvn@4001 | 3436 | ins_encode %{
kvn@4001 | 3437 | bool vector256 = false;
kvn@4001 | 3438 | __ vpmulld($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256);
kvn@4001 | 3439 | %}
kvn@4001 | 3440 | ins_pipe( pipe_slow );
kvn@4001 | 3441 | %}
kvn@4001 | 3442 | 
kvn@4001 | 3443 | instruct vmul4I(vecX dst, vecX src) %{
kvn@4001 | 3444 | predicate(UseSSE > 3 && n->as_Vector()->length() == 4);
kvn@4001 | 3445 | match(Set dst (MulVI dst src));
kvn@4001 | 3446 | format %{ "pmulld $dst,$src\t! mul packed4I" %}
kvn@4001 | 3447 | ins_encode %{
kvn@4001 | 3448 | __ pmulld($dst$$XMMRegister, $src$$XMMRegister);
kvn@4001 | 3449 | %}
kvn@4001 | 3450 | ins_pipe( pipe_slow );
kvn@4001 | 3451 | %}
kvn@4001 | 3452 | 
kvn@4001 | 3453 | instruct vmul4I_reg(vecX dst, vecX src1, vecX src2) %{
kvn@4001 | 3454 | predicate(UseAVX > 0 && n->as_Vector()->length() == 4);
kvn@4001 | 3455 | match(Set dst (MulVI src1 src2));
kvn@4001 | 3456 | format %{ "vpmulld $dst,$src1,$src2\t! mul packed4I" %}
kvn@4001 | 3457 | ins_encode %{
kvn@4001 | 3458 | bool vector256 = false;
kvn@4001 | 3459 | __ vpmulld($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256);
kvn@4001 | 3460 | %}
kvn@4001 | 3461 | ins_pipe( pipe_slow );
kvn@4001 | 3462 | %}
kvn@4001 | 3463 | 
kvn@4001 | 3464 | instruct vmul4I_mem(vecX dst, vecX src, memory mem) %{
kvn@4001 | 3465 | predicate(UseAVX > 0 && n->as_Vector()->length() == 4);
kvn@4001 | 3466 | match(Set dst (MulVI src (LoadVector mem)));
kvn@4001 | 3467 | format %{ "vpmulld $dst,$src,$mem\t! mul packed4I" %}
kvn@4001 | 3468 | ins_encode %{
kvn@4001 | 3469 | bool vector256 = false;
kvn@4001 | 3470 | __ vpmulld($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector256);
kvn@4001 | 3471 | %}
kvn@4001 | 3472 | ins_pipe( pipe_slow );
kvn@4001 | 3473 | %}
kvn@4001 | 3474 | 
kvn@4001 | 3475 | instruct vmul8I_reg(vecY dst, vecY src1, vecY src2) %{
kvn@4001 | 3476 | predicate(UseAVX > 1 && n->as_Vector()->length() == 8);
kvn@4001 | 3477 | match(Set dst (MulVI src1 src2));
kvn@4001 | 3478 | format %{ "vpmulld $dst,$src1,$src2\t! mul packed8I" %}
kvn@4001 | 3479 | ins_encode %{
kvn@4001 | 3480 | bool vector256 = true;
kvn@4001 | 3481 | __ vpmulld($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256);
kvn@4001 | 3482 | %}
kvn@4001 | 3483 | ins_pipe( pipe_slow );
kvn@4001 | 3484 | %}
kvn@4001 | 3485 | 
kvn@4001 | 3486 | instruct vmul8I_mem(vecY dst, vecY src, memory mem) %{
kvn@4001 | 3487 | predicate(UseAVX > 1 && n->as_Vector()->length() == 8);
kvn@4001 | 3488 | match(Set dst (MulVI src (LoadVector mem)));
kvn@4001 | 3489 | format %{ "vpmulld $dst,$src,$mem\t! mul packed8I" %}
kvn@4001 | 3490 | ins_encode %{
kvn@4001 | 3491 | bool vector256 = true;
kvn@4001 | 3492 | __ vpmulld($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector256);
kvn@4001 | 3493 | %}
kvn@4001 | 3494 | ins_pipe( pipe_slow );
kvn@4001 | 3495 | %}
kvn@4001 | 3496 | 
kvn@4001 | 3497 | // Floats vector mul
// MulVF rules (mulps/vmulps): in-place two-operand forms for pre-AVX,
// three-operand AVX forms gated by UseAVX > 0; 'vector256' selects
// 128-bit (2F/4F) vs 256-bit (8F) encoding. *_mem folds a LoadVector.
kvn@4001 | 3498 | instruct vmul2F(vecD dst, vecD src) %{
kvn@4001 | 3499 | predicate(n->as_Vector()->length() == 2);
kvn@4001 | 3500 | match(Set dst (MulVF dst src));
kvn@4001 | 3501 | format %{ "mulps $dst,$src\t! mul packed2F" %}
kvn@4001 | 3502 | ins_encode %{
kvn@4001 | 3503 | __ mulps($dst$$XMMRegister, $src$$XMMRegister);
kvn@4001 | 3504 | %}
kvn@4001 | 3505 | ins_pipe( pipe_slow );
kvn@4001 | 3506 | %}
kvn@4001 | 3507 | 
kvn@4001 | 3508 | instruct vmul2F_reg(vecD dst, vecD src1, vecD src2) %{
kvn@4001 | 3509 | predicate(UseAVX > 0 && n->as_Vector()->length() == 2);
kvn@4001 | 3510 | match(Set dst (MulVF src1 src2));
kvn@4001 | 3511 | format %{ "vmulps $dst,$src1,$src2\t! mul packed2F" %}
kvn@4001 | 3512 | ins_encode %{
kvn@4001 | 3513 | bool vector256 = false;
kvn@4001 | 3514 | __ vmulps($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256);
kvn@4001 | 3515 | %}
kvn@4001 | 3516 | ins_pipe( pipe_slow );
kvn@4001 | 3517 | %}
kvn@4001 | 3518 | 
kvn@4001 | 3519 | instruct vmul4F(vecX dst, vecX src) %{
kvn@4001 | 3520 | predicate(n->as_Vector()->length() == 4);
kvn@4001 | 3521 | match(Set dst (MulVF dst src));
kvn@4001 | 3522 | format %{ "mulps $dst,$src\t! mul packed4F" %}
kvn@4001 | 3523 | ins_encode %{
kvn@4001 | 3524 | __ mulps($dst$$XMMRegister, $src$$XMMRegister);
kvn@4001 | 3525 | %}
kvn@4001 | 3526 | ins_pipe( pipe_slow );
kvn@4001 | 3527 | %}
kvn@4001 | 3528 | 
kvn@4001 | 3529 | instruct vmul4F_reg(vecX dst, vecX src1, vecX src2) %{
kvn@4001 | 3530 | predicate(UseAVX > 0 && n->as_Vector()->length() == 4);
kvn@4001 | 3531 | match(Set dst (MulVF src1 src2));
kvn@4001 | 3532 | format %{ "vmulps $dst,$src1,$src2\t! mul packed4F" %}
kvn@4001 | 3533 | ins_encode %{
kvn@4001 | 3534 | bool vector256 = false;
kvn@4001 | 3535 | __ vmulps($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256);
kvn@4001 | 3536 | %}
kvn@4001 | 3537 | ins_pipe( pipe_slow );
kvn@4001 | 3538 | %}
kvn@4001 | 3539 | 
kvn@4001 | 3540 | instruct vmul4F_mem(vecX dst, vecX src, memory mem) %{
kvn@4001 | 3541 | predicate(UseAVX > 0 && n->as_Vector()->length() == 4);
kvn@4001 | 3542 | match(Set dst (MulVF src (LoadVector mem)));
kvn@4001 | 3543 | format %{ "vmulps $dst,$src,$mem\t! mul packed4F" %}
kvn@4001 | 3544 | ins_encode %{
kvn@4001 | 3545 | bool vector256 = false;
kvn@4001 | 3546 | __ vmulps($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector256);
kvn@4001 | 3547 | %}
kvn@4001 | 3548 | ins_pipe( pipe_slow );
kvn@4001 | 3549 | %}
kvn@4001 | 3550 | 
kvn@4001 | 3551 | instruct vmul8F_reg(vecY dst, vecY src1, vecY src2) %{
kvn@4001 | 3552 | predicate(UseAVX > 0 && n->as_Vector()->length() == 8);
kvn@4001 | 3553 | match(Set dst (MulVF src1 src2));
kvn@4001 | 3554 | format %{ "vmulps $dst,$src1,$src2\t! mul packed8F" %}
kvn@4001 | 3555 | ins_encode %{
kvn@4001 | 3556 | bool vector256 = true;
kvn@4001 | 3557 | __ vmulps($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256);
kvn@4001 | 3558 | %}
kvn@4001 | 3559 | ins_pipe( pipe_slow );
kvn@4001 | 3560 | %}
kvn@4001 | 3561 | 
kvn@4001 | 3562 | instruct vmul8F_mem(vecY dst, vecY src, memory mem) %{
kvn@4001 | 3563 | predicate(UseAVX > 0 && n->as_Vector()->length() == 8);
kvn@4001 | 3564 | match(Set dst (MulVF src (LoadVector mem)));
kvn@4001 | 3565 | format %{ "vmulps $dst,$src,$mem\t! mul packed8F" %}
kvn@4001 | 3566 | ins_encode %{
kvn@4001 | 3567 | bool vector256 = true;
kvn@4001 | 3568 | __ vmulps($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector256);
kvn@4001 | 3569 | %}
kvn@4001 | 3570 | ins_pipe( pipe_slow );
kvn@4001 | 3571 | %}
kvn@4001 | 3572 | 
kvn@4001 | 3573 | // Doubles vector mul
// MulVD rules (mulpd/vmulpd), same structure as the float family:
// in-place pre-AVX form, AVX three-operand register and memory forms,
// with 'vector256' selecting 128-bit (2D) vs 256-bit (4D) encoding.
kvn@4001 | 3574 | instruct vmul2D(vecX dst, vecX src) %{
kvn@4001 | 3575 | predicate(n->as_Vector()->length() == 2);
kvn@4001 | 3576 | match(Set dst (MulVD dst src));
kvn@4001 | 3577 | format %{ "mulpd $dst,$src\t! mul packed2D" %}
kvn@4001 | 3578 | ins_encode %{
kvn@4001 | 3579 | __ mulpd($dst$$XMMRegister, $src$$XMMRegister);
kvn@4001 | 3580 | %}
kvn@4001 | 3581 | ins_pipe( pipe_slow );
kvn@4001 | 3582 | %}
kvn@4001 | 3583 | 
kvn@4001 | 3584 | instruct vmul2D_reg(vecX dst, vecX src1, vecX src2) %{
kvn@4001 | 3585 | predicate(UseAVX > 0 && n->as_Vector()->length() == 2);
kvn@4001 | 3586 | match(Set dst (MulVD src1 src2));
kvn@4001 | 3587 | format %{ "vmulpd $dst,$src1,$src2\t! mul packed2D" %}
kvn@4001 | 3588 | ins_encode %{
kvn@4001 | 3589 | bool vector256 = false;
kvn@4001 | 3590 | __ vmulpd($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256);
kvn@4001 | 3591 | %}
kvn@4001 | 3592 | ins_pipe( pipe_slow );
kvn@4001 | 3593 | %}
kvn@4001 | 3594 | 
kvn@4001 | 3595 | instruct vmul2D_mem(vecX dst, vecX src, memory mem) %{
kvn@4001 | 3596 | predicate(UseAVX > 0 && n->as_Vector()->length() == 2);
kvn@4001 | 3597 | match(Set dst (MulVD src (LoadVector mem)));
kvn@4001 | 3598 | format %{ "vmulpd $dst,$src,$mem\t! mul packed2D" %}
kvn@4001 | 3599 | ins_encode %{
kvn@4001 | 3600 | bool vector256 = false;
kvn@4001 | 3601 | __ vmulpd($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector256);
kvn@4001 | 3602 | %}
kvn@4001 | 3603 | ins_pipe( pipe_slow );
kvn@4001 | 3604 | %}
kvn@4001 | 3605 | 
kvn@4001 | 3606 | instruct vmul4D_reg(vecY dst, vecY src1, vecY src2) %{
kvn@4001 | 3607 | predicate(UseAVX > 0 && n->as_Vector()->length() == 4);
kvn@4001 | 3608 | match(Set dst (MulVD src1 src2));
kvn@4001 | 3609 | format %{ "vmulpd $dst,$src1,$src2\t! mul packed4D" %}
kvn@4001 | 3610 | ins_encode %{
kvn@4001 | 3611 | bool vector256 = true;
kvn@4001 | 3612 | __ vmulpd($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256);
kvn@4001 | 3613 | %}
kvn@4001 | 3614 | ins_pipe( pipe_slow );
kvn@4001 | 3615 | %}
kvn@4001 | 3616 | 
kvn@4001 | 3617 | instruct vmul4D_mem(vecY dst, vecY src, memory mem) %{
kvn@4001 | 3618 | predicate(UseAVX > 0 && n->as_Vector()->length() == 4);
kvn@4001 | 3619 | match(Set dst (MulVD src (LoadVector mem)));
kvn@4001 | 3620 | format %{ "vmulpd $dst,$src,$mem\t! mul packed4D" %}
kvn@4001 | 3621 | ins_encode %{
kvn@4001 | 3622 | bool vector256 = true;
kvn@4001 | 3623 | __ vmulpd($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector256);
kvn@4001 | 3624 | %}
kvn@4001 | 3625 | ins_pipe( pipe_slow );
kvn@4001 | 3626 | %}
kvn@4001 | 3627 | 
kvn@4001 | 3628 | // --------------------------------- DIV --------------------------------------
kvn@4001 | 3629 | 
kvn@4001 | 3630 | // Floats vector div
// DivVF rules (divps/vdivps): in-place two-operand forms for pre-AVX,
// three-operand AVX forms gated by UseAVX > 0; 'vector256' selects
// 128-bit (2F/4F) vs 256-bit (8F) encoding. *_mem folds a LoadVector.
kvn@4001 | 3631 | instruct vdiv2F(vecD dst, vecD src) %{
kvn@4001 | 3632 | predicate(n->as_Vector()->length() == 2);
kvn@4001 | 3633 | match(Set dst (DivVF dst src));
kvn@4001 | 3634 | format %{ "divps $dst,$src\t! div packed2F" %}
kvn@4001 | 3635 | ins_encode %{
kvn@4001 | 3636 | __ divps($dst$$XMMRegister, $src$$XMMRegister);
kvn@4001 | 3637 | %}
kvn@4001 | 3638 | ins_pipe( pipe_slow );
kvn@4001 | 3639 | %}
kvn@4001 | 3640 | 
kvn@4001 | 3641 | instruct vdiv2F_reg(vecD dst, vecD src1, vecD src2) %{
kvn@4001 | 3642 | predicate(UseAVX > 0 && n->as_Vector()->length() == 2);
kvn@4001 | 3643 | match(Set dst (DivVF src1 src2));
kvn@4001 | 3644 | format %{ "vdivps $dst,$src1,$src2\t! div packed2F" %}
kvn@4001 | 3645 | ins_encode %{
kvn@4001 | 3646 | bool vector256 = false;
kvn@4001 | 3647 | __ vdivps($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256);
kvn@4001 | 3648 | %}
kvn@4001 | 3649 | ins_pipe( pipe_slow );
kvn@4001 | 3650 | %}
kvn@4001 | 3651 | 
kvn@4001 | 3652 | instruct vdiv4F(vecX dst, vecX src) %{
kvn@4001 | 3653 | predicate(n->as_Vector()->length() == 4);
kvn@4001 | 3654 | match(Set dst (DivVF dst src));
kvn@4001 | 3655 | format %{ "divps $dst,$src\t! div packed4F" %}
kvn@4001 | 3656 | ins_encode %{
kvn@4001 | 3657 | __ divps($dst$$XMMRegister, $src$$XMMRegister);
kvn@4001 | 3658 | %}
kvn@4001 | 3659 | ins_pipe( pipe_slow );
kvn@4001 | 3660 | %}
kvn@4001 | 3661 | 
kvn@4001 | 3662 | instruct vdiv4F_reg(vecX dst, vecX src1, vecX src2) %{
kvn@4001 | 3663 | predicate(UseAVX > 0 && n->as_Vector()->length() == 4);
kvn@4001 | 3664 | match(Set dst (DivVF src1 src2));
kvn@4001 | 3665 | format %{ "vdivps $dst,$src1,$src2\t! div packed4F" %}
kvn@4001 | 3666 | ins_encode %{
kvn@4001 | 3667 | bool vector256 = false;
kvn@4001 | 3668 | __ vdivps($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256);
kvn@4001 | 3669 | %}
kvn@4001 | 3670 | ins_pipe( pipe_slow );
kvn@4001 | 3671 | %}
kvn@4001 | 3672 | 
kvn@4001 | 3673 | instruct vdiv4F_mem(vecX dst, vecX src, memory mem) %{
kvn@4001 | 3674 | predicate(UseAVX > 0 && n->as_Vector()->length() == 4);
kvn@4001 | 3675 | match(Set dst (DivVF src (LoadVector mem)));
kvn@4001 | 3676 | format %{ "vdivps $dst,$src,$mem\t! div packed4F" %}
kvn@4001 | 3677 | ins_encode %{
kvn@4001 | 3678 | bool vector256 = false;
kvn@4001 | 3679 | __ vdivps($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector256);
kvn@4001 | 3680 | %}
kvn@4001 | 3681 | ins_pipe( pipe_slow );
kvn@4001 | 3682 | %}
kvn@4001 | 3683 | 
kvn@4001 | 3684 | instruct vdiv8F_reg(vecY dst, vecY src1, vecY src2) %{
kvn@4001 | 3685 | predicate(UseAVX > 0 && n->as_Vector()->length() == 8);
kvn@4001 | 3686 | match(Set dst (DivVF src1 src2));
kvn@4001 | 3687 | format %{ "vdivps $dst,$src1,$src2\t! div packed8F" %}
kvn@4001 | 3688 | ins_encode %{
kvn@4001 | 3689 | bool vector256 = true;
kvn@4001 | 3690 | __ vdivps($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256);
kvn@4001 | 3691 | %}
kvn@4001 | 3692 | ins_pipe( pipe_slow );
kvn@4001 | 3693 | %}
kvn@4001 | 3694 | 
kvn@4001 | 3695 | instruct vdiv8F_mem(vecY dst, vecY src, memory mem) %{
kvn@4001 | 3696 | predicate(UseAVX > 0 && n->as_Vector()->length() == 8);
kvn@4001 | 3697 | match(Set dst (DivVF src (LoadVector mem)));
kvn@4001 | 3698 | format %{ "vdivps $dst,$src,$mem\t! div packed8F" %}
kvn@4001 | 3699 | ins_encode %{
kvn@4001 | 3700 | bool vector256 = true;
kvn@4001 | 3701 | __ vdivps($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector256);
kvn@4001 | 3702 | %}
kvn@4001 | 3703 | ins_pipe( pipe_slow );
kvn@4001 | 3704 | %}
kvn@4001 | 3705 | 
kvn@4001 | 3706 | // Doubles vector div
// DivVD rules (divpd/vdivpd), same structure as the float div family:
// in-place pre-AVX form, AVX three-operand register and memory forms,
// with 'vector256' selecting 128-bit (2D) vs 256-bit (4D) encoding.
kvn@4001 | 3707 | instruct vdiv2D(vecX dst, vecX src) %{
kvn@4001 | 3708 | predicate(n->as_Vector()->length() == 2);
kvn@4001 | 3709 | match(Set dst (DivVD dst src));
kvn@4001 | 3710 | format %{ "divpd $dst,$src\t! div packed2D" %}
kvn@4001 | 3711 | ins_encode %{
kvn@4001 | 3712 | __ divpd($dst$$XMMRegister, $src$$XMMRegister);
kvn@4001 | 3713 | %}
kvn@4001 | 3714 | ins_pipe( pipe_slow );
kvn@4001 | 3715 | %}
kvn@4001 | 3716 | 
kvn@4001 | 3717 | instruct vdiv2D_reg(vecX dst, vecX src1, vecX src2) %{
kvn@4001 | 3718 | predicate(UseAVX > 0 && n->as_Vector()->length() == 2);
kvn@4001 | 3719 | match(Set dst (DivVD src1 src2));
kvn@4001 | 3720 | format %{ "vdivpd $dst,$src1,$src2\t! div packed2D" %}
kvn@4001 | 3721 | ins_encode %{
kvn@4001 | 3722 | bool vector256 = false;
kvn@4001 | 3723 | __ vdivpd($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256);
kvn@4001 | 3724 | %}
kvn@4001 | 3725 | ins_pipe( pipe_slow );
kvn@4001 | 3726 | %}
kvn@4001 | 3727 | 
kvn@4001 | 3728 | instruct vdiv2D_mem(vecX dst, vecX src, memory mem) %{
kvn@4001 | 3729 | predicate(UseAVX > 0 && n->as_Vector()->length() == 2);
kvn@4001 | 3730 | match(Set dst (DivVD src (LoadVector mem)));
kvn@4001 | 3731 | format %{ "vdivpd $dst,$src,$mem\t! div packed2D" %}
kvn@4001 | 3732 | ins_encode %{
kvn@4001 | 3733 | bool vector256 = false;
kvn@4001 | 3734 | __ vdivpd($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector256);
kvn@4001 | 3735 | %}
kvn@4001 | 3736 | ins_pipe( pipe_slow );
kvn@4001 | 3737 | %}
kvn@4001 | 3738 | 
kvn@4001 | 3739 | instruct vdiv4D_reg(vecY dst, vecY src1, vecY src2) %{
kvn@4001 | 3740 | predicate(UseAVX > 0 && n->as_Vector()->length() == 4);
kvn@4001 | 3741 | match(Set dst (DivVD src1 src2));
kvn@4001 | 3742 | format %{ "vdivpd $dst,$src1,$src2\t! div packed4D" %}
kvn@4001 | 3743 | ins_encode %{
kvn@4001 | 3744 | bool vector256 = true;
kvn@4001 | 3745 | __ vdivpd($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256);
kvn@4001 | 3746 | %}
kvn@4001 | 3747 | ins_pipe( pipe_slow );
kvn@4001 | 3748 | %}
kvn@4001 | 3749 | 
kvn@4001 | 3750 | instruct vdiv4D_mem(vecY dst, vecY src, memory mem) %{
kvn@4001 | 3751 | predicate(UseAVX > 0 && n->as_Vector()->length() == 4);
kvn@4001 | 3752 | match(Set dst (DivVD src (LoadVector mem)));
kvn@4001 | 3753 | format %{ "vdivpd $dst,$src,$mem\t! div packed4D" %}
kvn@4001 | 3754 | ins_encode %{
kvn@4001 | 3755 | bool vector256 = true;
kvn@4001 | 3756 | __ vdivpd($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector256);
kvn@4001 | 3757 | %}
kvn@4001 | 3758 | ins_pipe( pipe_slow );
kvn@4001 | 3759 | %}
kvn@4001 | 3760 | 
kvn@4001 | 3761 | // ------------------------------ LeftShift ----------------------------------- |
kvn@4001 | 3762 | |
kvn@4001 | 3763 | // Shorts/Chars vector left shift |
kvn@4001 | 3764 | instruct vsll2S(vecS dst, regF shift) %{ |
kvn@4001 | 3765 | predicate(n->as_Vector()->length() == 2); |
kvn@4001 | 3766 | match(Set dst (LShiftVS dst shift)); |
kvn@4001 | 3767 | format %{ "psllw $dst,$shift\t! left shift packed2S" %} |
kvn@4001 | 3768 | ins_encode %{ |
kvn@4001 | 3769 | __ psllw($dst$$XMMRegister, $shift$$XMMRegister); |
kvn@4001 | 3770 | %} |
kvn@4001 | 3771 | ins_pipe( pipe_slow ); |
kvn@4001 | 3772 | %} |
kvn@4001 | 3773 | |
kvn@4001 | 3774 | instruct vsll2S_imm(vecS dst, immI8 shift) %{ |
kvn@4001 | 3775 | predicate(n->as_Vector()->length() == 2); |
kvn@4001 | 3776 | match(Set dst (LShiftVS dst shift)); |
kvn@4001 | 3777 | format %{ "psllw $dst,$shift\t! left shift packed2S" %} |
kvn@4001 | 3778 | ins_encode %{ |
kvn@4001 | 3779 | __ psllw($dst$$XMMRegister, (int)$shift$$constant); |
kvn@4001 | 3780 | %} |
kvn@4001 | 3781 | ins_pipe( pipe_slow ); |
kvn@4001 | 3782 | %} |
kvn@4001 | 3783 | |
kvn@4001 | 3784 | instruct vsll2S_reg(vecS dst, vecS src, regF shift) %{ |
kvn@4001 | 3785 | predicate(UseAVX > 0 && n->as_Vector()->length() == 2); |
kvn@4001 | 3786 | match(Set dst (LShiftVS src shift)); |
kvn@4001 | 3787 | format %{ "vpsllw $dst,$src,$shift\t! left shift packed2S" %} |
kvn@4001 | 3788 | ins_encode %{ |
kvn@4001 | 3789 | bool vector256 = false; |
kvn@4001 | 3790 | __ vpsllw($dst$$XMMRegister, $src$$XMMRegister, $shift$$XMMRegister, vector256); |
kvn@4001 | 3791 | %} |
kvn@4001 | 3792 | ins_pipe( pipe_slow ); |
kvn@4001 | 3793 | %} |
kvn@4001 | 3794 | |
kvn@4001 | 3795 | instruct vsll2S_reg_imm(vecS dst, vecS src, immI8 shift) %{ |
kvn@4001 | 3796 | predicate(UseAVX > 0 && n->as_Vector()->length() == 2); |
kvn@4001 | 3797 | match(Set dst (LShiftVS src shift)); |
kvn@4001 | 3798 | format %{ "vpsllw $dst,$src,$shift\t! left shift packed2S" %} |
kvn@4001 | 3799 | ins_encode %{ |
kvn@4001 | 3800 | bool vector256 = false; |
kvn@4001 | 3801 | __ vpsllw($dst$$XMMRegister, $src$$XMMRegister, (int)$shift$$constant, vector256); |
kvn@4001 | 3802 | %} |
kvn@4001 | 3803 | ins_pipe( pipe_slow ); |
kvn@4001 | 3804 | %} |
kvn@4001 | 3805 | |
// Left shift of packed 4-short vectors (64-bit vecD operands).

// In-place form; shift count held in an XMM register (regF operand class).
instruct vsll4S(vecD dst, regF shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (LShiftVS dst shift));
  format %{ "psllw $dst,$shift\t! left shift packed4S" %}
  ins_encode %{
    __ psllw($dst$$XMMRegister, $shift$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}

// In-place form with an 8-bit immediate shift count.
instruct vsll4S_imm(vecD dst, immI8 shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (LShiftVS dst shift));
  format %{ "psllw $dst,$shift\t! left shift packed4S" %}
  ins_encode %{
    __ psllw($dst$$XMMRegister, (int)$shift$$constant);
  %}
  ins_pipe( pipe_slow );
%}

// AVX three-operand form: dst = src << shift, count in an XMM register.
instruct vsll4S_reg(vecD dst, vecD src, regF shift) %{
  predicate(UseAVX > 0 && n->as_Vector()->length() == 4);
  match(Set dst (LShiftVS src shift));
  format %{ "vpsllw $dst,$src,$shift\t! left shift packed4S" %}
  ins_encode %{
    bool vector256 = false; // 128-bit VEX encoding
    __ vpsllw($dst$$XMMRegister, $src$$XMMRegister, $shift$$XMMRegister, vector256);
  %}
  ins_pipe( pipe_slow );
%}

// AVX three-operand form with an 8-bit immediate shift count.
instruct vsll4S_reg_imm(vecD dst, vecD src, immI8 shift) %{
  predicate(UseAVX > 0 && n->as_Vector()->length() == 4);
  match(Set dst (LShiftVS src shift));
  format %{ "vpsllw $dst,$src,$shift\t! left shift packed4S" %}
  ins_encode %{
    bool vector256 = false; // 128-bit VEX encoding
    __ vpsllw($dst$$XMMRegister, $src$$XMMRegister, (int)$shift$$constant, vector256);
  %}
  ins_pipe( pipe_slow );
%}
kvn@4001 | 3847 | |
// Left shift of packed 8-short vectors (128-bit vecX operands).

// In-place form; shift count held in an XMM register (regF operand class).
instruct vsll8S(vecX dst, regF shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (LShiftVS dst shift));
  format %{ "psllw $dst,$shift\t! left shift packed8S" %}
  ins_encode %{
    __ psllw($dst$$XMMRegister, $shift$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}

// In-place form with an 8-bit immediate shift count.
instruct vsll8S_imm(vecX dst, immI8 shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (LShiftVS dst shift));
  format %{ "psllw $dst,$shift\t! left shift packed8S" %}
  ins_encode %{
    __ psllw($dst$$XMMRegister, (int)$shift$$constant);
  %}
  ins_pipe( pipe_slow );
%}

// AVX three-operand form: dst = src << shift, count in an XMM register.
instruct vsll8S_reg(vecX dst, vecX src, regF shift) %{
  predicate(UseAVX > 0 && n->as_Vector()->length() == 8);
  match(Set dst (LShiftVS src shift));
  format %{ "vpsllw $dst,$src,$shift\t! left shift packed8S" %}
  ins_encode %{
    bool vector256 = false; // 128-bit VEX encoding
    __ vpsllw($dst$$XMMRegister, $src$$XMMRegister, $shift$$XMMRegister, vector256);
  %}
  ins_pipe( pipe_slow );
%}

// AVX three-operand form with an 8-bit immediate shift count.
instruct vsll8S_reg_imm(vecX dst, vecX src, immI8 shift) %{
  predicate(UseAVX > 0 && n->as_Vector()->length() == 8);
  match(Set dst (LShiftVS src shift));
  format %{ "vpsllw $dst,$src,$shift\t! left shift packed8S" %}
  ins_encode %{
    bool vector256 = false; // 128-bit VEX encoding
    __ vpsllw($dst$$XMMRegister, $src$$XMMRegister, (int)$shift$$constant, vector256);
  %}
  ins_pipe( pipe_slow );
%}
kvn@4001 | 3889 | |
// Left shift of packed 16-short vectors (256-bit vecY operands).
// Requires AVX2 (UseAVX > 1) for 256-bit integer operations.

// AVX2 three-operand form: dst = src << shift, count in an XMM register.
instruct vsll16S_reg(vecY dst, vecY src, regF shift) %{
  predicate(UseAVX > 1 && n->as_Vector()->length() == 16);
  match(Set dst (LShiftVS src shift));
  format %{ "vpsllw $dst,$src,$shift\t! left shift packed16S" %}
  ins_encode %{
    bool vector256 = true; // 256-bit VEX encoding
    __ vpsllw($dst$$XMMRegister, $src$$XMMRegister, $shift$$XMMRegister, vector256);
  %}
  ins_pipe( pipe_slow );
%}

// AVX2 three-operand form with an 8-bit immediate shift count.
instruct vsll16S_reg_imm(vecY dst, vecY src, immI8 shift) %{
  predicate(UseAVX > 1 && n->as_Vector()->length() == 16);
  match(Set dst (LShiftVS src shift));
  format %{ "vpsllw $dst,$src,$shift\t! left shift packed16S" %}
  ins_encode %{
    bool vector256 = true; // 256-bit VEX encoding
    __ vpsllw($dst$$XMMRegister, $src$$XMMRegister, (int)$shift$$constant, vector256);
  %}
  ins_pipe( pipe_slow );
%}
kvn@4001 | 3911 | |
kvn@4001 | 3912 | // Integers vector left shift |
// Left shift of packed 2-int vectors (64-bit vecD operands).

// In-place form; shift count held in an XMM register (regF operand class).
instruct vsll2I(vecD dst, regF shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (LShiftVI dst shift));
  format %{ "pslld $dst,$shift\t! left shift packed2I" %}
  ins_encode %{
    __ pslld($dst$$XMMRegister, $shift$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}

// In-place form with an 8-bit immediate shift count.
instruct vsll2I_imm(vecD dst, immI8 shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (LShiftVI dst shift));
  format %{ "pslld $dst,$shift\t! left shift packed2I" %}
  ins_encode %{
    __ pslld($dst$$XMMRegister, (int)$shift$$constant);
  %}
  ins_pipe( pipe_slow );
%}

// AVX three-operand form: dst = src << shift, count in an XMM register.
instruct vsll2I_reg(vecD dst, vecD src, regF shift) %{
  predicate(UseAVX > 0 && n->as_Vector()->length() == 2);
  match(Set dst (LShiftVI src shift));
  format %{ "vpslld $dst,$src,$shift\t! left shift packed2I" %}
  ins_encode %{
    bool vector256 = false; // 128-bit VEX encoding
    __ vpslld($dst$$XMMRegister, $src$$XMMRegister, $shift$$XMMRegister, vector256);
  %}
  ins_pipe( pipe_slow );
%}

// AVX three-operand form with an 8-bit immediate shift count.
instruct vsll2I_reg_imm(vecD dst, vecD src, immI8 shift) %{
  predicate(UseAVX > 0 && n->as_Vector()->length() == 2);
  match(Set dst (LShiftVI src shift));
  format %{ "vpslld $dst,$src,$shift\t! left shift packed2I" %}
  ins_encode %{
    bool vector256 = false; // 128-bit VEX encoding
    __ vpslld($dst$$XMMRegister, $src$$XMMRegister, (int)$shift$$constant, vector256);
  %}
  ins_pipe( pipe_slow );
%}
kvn@4001 | 3954 | |
// Left shift of packed 4-int vectors (128-bit vecX operands).

// In-place form; shift count held in an XMM register (regF operand class).
instruct vsll4I(vecX dst, regF shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (LShiftVI dst shift));
  format %{ "pslld $dst,$shift\t! left shift packed4I" %}
  ins_encode %{
    __ pslld($dst$$XMMRegister, $shift$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}

// In-place form with an 8-bit immediate shift count.
instruct vsll4I_imm(vecX dst, immI8 shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (LShiftVI dst shift));
  format %{ "pslld $dst,$shift\t! left shift packed4I" %}
  ins_encode %{
    __ pslld($dst$$XMMRegister, (int)$shift$$constant);
  %}
  ins_pipe( pipe_slow );
%}

// AVX three-operand form: dst = src << shift, count in an XMM register.
instruct vsll4I_reg(vecX dst, vecX src, regF shift) %{
  predicate(UseAVX > 0 && n->as_Vector()->length() == 4);
  match(Set dst (LShiftVI src shift));
  format %{ "vpslld $dst,$src,$shift\t! left shift packed4I" %}
  ins_encode %{
    bool vector256 = false; // 128-bit VEX encoding
    __ vpslld($dst$$XMMRegister, $src$$XMMRegister, $shift$$XMMRegister, vector256);
  %}
  ins_pipe( pipe_slow );
%}

// AVX three-operand form with an 8-bit immediate shift count.
instruct vsll4I_reg_imm(vecX dst, vecX src, immI8 shift) %{
  predicate(UseAVX > 0 && n->as_Vector()->length() == 4);
  match(Set dst (LShiftVI src shift));
  format %{ "vpslld $dst,$src,$shift\t! left shift packed4I" %}
  ins_encode %{
    bool vector256 = false; // 128-bit VEX encoding
    __ vpslld($dst$$XMMRegister, $src$$XMMRegister, (int)$shift$$constant, vector256);
  %}
  ins_pipe( pipe_slow );
%}
kvn@4001 | 3996 | |
// Left shift of packed 8-int vectors (256-bit vecY operands).
// Requires AVX2 (UseAVX > 1) for 256-bit integer operations.

// AVX2 three-operand form: dst = src << shift, count in an XMM register.
instruct vsll8I_reg(vecY dst, vecY src, regF shift) %{
  predicate(UseAVX > 1 && n->as_Vector()->length() == 8);
  match(Set dst (LShiftVI src shift));
  format %{ "vpslld $dst,$src,$shift\t! left shift packed8I" %}
  ins_encode %{
    bool vector256 = true; // 256-bit VEX encoding
    __ vpslld($dst$$XMMRegister, $src$$XMMRegister, $shift$$XMMRegister, vector256);
  %}
  ins_pipe( pipe_slow );
%}

// AVX2 three-operand form with an 8-bit immediate shift count.
instruct vsll8I_reg_imm(vecY dst, vecY src, immI8 shift) %{
  predicate(UseAVX > 1 && n->as_Vector()->length() == 8);
  match(Set dst (LShiftVI src shift));
  format %{ "vpslld $dst,$src,$shift\t! left shift packed8I" %}
  ins_encode %{
    bool vector256 = true; // 256-bit VEX encoding
    __ vpslld($dst$$XMMRegister, $src$$XMMRegister, (int)$shift$$constant, vector256);
  %}
  ins_pipe( pipe_slow );
%}
kvn@4001 | 4018 | |
kvn@4001 | 4019 | // Longs vector left shift |
// Left shift of packed 2-long vectors (128-bit vecX operands).

// In-place form; shift count held in an XMM register (regF operand class).
instruct vsll2L(vecX dst, regF shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (LShiftVL dst shift));
  format %{ "psllq $dst,$shift\t! left shift packed2L" %}
  ins_encode %{
    __ psllq($dst$$XMMRegister, $shift$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}

// In-place form with an 8-bit immediate shift count.
instruct vsll2L_imm(vecX dst, immI8 shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (LShiftVL dst shift));
  format %{ "psllq $dst,$shift\t! left shift packed2L" %}
  ins_encode %{
    __ psllq($dst$$XMMRegister, (int)$shift$$constant);
  %}
  ins_pipe( pipe_slow );
%}

// AVX three-operand form: dst = src << shift, count in an XMM register.
instruct vsll2L_reg(vecX dst, vecX src, regF shift) %{
  predicate(UseAVX > 0 && n->as_Vector()->length() == 2);
  match(Set dst (LShiftVL src shift));
  format %{ "vpsllq $dst,$src,$shift\t! left shift packed2L" %}
  ins_encode %{
    bool vector256 = false; // 128-bit VEX encoding
    __ vpsllq($dst$$XMMRegister, $src$$XMMRegister, $shift$$XMMRegister, vector256);
  %}
  ins_pipe( pipe_slow );
%}

// AVX three-operand form with an 8-bit immediate shift count.
instruct vsll2L_reg_imm(vecX dst, vecX src, immI8 shift) %{
  predicate(UseAVX > 0 && n->as_Vector()->length() == 2);
  match(Set dst (LShiftVL src shift));
  format %{ "vpsllq $dst,$src,$shift\t! left shift packed2L" %}
  ins_encode %{
    bool vector256 = false; // 128-bit VEX encoding
    __ vpsllq($dst$$XMMRegister, $src$$XMMRegister, (int)$shift$$constant, vector256);
  %}
  ins_pipe( pipe_slow );
%}
kvn@4001 | 4061 | |
// Left shift of packed 4-long vectors (256-bit vecY operands).
// Requires AVX2 (UseAVX > 1) for 256-bit integer operations.

// AVX2 three-operand form: dst = src << shift, count in an XMM register.
instruct vsll4L_reg(vecY dst, vecY src, regF shift) %{
  predicate(UseAVX > 1 && n->as_Vector()->length() == 4);
  match(Set dst (LShiftVL src shift));
  format %{ "vpsllq $dst,$src,$shift\t! left shift packed4L" %}
  ins_encode %{
    bool vector256 = true; // 256-bit VEX encoding
    __ vpsllq($dst$$XMMRegister, $src$$XMMRegister, $shift$$XMMRegister, vector256);
  %}
  ins_pipe( pipe_slow );
%}

// AVX2 three-operand form with an 8-bit immediate shift count.
instruct vsll4L_reg_imm(vecY dst, vecY src, immI8 shift) %{
  predicate(UseAVX > 1 && n->as_Vector()->length() == 4);
  match(Set dst (LShiftVL src shift));
  format %{ "vpsllq $dst,$src,$shift\t! left shift packed4L" %}
  ins_encode %{
    bool vector256 = true; // 256-bit VEX encoding
    __ vpsllq($dst$$XMMRegister, $src$$XMMRegister, (int)$shift$$constant, vector256);
  %}
  ins_pipe( pipe_slow );
%}
kvn@4001 | 4083 | |
kvn@4001 | 4084 | // ----------------------- LogicalRightShift ----------------------------------- |
kvn@4001 | 4085 | |
// Shorts/Chars vector logical right shift would produce an incorrect Java
// result for negative data, because Java code converts a short value into an
// int with sign extension before the shift. Hence no vector logical right
// shift rules for shorts/chars are defined here.
kvn@4001 | 4089 | |
kvn@4001 | 4090 | // Integers vector logical right shift |
// Logical (unsigned) right shift of packed 2-int vectors (64-bit vecD operands).

// In-place form; shift count held in an XMM register (regF operand class).
instruct vsrl2I(vecD dst, regF shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (URShiftVI dst shift));
  format %{ "psrld $dst,$shift\t! logical right shift packed2I" %}
  ins_encode %{
    __ psrld($dst$$XMMRegister, $shift$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}

// In-place form with an 8-bit immediate shift count.
instruct vsrl2I_imm(vecD dst, immI8 shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (URShiftVI dst shift));
  format %{ "psrld $dst,$shift\t! logical right shift packed2I" %}
  ins_encode %{
    __ psrld($dst$$XMMRegister, (int)$shift$$constant);
  %}
  ins_pipe( pipe_slow );
%}

// AVX three-operand form: dst = src >>> shift, count in an XMM register.
instruct vsrl2I_reg(vecD dst, vecD src, regF shift) %{
  predicate(UseAVX > 0 && n->as_Vector()->length() == 2);
  match(Set dst (URShiftVI src shift));
  format %{ "vpsrld $dst,$src,$shift\t! logical right shift packed2I" %}
  ins_encode %{
    bool vector256 = false; // 128-bit VEX encoding
    __ vpsrld($dst$$XMMRegister, $src$$XMMRegister, $shift$$XMMRegister, vector256);
  %}
  ins_pipe( pipe_slow );
%}

// AVX three-operand form with an 8-bit immediate shift count.
instruct vsrl2I_reg_imm(vecD dst, vecD src, immI8 shift) %{
  predicate(UseAVX > 0 && n->as_Vector()->length() == 2);
  match(Set dst (URShiftVI src shift));
  format %{ "vpsrld $dst,$src,$shift\t! logical right shift packed2I" %}
  ins_encode %{
    bool vector256 = false; // 128-bit VEX encoding
    __ vpsrld($dst$$XMMRegister, $src$$XMMRegister, (int)$shift$$constant, vector256);
  %}
  ins_pipe( pipe_slow );
%}
kvn@4001 | 4132 | |
// Logical (unsigned) right shift of packed 4-int vectors (128-bit vecX operands).

// In-place form; shift count held in an XMM register (regF operand class).
instruct vsrl4I(vecX dst, regF shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (URShiftVI dst shift));
  format %{ "psrld $dst,$shift\t! logical right shift packed4I" %}
  ins_encode %{
    __ psrld($dst$$XMMRegister, $shift$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}

// In-place form with an 8-bit immediate shift count.
instruct vsrl4I_imm(vecX dst, immI8 shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (URShiftVI dst shift));
  format %{ "psrld $dst,$shift\t! logical right shift packed4I" %}
  ins_encode %{
    __ psrld($dst$$XMMRegister, (int)$shift$$constant);
  %}
  ins_pipe( pipe_slow );
%}

// AVX three-operand form: dst = src >>> shift, count in an XMM register.
instruct vsrl4I_reg(vecX dst, vecX src, regF shift) %{
  predicate(UseAVX > 0 && n->as_Vector()->length() == 4);
  match(Set dst (URShiftVI src shift));
  format %{ "vpsrld $dst,$src,$shift\t! logical right shift packed4I" %}
  ins_encode %{
    bool vector256 = false; // 128-bit VEX encoding
    __ vpsrld($dst$$XMMRegister, $src$$XMMRegister, $shift$$XMMRegister, vector256);
  %}
  ins_pipe( pipe_slow );
%}

// AVX three-operand form with an 8-bit immediate shift count.
instruct vsrl4I_reg_imm(vecX dst, vecX src, immI8 shift) %{
  predicate(UseAVX > 0 && n->as_Vector()->length() == 4);
  match(Set dst (URShiftVI src shift));
  format %{ "vpsrld $dst,$src,$shift\t! logical right shift packed4I" %}
  ins_encode %{
    bool vector256 = false; // 128-bit VEX encoding
    __ vpsrld($dst$$XMMRegister, $src$$XMMRegister, (int)$shift$$constant, vector256);
  %}
  ins_pipe( pipe_slow );
%}
kvn@4001 | 4174 | |
// Logical (unsigned) right shift of packed 8-int vectors (256-bit vecY operands).
// Requires AVX2 (UseAVX > 1) for 256-bit integer operations.

// AVX2 three-operand form: dst = src >>> shift, count in an XMM register.
instruct vsrl8I_reg(vecY dst, vecY src, regF shift) %{
  predicate(UseAVX > 1 && n->as_Vector()->length() == 8);
  match(Set dst (URShiftVI src shift));
  format %{ "vpsrld $dst,$src,$shift\t! logical right shift packed8I" %}
  ins_encode %{
    bool vector256 = true; // 256-bit VEX encoding
    __ vpsrld($dst$$XMMRegister, $src$$XMMRegister, $shift$$XMMRegister, vector256);
  %}
  ins_pipe( pipe_slow );
%}

// AVX2 three-operand form with an 8-bit immediate shift count.
instruct vsrl8I_reg_imm(vecY dst, vecY src, immI8 shift) %{
  predicate(UseAVX > 1 && n->as_Vector()->length() == 8);
  match(Set dst (URShiftVI src shift));
  format %{ "vpsrld $dst,$src,$shift\t! logical right shift packed8I" %}
  ins_encode %{
    bool vector256 = true; // 256-bit VEX encoding
    __ vpsrld($dst$$XMMRegister, $src$$XMMRegister, (int)$shift$$constant, vector256);
  %}
  ins_pipe( pipe_slow );
%}
kvn@4001 | 4196 | |
kvn@4001 | 4197 | // Longs vector logical right shift |
// Logical (unsigned) right shift of packed 2-long vectors (128-bit vecX operands).

// In-place form; shift count held in an XMM register (regF operand class).
instruct vsrl2L(vecX dst, regF shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (URShiftVL dst shift));
  format %{ "psrlq $dst,$shift\t! logical right shift packed2L" %}
  ins_encode %{
    __ psrlq($dst$$XMMRegister, $shift$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}

// In-place form with an 8-bit immediate shift count.
instruct vsrl2L_imm(vecX dst, immI8 shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (URShiftVL dst shift));
  format %{ "psrlq $dst,$shift\t! logical right shift packed2L" %}
  ins_encode %{
    __ psrlq($dst$$XMMRegister, (int)$shift$$constant);
  %}
  ins_pipe( pipe_slow );
%}

// AVX three-operand form: dst = src >>> shift, count in an XMM register.
instruct vsrl2L_reg(vecX dst, vecX src, regF shift) %{
  predicate(UseAVX > 0 && n->as_Vector()->length() == 2);
  match(Set dst (URShiftVL src shift));
  format %{ "vpsrlq $dst,$src,$shift\t! logical right shift packed2L" %}
  ins_encode %{
    bool vector256 = false; // 128-bit VEX encoding
    __ vpsrlq($dst$$XMMRegister, $src$$XMMRegister, $shift$$XMMRegister, vector256);
  %}
  ins_pipe( pipe_slow );
%}

// AVX three-operand form with an 8-bit immediate shift count.
instruct vsrl2L_reg_imm(vecX dst, vecX src, immI8 shift) %{
  predicate(UseAVX > 0 && n->as_Vector()->length() == 2);
  match(Set dst (URShiftVL src shift));
  format %{ "vpsrlq $dst,$src,$shift\t! logical right shift packed2L" %}
  ins_encode %{
    bool vector256 = false; // 128-bit VEX encoding
    __ vpsrlq($dst$$XMMRegister, $src$$XMMRegister, (int)$shift$$constant, vector256);
  %}
  ins_pipe( pipe_slow );
%}
kvn@4001 | 4239 | |
// Logical (unsigned) right shift of packed 4-long vectors (256-bit vecY operands).
// Requires AVX2 (UseAVX > 1) for 256-bit integer operations.

// AVX2 three-operand form: dst = src >>> shift, count in an XMM register.
instruct vsrl4L_reg(vecY dst, vecY src, regF shift) %{
  predicate(UseAVX > 1 && n->as_Vector()->length() == 4);
  match(Set dst (URShiftVL src shift));
  format %{ "vpsrlq $dst,$src,$shift\t! logical right shift packed4L" %}
  ins_encode %{
    bool vector256 = true; // 256-bit VEX encoding
    __ vpsrlq($dst$$XMMRegister, $src$$XMMRegister, $shift$$XMMRegister, vector256);
  %}
  ins_pipe( pipe_slow );
%}

// AVX2 three-operand form with an 8-bit immediate shift count.
instruct vsrl4L_reg_imm(vecY dst, vecY src, immI8 shift) %{
  predicate(UseAVX > 1 && n->as_Vector()->length() == 4);
  match(Set dst (URShiftVL src shift));
  format %{ "vpsrlq $dst,$src,$shift\t! logical right shift packed4L" %}
  ins_encode %{
    bool vector256 = true; // 256-bit VEX encoding
    __ vpsrlq($dst$$XMMRegister, $src$$XMMRegister, (int)$shift$$constant, vector256);
  %}
  ins_pipe( pipe_slow );
%}
kvn@4001 | 4261 | |
kvn@4001 | 4262 | // ------------------- ArithmeticRightShift ----------------------------------- |
kvn@4001 | 4263 | |
kvn@4001 | 4264 | // Shorts/Chars vector arithmetic right shift |
// Arithmetic (sign-extending) right shift of packed 2-short vectors
// (32-bit vecS operands).

// In-place form; shift count held in an XMM register (regF operand class).
instruct vsra2S(vecS dst, regF shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (RShiftVS dst shift));
  format %{ "psraw $dst,$shift\t! arithmetic right shift packed2S" %}
  ins_encode %{
    __ psraw($dst$$XMMRegister, $shift$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}

// In-place form with an 8-bit immediate shift count.
instruct vsra2S_imm(vecS dst, immI8 shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (RShiftVS dst shift));
  format %{ "psraw $dst,$shift\t! arithmetic right shift packed2S" %}
  ins_encode %{
    __ psraw($dst$$XMMRegister, (int)$shift$$constant);
  %}
  ins_pipe( pipe_slow );
%}

// AVX three-operand form: dst = src >> shift, count in an XMM register.
instruct vsra2S_reg(vecS dst, vecS src, regF shift) %{
  predicate(UseAVX > 0 && n->as_Vector()->length() == 2);
  match(Set dst (RShiftVS src shift));
  format %{ "vpsraw $dst,$src,$shift\t! arithmetic right shift packed2S" %}
  ins_encode %{
    bool vector256 = false; // 128-bit VEX encoding
    __ vpsraw($dst$$XMMRegister, $src$$XMMRegister, $shift$$XMMRegister, vector256);
  %}
  ins_pipe( pipe_slow );
%}

// AVX three-operand form with an 8-bit immediate shift count.
instruct vsra2S_reg_imm(vecS dst, vecS src, immI8 shift) %{
  predicate(UseAVX > 0 && n->as_Vector()->length() == 2);
  match(Set dst (RShiftVS src shift));
  format %{ "vpsraw $dst,$src,$shift\t! arithmetic right shift packed2S" %}
  ins_encode %{
    bool vector256 = false; // 128-bit VEX encoding
    __ vpsraw($dst$$XMMRegister, $src$$XMMRegister, (int)$shift$$constant, vector256);
  %}
  ins_pipe( pipe_slow );
%}
kvn@4001 | 4306 | |
// Arithmetic (sign-extending) right shift of packed 4-short vectors
// (64-bit vecD operands).

// In-place form; shift count held in an XMM register (regF operand class).
instruct vsra4S(vecD dst, regF shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (RShiftVS dst shift));
  format %{ "psraw $dst,$shift\t! arithmetic right shift packed4S" %}
  ins_encode %{
    __ psraw($dst$$XMMRegister, $shift$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}

// In-place form with an 8-bit immediate shift count.
instruct vsra4S_imm(vecD dst, immI8 shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (RShiftVS dst shift));
  format %{ "psraw $dst,$shift\t! arithmetic right shift packed4S" %}
  ins_encode %{
    __ psraw($dst$$XMMRegister, (int)$shift$$constant);
  %}
  ins_pipe( pipe_slow );
%}

// AVX three-operand form: dst = src >> shift, count in an XMM register.
instruct vsra4S_reg(vecD dst, vecD src, regF shift) %{
  predicate(UseAVX > 0 && n->as_Vector()->length() == 4);
  match(Set dst (RShiftVS src shift));
  format %{ "vpsraw $dst,$src,$shift\t! arithmetic right shift packed4S" %}
  ins_encode %{
    bool vector256 = false; // 128-bit VEX encoding
    __ vpsraw($dst$$XMMRegister, $src$$XMMRegister, $shift$$XMMRegister, vector256);
  %}
  ins_pipe( pipe_slow );
%}

// AVX three-operand form with an 8-bit immediate shift count.
instruct vsra4S_reg_imm(vecD dst, vecD src, immI8 shift) %{
  predicate(UseAVX > 0 && n->as_Vector()->length() == 4);
  match(Set dst (RShiftVS src shift));
  format %{ "vpsraw $dst,$src,$shift\t! arithmetic right shift packed4S" %}
  ins_encode %{
    bool vector256 = false; // 128-bit VEX encoding
    __ vpsraw($dst$$XMMRegister, $src$$XMMRegister, (int)$shift$$constant, vector256);
  %}
  ins_pipe( pipe_slow );
%}
kvn@4001 | 4348 | |
// Arithmetic (sign-extending) right shift of packed 8-short vectors
// (128-bit vecX operands).

// In-place form; shift count held in an XMM register (regF operand class).
instruct vsra8S(vecX dst, regF shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (RShiftVS dst shift));
  format %{ "psraw $dst,$shift\t! arithmetic right shift packed8S" %}
  ins_encode %{
    __ psraw($dst$$XMMRegister, $shift$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}

// In-place form with an 8-bit immediate shift count.
instruct vsra8S_imm(vecX dst, immI8 shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (RShiftVS dst shift));
  format %{ "psraw $dst,$shift\t! arithmetic right shift packed8S" %}
  ins_encode %{
    __ psraw($dst$$XMMRegister, (int)$shift$$constant);
  %}
  ins_pipe( pipe_slow );
%}

// AVX three-operand form: dst = src >> shift, count in an XMM register.
instruct vsra8S_reg(vecX dst, vecX src, regF shift) %{
  predicate(UseAVX > 0 && n->as_Vector()->length() == 8);
  match(Set dst (RShiftVS src shift));
  format %{ "vpsraw $dst,$src,$shift\t! arithmetic right shift packed8S" %}
  ins_encode %{
    bool vector256 = false; // 128-bit VEX encoding
    __ vpsraw($dst$$XMMRegister, $src$$XMMRegister, $shift$$XMMRegister, vector256);
  %}
  ins_pipe( pipe_slow );
%}

// AVX three-operand form with an 8-bit immediate shift count.
instruct vsra8S_reg_imm(vecX dst, vecX src, immI8 shift) %{
  predicate(UseAVX > 0 && n->as_Vector()->length() == 8);
  match(Set dst (RShiftVS src shift));
  format %{ "vpsraw $dst,$src,$shift\t! arithmetic right shift packed8S" %}
  ins_encode %{
    bool vector256 = false; // 128-bit VEX encoding
    __ vpsraw($dst$$XMMRegister, $src$$XMMRegister, (int)$shift$$constant, vector256);
  %}
  ins_pipe( pipe_slow );
%}
kvn@4001 | 4390 | |
// Arithmetic (sign-extending) right shift of packed 16-short vectors
// (256-bit vecY operands). Requires AVX2 (UseAVX > 1).

// AVX2 three-operand form: dst = src >> shift, count in an XMM register.
instruct vsra16S_reg(vecY dst, vecY src, regF shift) %{
  predicate(UseAVX > 1 && n->as_Vector()->length() == 16);
  match(Set dst (RShiftVS src shift));
  format %{ "vpsraw $dst,$src,$shift\t! arithmetic right shift packed16S" %}
  ins_encode %{
    bool vector256 = true; // 256-bit VEX encoding
    __ vpsraw($dst$$XMMRegister, $src$$XMMRegister, $shift$$XMMRegister, vector256);
  %}
  ins_pipe( pipe_slow );
%}

// AVX2 three-operand form with an 8-bit immediate shift count.
instruct vsra16S_reg_imm(vecY dst, vecY src, immI8 shift) %{
  predicate(UseAVX > 1 && n->as_Vector()->length() == 16);
  match(Set dst (RShiftVS src shift));
  format %{ "vpsraw $dst,$src,$shift\t! arithmetic right shift packed16S" %}
  ins_encode %{
    bool vector256 = true; // 256-bit VEX encoding
    __ vpsraw($dst$$XMMRegister, $src$$XMMRegister, (int)$shift$$constant, vector256);
  %}
  ins_pipe( pipe_slow );
%}
kvn@4001 | 4412 | |
kvn@4001 | 4413 | // Integers vector arithmetic right shift |
// Arithmetic (sign-extending) right shift of packed 2-int vectors
// (64-bit vecD operands).

// In-place form; shift count held in an XMM register (regF operand class).
instruct vsra2I(vecD dst, regF shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (RShiftVI dst shift));
  format %{ "psrad $dst,$shift\t! arithmetic right shift packed2I" %}
  ins_encode %{
    __ psrad($dst$$XMMRegister, $shift$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}

// In-place form with an 8-bit immediate shift count.
instruct vsra2I_imm(vecD dst, immI8 shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (RShiftVI dst shift));
  format %{ "psrad $dst,$shift\t! arithmetic right shift packed2I" %}
  ins_encode %{
    __ psrad($dst$$XMMRegister, (int)$shift$$constant);
  %}
  ins_pipe( pipe_slow );
%}

// AVX three-operand form: dst = src >> shift, count in an XMM register.
instruct vsra2I_reg(vecD dst, vecD src, regF shift) %{
  predicate(UseAVX > 0 && n->as_Vector()->length() == 2);
  match(Set dst (RShiftVI src shift));
  format %{ "vpsrad $dst,$src,$shift\t! arithmetic right shift packed2I" %}
  ins_encode %{
    bool vector256 = false; // 128-bit VEX encoding
    __ vpsrad($dst$$XMMRegister, $src$$XMMRegister, $shift$$XMMRegister, vector256);
  %}
  ins_pipe( pipe_slow );
%}

// AVX three-operand form with an 8-bit immediate shift count.
instruct vsra2I_reg_imm(vecD dst, vecD src, immI8 shift) %{
  predicate(UseAVX > 0 && n->as_Vector()->length() == 2);
  match(Set dst (RShiftVI src shift));
  format %{ "vpsrad $dst,$src,$shift\t! arithmetic right shift packed2I" %}
  ins_encode %{
    bool vector256 = false; // 128-bit VEX encoding
    __ vpsrad($dst$$XMMRegister, $src$$XMMRegister, (int)$shift$$constant, vector256);
  %}
  ins_pipe( pipe_slow );
%}
kvn@4001 | 4455 | |
// Arithmetic right shift of 4 packed ints, in place; shift count is
// taken from an XMM register (regF shift operand).
instruct vsra4I(vecX dst, regF shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (RShiftVI dst shift));
  format %{ "psrad $dst,$shift\t! arithmetic right shift packed4I" %}
  ins_encode %{
    __ psrad($dst$$XMMRegister, $shift$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}
kvn@4001 | 4465 | |
// Arithmetic right shift of 4 packed ints by an 8-bit immediate count,
// in place.
instruct vsra4I_imm(vecX dst, immI8 shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (RShiftVI dst shift));
  format %{ "psrad $dst,$shift\t! arithmetic right shift packed4I" %}
  ins_encode %{
    __ psrad($dst$$XMMRegister, (int)$shift$$constant);
  %}
  ins_pipe( pipe_slow );
%}
kvn@4001 | 4475 | |
// AVX non-destructive arithmetic right shift of 4 packed ints; shift
// count is taken from an XMM register.
instruct vsra4I_reg(vecX dst, vecX src, regF shift) %{
  predicate(UseAVX > 0 && n->as_Vector()->length() == 4);
  match(Set dst (RShiftVI src shift));
  format %{ "vpsrad $dst,$src,$shift\t! arithmetic right shift packed4I" %}
  ins_encode %{
    bool vector256 = false; // 128-bit VEX encoding
    __ vpsrad($dst$$XMMRegister, $src$$XMMRegister, $shift$$XMMRegister, vector256);
  %}
  ins_pipe( pipe_slow );
%}
kvn@4001 | 4486 | |
// AVX non-destructive arithmetic right shift of 4 packed ints by an
// 8-bit immediate count.
instruct vsra4I_reg_imm(vecX dst, vecX src, immI8 shift) %{
  predicate(UseAVX > 0 && n->as_Vector()->length() == 4);
  match(Set dst (RShiftVI src shift));
  format %{ "vpsrad $dst,$src,$shift\t! arithmetic right shift packed4I" %}
  ins_encode %{
    bool vector256 = false; // 128-bit VEX encoding
    __ vpsrad($dst$$XMMRegister, $src$$XMMRegister, (int)$shift$$constant, vector256);
  %}
  ins_pipe( pipe_slow );
%}
kvn@4001 | 4497 | |
// Arithmetic right shift of 8 packed ints in a 256-bit (vecY) register;
// gated on UseAVX > 1 since 256-bit integer ops require AVX2.
instruct vsra8I_reg(vecY dst, vecY src, regF shift) %{
  predicate(UseAVX > 1 && n->as_Vector()->length() == 8);
  match(Set dst (RShiftVI src shift));
  format %{ "vpsrad $dst,$src,$shift\t! arithmetic right shift packed8I" %}
  ins_encode %{
    bool vector256 = true; // 256-bit VEX encoding
    __ vpsrad($dst$$XMMRegister, $src$$XMMRegister, $shift$$XMMRegister, vector256);
  %}
  ins_pipe( pipe_slow );
%}
kvn@4001 | 4508 | |
// Arithmetic right shift of 8 packed ints in a 256-bit register by an
// 8-bit immediate count; requires AVX2 (UseAVX > 1).
instruct vsra8I_reg_imm(vecY dst, vecY src, immI8 shift) %{
  predicate(UseAVX > 1 && n->as_Vector()->length() == 8);
  match(Set dst (RShiftVI src shift));
  format %{ "vpsrad $dst,$src,$shift\t! arithmetic right shift packed8I" %}
  ins_encode %{
    bool vector256 = true; // 256-bit VEX encoding
    __ vpsrad($dst$$XMMRegister, $src$$XMMRegister, (int)$shift$$constant, vector256);
  %}
  ins_pipe( pipe_slow );
%}
kvn@4001 | 4519 | |
// There are no vector arithmetic right shift instructions for longs.
kvn@4001 | 4521 | |
kvn@4001 | 4522 | |
kvn@4001 | 4523 | // --------------------------------- AND -------------------------------------- |
kvn@4001 | 4524 | |
// Bitwise AND of two 4-byte vectors, in place (dst &= src).
instruct vand4B(vecS dst, vecS src) %{
  predicate(n->as_Vector()->length_in_bytes() == 4);
  match(Set dst (AndV dst src));
  format %{ "pand $dst,$src\t! and vectors (4 bytes)" %}
  ins_encode %{
    __ pand($dst$$XMMRegister, $src$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}
kvn@4001 | 4534 | |
// AVX non-destructive bitwise AND of two 4-byte vectors (dst = src1 & src2).
instruct vand4B_reg(vecS dst, vecS src1, vecS src2) %{
  predicate(UseAVX > 0 && n->as_Vector()->length_in_bytes() == 4);
  match(Set dst (AndV src1 src2));
  format %{ "vpand $dst,$src1,$src2\t! and vectors (4 bytes)" %}
  ins_encode %{
    bool vector256 = false; // 128-bit VEX encoding
    __ vpand($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256);
  %}
  ins_pipe( pipe_slow );
%}
kvn@4001 | 4545 | |
// Bitwise AND of two 8-byte vectors, in place (dst &= src).
instruct vand8B(vecD dst, vecD src) %{
  predicate(n->as_Vector()->length_in_bytes() == 8);
  match(Set dst (AndV dst src));
  format %{ "pand $dst,$src\t! and vectors (8 bytes)" %}
  ins_encode %{
    __ pand($dst$$XMMRegister, $src$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}
kvn@4001 | 4555 | |
// AVX non-destructive bitwise AND of two 8-byte vectors (dst = src1 & src2).
instruct vand8B_reg(vecD dst, vecD src1, vecD src2) %{
  predicate(UseAVX > 0 && n->as_Vector()->length_in_bytes() == 8);
  match(Set dst (AndV src1 src2));
  format %{ "vpand $dst,$src1,$src2\t! and vectors (8 bytes)" %}
  ins_encode %{
    bool vector256 = false; // 128-bit VEX encoding
    __ vpand($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256);
  %}
  ins_pipe( pipe_slow );
%}
kvn@4001 | 4566 | |
// Bitwise AND of two 16-byte vectors, in place (dst &= src).
instruct vand16B(vecX dst, vecX src) %{
  predicate(n->as_Vector()->length_in_bytes() == 16);
  match(Set dst (AndV dst src));
  format %{ "pand $dst,$src\t! and vectors (16 bytes)" %}
  ins_encode %{
    __ pand($dst$$XMMRegister, $src$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}
kvn@4001 | 4576 | |
// AVX non-destructive bitwise AND of two 16-byte vectors (dst = src1 & src2).
instruct vand16B_reg(vecX dst, vecX src1, vecX src2) %{
  predicate(UseAVX > 0 && n->as_Vector()->length_in_bytes() == 16);
  match(Set dst (AndV src1 src2));
  format %{ "vpand $dst,$src1,$src2\t! and vectors (16 bytes)" %}
  ins_encode %{
    bool vector256 = false; // 128-bit VEX encoding
    __ vpand($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256);
  %}
  ins_pipe( pipe_slow );
%}
kvn@4001 | 4587 | |
// AVX bitwise AND of a 16-byte vector with a second operand loaded
// directly from memory (matches AndV with a LoadVector input).
instruct vand16B_mem(vecX dst, vecX src, memory mem) %{
  predicate(UseAVX > 0 && n->as_Vector()->length_in_bytes() == 16);
  match(Set dst (AndV src (LoadVector mem)));
  format %{ "vpand $dst,$src,$mem\t! and vectors (16 bytes)" %}
  ins_encode %{
    bool vector256 = false; // 128-bit VEX encoding
    __ vpand($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector256);
  %}
  ins_pipe( pipe_slow );
%}
kvn@4001 | 4598 | |
// Bitwise AND of two 32-byte (256-bit) vectors; requires AVX2 (UseAVX > 1).
instruct vand32B_reg(vecY dst, vecY src1, vecY src2) %{
  predicate(UseAVX > 1 && n->as_Vector()->length_in_bytes() == 32);
  match(Set dst (AndV src1 src2));
  format %{ "vpand $dst,$src1,$src2\t! and vectors (32 bytes)" %}
  ins_encode %{
    bool vector256 = true; // 256-bit VEX encoding
    __ vpand($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256);
  %}
  ins_pipe( pipe_slow );
%}
kvn@4001 | 4609 | |
// Bitwise AND of a 32-byte vector with a memory operand; requires AVX2.
instruct vand32B_mem(vecY dst, vecY src, memory mem) %{
  predicate(UseAVX > 1 && n->as_Vector()->length_in_bytes() == 32);
  match(Set dst (AndV src (LoadVector mem)));
  format %{ "vpand $dst,$src,$mem\t! and vectors (32 bytes)" %}
  ins_encode %{
    bool vector256 = true; // 256-bit VEX encoding
    __ vpand($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector256);
  %}
  ins_pipe( pipe_slow );
%}
kvn@4001 | 4620 | |
kvn@4001 | 4621 | // --------------------------------- OR --------------------------------------- |
kvn@4001 | 4622 | |
// Bitwise OR of two 4-byte vectors, in place (dst |= src).
instruct vor4B(vecS dst, vecS src) %{
  predicate(n->as_Vector()->length_in_bytes() == 4);
  match(Set dst (OrV dst src));
  format %{ "por $dst,$src\t! or vectors (4 bytes)" %}
  ins_encode %{
    __ por($dst$$XMMRegister, $src$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}
kvn@4001 | 4632 | |
// AVX non-destructive bitwise OR of two 4-byte vectors (dst = src1 | src2).
instruct vor4B_reg(vecS dst, vecS src1, vecS src2) %{
  predicate(UseAVX > 0 && n->as_Vector()->length_in_bytes() == 4);
  match(Set dst (OrV src1 src2));
  format %{ "vpor $dst,$src1,$src2\t! or vectors (4 bytes)" %}
  ins_encode %{
    bool vector256 = false; // 128-bit VEX encoding
    __ vpor($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256);
  %}
  ins_pipe( pipe_slow );
%}
kvn@4001 | 4643 | |
// Bitwise OR of two 8-byte vectors, in place (dst |= src).
instruct vor8B(vecD dst, vecD src) %{
  predicate(n->as_Vector()->length_in_bytes() == 8);
  match(Set dst (OrV dst src));
  format %{ "por $dst,$src\t! or vectors (8 bytes)" %}
  ins_encode %{
    __ por($dst$$XMMRegister, $src$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}
kvn@4001 | 4653 | |
// AVX non-destructive bitwise OR of two 8-byte vectors (dst = src1 | src2).
instruct vor8B_reg(vecD dst, vecD src1, vecD src2) %{
  predicate(UseAVX > 0 && n->as_Vector()->length_in_bytes() == 8);
  match(Set dst (OrV src1 src2));
  format %{ "vpor $dst,$src1,$src2\t! or vectors (8 bytes)" %}
  ins_encode %{
    bool vector256 = false; // 128-bit VEX encoding
    __ vpor($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256);
  %}
  ins_pipe( pipe_slow );
%}
kvn@4001 | 4664 | |
// Bitwise OR of two 16-byte vectors, in place (dst |= src).
instruct vor16B(vecX dst, vecX src) %{
  predicate(n->as_Vector()->length_in_bytes() == 16);
  match(Set dst (OrV dst src));
  format %{ "por $dst,$src\t! or vectors (16 bytes)" %}
  ins_encode %{
    __ por($dst$$XMMRegister, $src$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}
kvn@4001 | 4674 | |
// AVX non-destructive bitwise OR of two 16-byte vectors (dst = src1 | src2).
instruct vor16B_reg(vecX dst, vecX src1, vecX src2) %{
  predicate(UseAVX > 0 && n->as_Vector()->length_in_bytes() == 16);
  match(Set dst (OrV src1 src2));
  format %{ "vpor $dst,$src1,$src2\t! or vectors (16 bytes)" %}
  ins_encode %{
    bool vector256 = false; // 128-bit VEX encoding
    __ vpor($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256);
  %}
  ins_pipe( pipe_slow );
%}
kvn@4001 | 4685 | |
// AVX bitwise OR of a 16-byte vector with a second operand loaded
// directly from memory (matches OrV with a LoadVector input).
instruct vor16B_mem(vecX dst, vecX src, memory mem) %{
  predicate(UseAVX > 0 && n->as_Vector()->length_in_bytes() == 16);
  match(Set dst (OrV src (LoadVector mem)));
  format %{ "vpor $dst,$src,$mem\t! or vectors (16 bytes)" %}
  ins_encode %{
    bool vector256 = false; // 128-bit VEX encoding
    __ vpor($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector256);
  %}
  ins_pipe( pipe_slow );
%}
kvn@4001 | 4696 | |
// Bitwise OR of two 32-byte (256-bit) vectors; requires AVX2 (UseAVX > 1).
instruct vor32B_reg(vecY dst, vecY src1, vecY src2) %{
  predicate(UseAVX > 1 && n->as_Vector()->length_in_bytes() == 32);
  match(Set dst (OrV src1 src2));
  format %{ "vpor $dst,$src1,$src2\t! or vectors (32 bytes)" %}
  ins_encode %{
    bool vector256 = true; // 256-bit VEX encoding
    __ vpor($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256);
  %}
  ins_pipe( pipe_slow );
%}
kvn@4001 | 4707 | |
// Bitwise OR of a 32-byte vector with a memory operand; requires AVX2.
instruct vor32B_mem(vecY dst, vecY src, memory mem) %{
  predicate(UseAVX > 1 && n->as_Vector()->length_in_bytes() == 32);
  match(Set dst (OrV src (LoadVector mem)));
  format %{ "vpor $dst,$src,$mem\t! or vectors (32 bytes)" %}
  ins_encode %{
    bool vector256 = true; // 256-bit VEX encoding
    __ vpor($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector256);
  %}
  ins_pipe( pipe_slow );
%}
kvn@4001 | 4718 | |
kvn@4001 | 4719 | // --------------------------------- XOR -------------------------------------- |
kvn@4001 | 4720 | |
// Bitwise XOR of two 4-byte vectors, in place (dst ^= src).
instruct vxor4B(vecS dst, vecS src) %{
  predicate(n->as_Vector()->length_in_bytes() == 4);
  match(Set dst (XorV dst src));
  format %{ "pxor $dst,$src\t! xor vectors (4 bytes)" %}
  ins_encode %{
    __ pxor($dst$$XMMRegister, $src$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}
kvn@4001 | 4730 | |
// AVX non-destructive bitwise XOR of two 4-byte vectors (dst = src1 ^ src2).
instruct vxor4B_reg(vecS dst, vecS src1, vecS src2) %{
  predicate(UseAVX > 0 && n->as_Vector()->length_in_bytes() == 4);
  match(Set dst (XorV src1 src2));
  format %{ "vpxor $dst,$src1,$src2\t! xor vectors (4 bytes)" %}
  ins_encode %{
    bool vector256 = false; // 128-bit VEX encoding
    __ vpxor($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256);
  %}
  ins_pipe( pipe_slow );
%}
kvn@4001 | 4741 | |
// Bitwise XOR of two 8-byte vectors, in place (dst ^= src).
instruct vxor8B(vecD dst, vecD src) %{
  predicate(n->as_Vector()->length_in_bytes() == 8);
  match(Set dst (XorV dst src));
  format %{ "pxor $dst,$src\t! xor vectors (8 bytes)" %}
  ins_encode %{
    __ pxor($dst$$XMMRegister, $src$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}
kvn@4001 | 4751 | |
// AVX non-destructive bitwise XOR of two 8-byte vectors (dst = src1 ^ src2).
instruct vxor8B_reg(vecD dst, vecD src1, vecD src2) %{
  predicate(UseAVX > 0 && n->as_Vector()->length_in_bytes() == 8);
  match(Set dst (XorV src1 src2));
  format %{ "vpxor $dst,$src1,$src2\t! xor vectors (8 bytes)" %}
  ins_encode %{
    bool vector256 = false; // 128-bit VEX encoding
    __ vpxor($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256);
  %}
  ins_pipe( pipe_slow );
%}
kvn@4001 | 4762 | |
// Bitwise XOR of two 16-byte vectors, in place (dst ^= src).
instruct vxor16B(vecX dst, vecX src) %{
  predicate(n->as_Vector()->length_in_bytes() == 16);
  match(Set dst (XorV dst src));
  format %{ "pxor $dst,$src\t! xor vectors (16 bytes)" %}
  ins_encode %{
    __ pxor($dst$$XMMRegister, $src$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}
kvn@4001 | 4772 | |
// AVX non-destructive bitwise XOR of two 16-byte vectors (dst = src1 ^ src2).
instruct vxor16B_reg(vecX dst, vecX src1, vecX src2) %{
  predicate(UseAVX > 0 && n->as_Vector()->length_in_bytes() == 16);
  match(Set dst (XorV src1 src2));
  format %{ "vpxor $dst,$src1,$src2\t! xor vectors (16 bytes)" %}
  ins_encode %{
    bool vector256 = false; // 128-bit VEX encoding
    __ vpxor($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256);
  %}
  ins_pipe( pipe_slow );
%}
kvn@4001 | 4783 | |
// AVX bitwise XOR of a 16-byte vector with a second operand loaded
// directly from memory (matches XorV with a LoadVector input).
instruct vxor16B_mem(vecX dst, vecX src, memory mem) %{
  predicate(UseAVX > 0 && n->as_Vector()->length_in_bytes() == 16);
  match(Set dst (XorV src (LoadVector mem)));
  format %{ "vpxor $dst,$src,$mem\t! xor vectors (16 bytes)" %}
  ins_encode %{
    bool vector256 = false; // 128-bit VEX encoding
    __ vpxor($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector256);
  %}
  ins_pipe( pipe_slow );
%}
kvn@4001 | 4794 | |
// Bitwise XOR of two 32-byte (256-bit) vectors; requires AVX2 (UseAVX > 1).
instruct vxor32B_reg(vecY dst, vecY src1, vecY src2) %{
  predicate(UseAVX > 1 && n->as_Vector()->length_in_bytes() == 32);
  match(Set dst (XorV src1 src2));
  format %{ "vpxor $dst,$src1,$src2\t! xor vectors (32 bytes)" %}
  ins_encode %{
    bool vector256 = true; // 256-bit VEX encoding
    __ vpxor($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256);
  %}
  ins_pipe( pipe_slow );
%}
kvn@4001 | 4805 | |
// Bitwise XOR of a 32-byte vector with a memory operand; requires AVX2.
instruct vxor32B_mem(vecY dst, vecY src, memory mem) %{
  predicate(UseAVX > 1 && n->as_Vector()->length_in_bytes() == 32);
  match(Set dst (XorV src (LoadVector mem)));
  format %{ "vpxor $dst,$src,$mem\t! xor vectors (32 bytes)" %}
  ins_encode %{
    bool vector256 = true; // 256-bit VEX encoding
    __ vpxor($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector256);
  %}
  ins_pipe( pipe_slow );
%}
kvn@4001 | 4816 |