kvn@3390: // kvn@3577: // Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved. kvn@3390: // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. kvn@3390: // kvn@3390: // This code is free software; you can redistribute it and/or modify it kvn@3390: // under the terms of the GNU General Public License version 2 only, as kvn@3390: // published by the Free Software Foundation. kvn@3390: // kvn@3390: // This code is distributed in the hope that it will be useful, but WITHOUT kvn@3390: // ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or kvn@3390: // FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License kvn@3390: // version 2 for more details (a copy is included in the LICENSE file that kvn@3390: // accompanied this code). kvn@3390: // kvn@3390: // You should have received a copy of the GNU General Public License version kvn@3390: // 2 along with this work; if not, write to the Free Software Foundation, kvn@3390: // Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. kvn@3390: // kvn@3390: // Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA kvn@3390: // or visit www.oracle.com if you need additional information or have any kvn@3390: // questions. kvn@3390: // kvn@3390: // kvn@3390: kvn@3390: // X86 Common Architecture Description File kvn@3390: kvn@3882: //----------REGISTER DEFINITION BLOCK------------------------------------------ kvn@3882: // This information is used by the matcher and the register allocator to kvn@3882: // describe individual registers and classes of registers within the target kvn@3882: // architecture. kvn@3882: kvn@3882: register %{ kvn@3882: //----------Architecture Description Register Definitions---------------------- kvn@3882: // General Registers kvn@3882: // "reg_def" name ( register save type, C convention save type, kvn@3882: // ideal register type, encoding ); kvn@3882: // Register Save Types: kvn@3882: // kvn@3882: // NS = No-Save: The register allocator assumes that these registers kvn@3882: // can be used without saving upon entry to the method, & kvn@3882: // that they do not need to be saved at call sites. kvn@3882: // kvn@3882: // SOC = Save-On-Call: The register allocator assumes that these registers kvn@3882: // can be used without saving upon entry to the method, kvn@3882: // but that they must be saved at call sites. kvn@3882: // kvn@3882: // SOE = Save-On-Entry: The register allocator assumes that these registers kvn@3882: // must be saved before using them upon entry to the kvn@3882: // method, but they do not need to be saved at call kvn@3882: // sites. kvn@3882: // kvn@3882: // AS = Always-Save: The register allocator assumes that these registers kvn@3882: // must be saved before using them upon entry to the kvn@3882: // method, & that they must be saved at call sites. kvn@3882: // kvn@3882: // Ideal Register Type is used to determine how to save & restore a kvn@3882: // register. Op_RegI will get spilled with LoadI/StoreI, Op_RegP will get kvn@3882: // spilled with LoadP/StoreP. If the register supports both, use Op_RegI. kvn@3882: // kvn@3882: // The encoding number is the actual bit-pattern placed into the opcodes. kvn@3882: kvn@3882: // XMM registers. 256-bit registers or 8 words each, labeled (a)-h. kvn@3882: // Word a in each register holds a Float, words ab hold a Double.
kvn@3882: // The whole registers are used in SSE4.2 version intrinsics, kvn@3882: // array copy stubs and superword operations (see UseSSE42Intrinsics, kvn@3882: // UseXMMForArrayCopy and UseSuperword flags). kvn@3882: // XMM8-XMM15 must be encoded with REX (VEX for UseAVX). kvn@3882: // Linux ABI: No register preserved across function calls kvn@3882: // XMM0-XMM7 might hold parameters kvn@3882: // Windows ABI: XMM6-XMM15 preserved across function calls kvn@3882: // XMM0-XMM3 might hold parameters kvn@3882: kvn@3882: reg_def XMM0 ( SOC, SOC, Op_RegF, 0, xmm0->as_VMReg()); kvn@3929: reg_def XMM0b( SOC, SOC, Op_RegF, 0, xmm0->as_VMReg()->next(1)); kvn@3929: reg_def XMM0c( SOC, SOC, Op_RegF, 0, xmm0->as_VMReg()->next(2)); kvn@3929: reg_def XMM0d( SOC, SOC, Op_RegF, 0, xmm0->as_VMReg()->next(3)); kvn@3929: reg_def XMM0e( SOC, SOC, Op_RegF, 0, xmm0->as_VMReg()->next(4)); kvn@3929: reg_def XMM0f( SOC, SOC, Op_RegF, 0, xmm0->as_VMReg()->next(5)); kvn@3929: reg_def XMM0g( SOC, SOC, Op_RegF, 0, xmm0->as_VMReg()->next(6)); kvn@3929: reg_def XMM0h( SOC, SOC, Op_RegF, 0, xmm0->as_VMReg()->next(7)); kvn@3882: kvn@3882: reg_def XMM1 ( SOC, SOC, Op_RegF, 1, xmm1->as_VMReg()); kvn@3929: reg_def XMM1b( SOC, SOC, Op_RegF, 1, xmm1->as_VMReg()->next(1)); kvn@3929: reg_def XMM1c( SOC, SOC, Op_RegF, 1, xmm1->as_VMReg()->next(2)); kvn@3929: reg_def XMM1d( SOC, SOC, Op_RegF, 1, xmm1->as_VMReg()->next(3)); kvn@3929: reg_def XMM1e( SOC, SOC, Op_RegF, 1, xmm1->as_VMReg()->next(4)); kvn@3929: reg_def XMM1f( SOC, SOC, Op_RegF, 1, xmm1->as_VMReg()->next(5)); kvn@3929: reg_def XMM1g( SOC, SOC, Op_RegF, 1, xmm1->as_VMReg()->next(6)); kvn@3929: reg_def XMM1h( SOC, SOC, Op_RegF, 1, xmm1->as_VMReg()->next(7)); kvn@3882: kvn@3882: reg_def XMM2 ( SOC, SOC, Op_RegF, 2, xmm2->as_VMReg()); kvn@3929: reg_def XMM2b( SOC, SOC, Op_RegF, 2, xmm2->as_VMReg()->next(1)); kvn@3929: reg_def XMM2c( SOC, SOC, Op_RegF, 2, xmm2->as_VMReg()->next(2)); kvn@3929: reg_def XMM2d( SOC, SOC, Op_RegF, 2, xmm2->as_VMReg()->next(3)); kvn@3929: reg_def XMM2e( SOC, SOC, Op_RegF, 2, xmm2->as_VMReg()->next(4)); kvn@3929: reg_def XMM2f( SOC, SOC, Op_RegF, 2, xmm2->as_VMReg()->next(5)); kvn@3929: reg_def XMM2g( SOC, SOC, Op_RegF, 2, xmm2->as_VMReg()->next(6)); kvn@3929: reg_def XMM2h( SOC, SOC, Op_RegF, 2, xmm2->as_VMReg()->next(7)); kvn@3882: kvn@3882: reg_def XMM3 ( SOC, SOC, Op_RegF, 3, xmm3->as_VMReg()); kvn@3929: reg_def XMM3b( SOC, SOC, Op_RegF, 3, xmm3->as_VMReg()->next(1)); kvn@3929: reg_def XMM3c( SOC, SOC, Op_RegF, 3, xmm3->as_VMReg()->next(2)); kvn@3929: reg_def XMM3d( SOC, SOC, Op_RegF, 3, xmm3->as_VMReg()->next(3)); kvn@3929: reg_def XMM3e( SOC, SOC, Op_RegF, 3, xmm3->as_VMReg()->next(4)); kvn@3929: reg_def XMM3f( SOC, SOC, Op_RegF, 3, xmm3->as_VMReg()->next(5)); kvn@3929: reg_def XMM3g( SOC, SOC, Op_RegF, 3, xmm3->as_VMReg()->next(6)); kvn@3929: reg_def XMM3h( SOC, SOC, Op_RegF, 3, xmm3->as_VMReg()->next(7)); kvn@3882: kvn@3882: reg_def XMM4 ( SOC, SOC, Op_RegF, 4, xmm4->as_VMReg()); kvn@3929: reg_def XMM4b( SOC, SOC, Op_RegF, 4, xmm4->as_VMReg()->next(1)); kvn@3929: reg_def XMM4c( SOC, SOC, Op_RegF, 4, xmm4->as_VMReg()->next(2)); kvn@3929: reg_def XMM4d( SOC, SOC, Op_RegF, 4, xmm4->as_VMReg()->next(3)); kvn@3929: reg_def XMM4e( SOC, SOC, Op_RegF, 4, xmm4->as_VMReg()->next(4)); kvn@3929: reg_def XMM4f( SOC, SOC, Op_RegF, 4, xmm4->as_VMReg()->next(5)); kvn@3929: reg_def XMM4g( SOC, SOC, Op_RegF, 4, xmm4->as_VMReg()->next(6)); kvn@3929: reg_def XMM4h( SOC, SOC, Op_RegF, 4, xmm4->as_VMReg()->next(7)); kvn@3882: kvn@3882: reg_def XMM5 ( SOC, SOC, 
Op_RegF, 5, xmm5->as_VMReg()); kvn@3929: reg_def XMM5b( SOC, SOC, Op_RegF, 5, xmm5->as_VMReg()->next(1)); kvn@3929: reg_def XMM5c( SOC, SOC, Op_RegF, 5, xmm5->as_VMReg()->next(2)); kvn@3929: reg_def XMM5d( SOC, SOC, Op_RegF, 5, xmm5->as_VMReg()->next(3)); kvn@3929: reg_def XMM5e( SOC, SOC, Op_RegF, 5, xmm5->as_VMReg()->next(4)); kvn@3929: reg_def XMM5f( SOC, SOC, Op_RegF, 5, xmm5->as_VMReg()->next(5)); kvn@3929: reg_def XMM5g( SOC, SOC, Op_RegF, 5, xmm5->as_VMReg()->next(6)); kvn@3929: reg_def XMM5h( SOC, SOC, Op_RegF, 5, xmm5->as_VMReg()->next(7)); kvn@3882: kvn@3882: #ifdef _WIN64 kvn@3882: kvn@3882: reg_def XMM6 ( SOC, SOE, Op_RegF, 6, xmm6->as_VMReg()); kvn@3929: reg_def XMM6b( SOC, SOE, Op_RegF, 6, xmm6->as_VMReg()->next(1)); kvn@3929: reg_def XMM6c( SOC, SOE, Op_RegF, 6, xmm6->as_VMReg()->next(2)); kvn@3929: reg_def XMM6d( SOC, SOE, Op_RegF, 6, xmm6->as_VMReg()->next(3)); kvn@3929: reg_def XMM6e( SOC, SOE, Op_RegF, 6, xmm6->as_VMReg()->next(4)); kvn@3929: reg_def XMM6f( SOC, SOE, Op_RegF, 6, xmm6->as_VMReg()->next(5)); kvn@3929: reg_def XMM6g( SOC, SOE, Op_RegF, 6, xmm6->as_VMReg()->next(6)); kvn@3929: reg_def XMM6h( SOC, SOE, Op_RegF, 6, xmm6->as_VMReg()->next(7)); kvn@3882: kvn@3882: reg_def XMM7 ( SOC, SOE, Op_RegF, 7, xmm7->as_VMReg()); kvn@3929: reg_def XMM7b( SOC, SOE, Op_RegF, 7, xmm7->as_VMReg()->next(1)); kvn@3929: reg_def XMM7c( SOC, SOE, Op_RegF, 7, xmm7->as_VMReg()->next(2)); kvn@3929: reg_def XMM7d( SOC, SOE, Op_RegF, 7, xmm7->as_VMReg()->next(3)); kvn@3929: reg_def XMM7e( SOC, SOE, Op_RegF, 7, xmm7->as_VMReg()->next(4)); kvn@3929: reg_def XMM7f( SOC, SOE, Op_RegF, 7, xmm7->as_VMReg()->next(5)); kvn@3929: reg_def XMM7g( SOC, SOE, Op_RegF, 7, xmm7->as_VMReg()->next(6)); kvn@3929: reg_def XMM7h( SOC, SOE, Op_RegF, 7, xmm7->as_VMReg()->next(7)); kvn@3882: kvn@3882: reg_def XMM8 ( SOC, SOE, Op_RegF, 8, xmm8->as_VMReg()); kvn@3929: reg_def XMM8b( SOC, SOE, Op_RegF, 8, xmm8->as_VMReg()->next(1)); kvn@3929: reg_def XMM8c( SOC, SOE, Op_RegF, 8, xmm8->as_VMReg()->next(2)); kvn@3929: reg_def XMM8d( SOC, SOE, Op_RegF, 8, xmm8->as_VMReg()->next(3)); kvn@3929: reg_def XMM8e( SOC, SOE, Op_RegF, 8, xmm8->as_VMReg()->next(4)); kvn@3929: reg_def XMM8f( SOC, SOE, Op_RegF, 8, xmm8->as_VMReg()->next(5)); kvn@3929: reg_def XMM8g( SOC, SOE, Op_RegF, 8, xmm8->as_VMReg()->next(6)); kvn@3929: reg_def XMM8h( SOC, SOE, Op_RegF, 8, xmm8->as_VMReg()->next(7)); kvn@3882: kvn@3882: reg_def XMM9 ( SOC, SOE, Op_RegF, 9, xmm9->as_VMReg()); kvn@3929: reg_def XMM9b( SOC, SOE, Op_RegF, 9, xmm9->as_VMReg()->next(1)); kvn@3929: reg_def XMM9c( SOC, SOE, Op_RegF, 9, xmm9->as_VMReg()->next(2)); kvn@3929: reg_def XMM9d( SOC, SOE, Op_RegF, 9, xmm9->as_VMReg()->next(3)); kvn@3929: reg_def XMM9e( SOC, SOE, Op_RegF, 9, xmm9->as_VMReg()->next(4)); kvn@3929: reg_def XMM9f( SOC, SOE, Op_RegF, 9, xmm9->as_VMReg()->next(5)); kvn@3929: reg_def XMM9g( SOC, SOE, Op_RegF, 9, xmm9->as_VMReg()->next(6)); kvn@3929: reg_def XMM9h( SOC, SOE, Op_RegF, 9, xmm9->as_VMReg()->next(7)); kvn@3882: kvn@3882: reg_def XMM10 ( SOC, SOE, Op_RegF, 10, xmm10->as_VMReg()); kvn@3929: reg_def XMM10b( SOC, SOE, Op_RegF, 10, xmm10->as_VMReg()->next(1)); kvn@3929: reg_def XMM10c( SOC, SOE, Op_RegF, 10, xmm10->as_VMReg()->next(2)); kvn@3929: reg_def XMM10d( SOC, SOE, Op_RegF, 10, xmm10->as_VMReg()->next(3)); kvn@3929: reg_def XMM10e( SOC, SOE, Op_RegF, 10, xmm10->as_VMReg()->next(4)); kvn@3929: reg_def XMM10f( SOC, SOE, Op_RegF, 10, xmm10->as_VMReg()->next(5)); kvn@3929: reg_def XMM10g( SOC, SOE, Op_RegF, 10, xmm10->as_VMReg()->next(6)); kvn@3929: 
reg_def XMM10h( SOC, SOE, Op_RegF, 10, xmm10->as_VMReg()->next(7)); kvn@3882: kvn@3882: reg_def XMM11 ( SOC, SOE, Op_RegF, 11, xmm11->as_VMReg()); kvn@3929: reg_def XMM11b( SOC, SOE, Op_RegF, 11, xmm11->as_VMReg()->next(1)); kvn@3929: reg_def XMM11c( SOC, SOE, Op_RegF, 11, xmm11->as_VMReg()->next(2)); kvn@3929: reg_def XMM11d( SOC, SOE, Op_RegF, 11, xmm11->as_VMReg()->next(3)); kvn@3929: reg_def XMM11e( SOC, SOE, Op_RegF, 11, xmm11->as_VMReg()->next(4)); kvn@3929: reg_def XMM11f( SOC, SOE, Op_RegF, 11, xmm11->as_VMReg()->next(5)); kvn@3929: reg_def XMM11g( SOC, SOE, Op_RegF, 11, xmm11->as_VMReg()->next(6)); kvn@3929: reg_def XMM11h( SOC, SOE, Op_RegF, 11, xmm11->as_VMReg()->next(7)); kvn@3882: kvn@3882: reg_def XMM12 ( SOC, SOE, Op_RegF, 12, xmm12->as_VMReg()); kvn@3929: reg_def XMM12b( SOC, SOE, Op_RegF, 12, xmm12->as_VMReg()->next(1)); kvn@3929: reg_def XMM12c( SOC, SOE, Op_RegF, 12, xmm12->as_VMReg()->next(2)); kvn@3929: reg_def XMM12d( SOC, SOE, Op_RegF, 12, xmm12->as_VMReg()->next(3)); kvn@3929: reg_def XMM12e( SOC, SOE, Op_RegF, 12, xmm12->as_VMReg()->next(4)); kvn@3929: reg_def XMM12f( SOC, SOE, Op_RegF, 12, xmm12->as_VMReg()->next(5)); kvn@3929: reg_def XMM12g( SOC, SOE, Op_RegF, 12, xmm12->as_VMReg()->next(6)); kvn@3929: reg_def XMM12h( SOC, SOE, Op_RegF, 12, xmm12->as_VMReg()->next(7)); kvn@3882: kvn@3882: reg_def XMM13 ( SOC, SOE, Op_RegF, 13, xmm13->as_VMReg()); kvn@3929: reg_def XMM13b( SOC, SOE, Op_RegF, 13, xmm13->as_VMReg()->next(1)); kvn@3929: reg_def XMM13c( SOC, SOE, Op_RegF, 13, xmm13->as_VMReg()->next(2)); kvn@3929: reg_def XMM13d( SOC, SOE, Op_RegF, 13, xmm13->as_VMReg()->next(3)); kvn@3929: reg_def XMM13e( SOC, SOE, Op_RegF, 13, xmm13->as_VMReg()->next(4)); kvn@3929: reg_def XMM13f( SOC, SOE, Op_RegF, 13, xmm13->as_VMReg()->next(5)); kvn@3929: reg_def XMM13g( SOC, SOE, Op_RegF, 13, xmm13->as_VMReg()->next(6)); kvn@3929: reg_def XMM13h( SOC, SOE, Op_RegF, 13, xmm13->as_VMReg()->next(7)); kvn@3882: kvn@3882: reg_def XMM14 ( SOC, SOE, Op_RegF, 14, xmm14->as_VMReg()); kvn@3929: reg_def XMM14b( SOC, SOE, Op_RegF, 14, xmm14->as_VMReg()->next(1)); kvn@3929: reg_def XMM14c( SOC, SOE, Op_RegF, 14, xmm14->as_VMReg()->next(2)); kvn@3929: reg_def XMM14d( SOC, SOE, Op_RegF, 14, xmm14->as_VMReg()->next(3)); kvn@3929: reg_def XMM14e( SOC, SOE, Op_RegF, 14, xmm14->as_VMReg()->next(4)); kvn@3929: reg_def XMM14f( SOC, SOE, Op_RegF, 14, xmm14->as_VMReg()->next(5)); kvn@3929: reg_def XMM14g( SOC, SOE, Op_RegF, 14, xmm14->as_VMReg()->next(6)); kvn@3929: reg_def XMM14h( SOC, SOE, Op_RegF, 14, xmm14->as_VMReg()->next(7)); kvn@3882: kvn@3882: reg_def XMM15 ( SOC, SOE, Op_RegF, 15, xmm15->as_VMReg()); kvn@3929: reg_def XMM15b( SOC, SOE, Op_RegF, 15, xmm15->as_VMReg()->next(1)); kvn@3929: reg_def XMM15c( SOC, SOE, Op_RegF, 15, xmm15->as_VMReg()->next(2)); kvn@3929: reg_def XMM15d( SOC, SOE, Op_RegF, 15, xmm15->as_VMReg()->next(3)); kvn@3929: reg_def XMM15e( SOC, SOE, Op_RegF, 15, xmm15->as_VMReg()->next(4)); kvn@3929: reg_def XMM15f( SOC, SOE, Op_RegF, 15, xmm15->as_VMReg()->next(5)); kvn@3929: reg_def XMM15g( SOC, SOE, Op_RegF, 15, xmm15->as_VMReg()->next(6)); kvn@3929: reg_def XMM15h( SOC, SOE, Op_RegF, 15, xmm15->as_VMReg()->next(7)); kvn@3882: kvn@3882: #else // _WIN64 kvn@3882: kvn@3882: reg_def XMM6 ( SOC, SOC, Op_RegF, 6, xmm6->as_VMReg()); kvn@3929: reg_def XMM6b( SOC, SOC, Op_RegF, 6, xmm6->as_VMReg()->next(1)); kvn@3929: reg_def XMM6c( SOC, SOC, Op_RegF, 6, xmm6->as_VMReg()->next(2)); kvn@3929: reg_def XMM6d( SOC, SOC, Op_RegF, 6, xmm6->as_VMReg()->next(3)); kvn@3929: reg_def XMM6e( 
SOC, SOC, Op_RegF, 6, xmm6->as_VMReg()->next(4)); kvn@3929: reg_def XMM6f( SOC, SOC, Op_RegF, 6, xmm6->as_VMReg()->next(5)); kvn@3929: reg_def XMM6g( SOC, SOC, Op_RegF, 6, xmm6->as_VMReg()->next(6)); kvn@3929: reg_def XMM6h( SOC, SOC, Op_RegF, 6, xmm6->as_VMReg()->next(7)); kvn@3882: kvn@3882: reg_def XMM7 ( SOC, SOC, Op_RegF, 7, xmm7->as_VMReg()); kvn@3929: reg_def XMM7b( SOC, SOC, Op_RegF, 7, xmm7->as_VMReg()->next(1)); kvn@3929: reg_def XMM7c( SOC, SOC, Op_RegF, 7, xmm7->as_VMReg()->next(2)); kvn@3929: reg_def XMM7d( SOC, SOC, Op_RegF, 7, xmm7->as_VMReg()->next(3)); kvn@3929: reg_def XMM7e( SOC, SOC, Op_RegF, 7, xmm7->as_VMReg()->next(4)); kvn@3929: reg_def XMM7f( SOC, SOC, Op_RegF, 7, xmm7->as_VMReg()->next(5)); kvn@3929: reg_def XMM7g( SOC, SOC, Op_RegF, 7, xmm7->as_VMReg()->next(6)); kvn@3929: reg_def XMM7h( SOC, SOC, Op_RegF, 7, xmm7->as_VMReg()->next(7)); kvn@3882: kvn@3882: #ifdef _LP64 kvn@3882: kvn@3882: reg_def XMM8 ( SOC, SOC, Op_RegF, 8, xmm8->as_VMReg()); kvn@3929: reg_def XMM8b( SOC, SOC, Op_RegF, 8, xmm8->as_VMReg()->next(1)); kvn@3929: reg_def XMM8c( SOC, SOC, Op_RegF, 8, xmm8->as_VMReg()->next(2)); kvn@3929: reg_def XMM8d( SOC, SOC, Op_RegF, 8, xmm8->as_VMReg()->next(3)); kvn@3929: reg_def XMM8e( SOC, SOC, Op_RegF, 8, xmm8->as_VMReg()->next(4)); kvn@3929: reg_def XMM8f( SOC, SOC, Op_RegF, 8, xmm8->as_VMReg()->next(5)); kvn@3929: reg_def XMM8g( SOC, SOC, Op_RegF, 8, xmm8->as_VMReg()->next(6)); kvn@3929: reg_def XMM8h( SOC, SOC, Op_RegF, 8, xmm8->as_VMReg()->next(7)); kvn@3882: kvn@3882: reg_def XMM9 ( SOC, SOC, Op_RegF, 9, xmm9->as_VMReg()); kvn@3929: reg_def XMM9b( SOC, SOC, Op_RegF, 9, xmm9->as_VMReg()->next(1)); kvn@3929: reg_def XMM9c( SOC, SOC, Op_RegF, 9, xmm9->as_VMReg()->next(2)); kvn@3929: reg_def XMM9d( SOC, SOC, Op_RegF, 9, xmm9->as_VMReg()->next(3)); kvn@3929: reg_def XMM9e( SOC, SOC, Op_RegF, 9, xmm9->as_VMReg()->next(4)); kvn@3929: reg_def XMM9f( SOC, SOC, Op_RegF, 9, xmm9->as_VMReg()->next(5)); kvn@3929: reg_def XMM9g( SOC, SOC, Op_RegF, 9, xmm9->as_VMReg()->next(6)); kvn@3929: reg_def XMM9h( SOC, SOC, Op_RegF, 9, xmm9->as_VMReg()->next(7)); kvn@3882: kvn@3882: reg_def XMM10 ( SOC, SOC, Op_RegF, 10, xmm10->as_VMReg()); kvn@3929: reg_def XMM10b( SOC, SOC, Op_RegF, 10, xmm10->as_VMReg()->next(1)); kvn@3929: reg_def XMM10c( SOC, SOC, Op_RegF, 10, xmm10->as_VMReg()->next(2)); kvn@3929: reg_def XMM10d( SOC, SOC, Op_RegF, 10, xmm10->as_VMReg()->next(3)); kvn@3929: reg_def XMM10e( SOC, SOC, Op_RegF, 10, xmm10->as_VMReg()->next(4)); kvn@3929: reg_def XMM10f( SOC, SOC, Op_RegF, 10, xmm10->as_VMReg()->next(5)); kvn@3929: reg_def XMM10g( SOC, SOC, Op_RegF, 10, xmm10->as_VMReg()->next(6)); kvn@3929: reg_def XMM10h( SOC, SOC, Op_RegF, 10, xmm10->as_VMReg()->next(7)); kvn@3882: kvn@3882: reg_def XMM11 ( SOC, SOC, Op_RegF, 11, xmm11->as_VMReg()); kvn@3929: reg_def XMM11b( SOC, SOC, Op_RegF, 11, xmm11->as_VMReg()->next(1)); kvn@3929: reg_def XMM11c( SOC, SOC, Op_RegF, 11, xmm11->as_VMReg()->next(2)); kvn@3929: reg_def XMM11d( SOC, SOC, Op_RegF, 11, xmm11->as_VMReg()->next(3)); kvn@3929: reg_def XMM11e( SOC, SOC, Op_RegF, 11, xmm11->as_VMReg()->next(4)); kvn@3929: reg_def XMM11f( SOC, SOC, Op_RegF, 11, xmm11->as_VMReg()->next(5)); kvn@3929: reg_def XMM11g( SOC, SOC, Op_RegF, 11, xmm11->as_VMReg()->next(6)); kvn@3929: reg_def XMM11h( SOC, SOC, Op_RegF, 11, xmm11->as_VMReg()->next(7)); kvn@3882: kvn@3882: reg_def XMM12 ( SOC, SOC, Op_RegF, 12, xmm12->as_VMReg()); kvn@3929: reg_def XMM12b( SOC, SOC, Op_RegF, 12, xmm12->as_VMReg()->next(1)); kvn@3929: reg_def XMM12c( SOC, SOC, 
Op_RegF, 12, xmm12->as_VMReg()->next(2)); kvn@3929: reg_def XMM12d( SOC, SOC, Op_RegF, 12, xmm12->as_VMReg()->next(3)); kvn@3929: reg_def XMM12e( SOC, SOC, Op_RegF, 12, xmm12->as_VMReg()->next(4)); kvn@3929: reg_def XMM12f( SOC, SOC, Op_RegF, 12, xmm12->as_VMReg()->next(5)); kvn@3929: reg_def XMM12g( SOC, SOC, Op_RegF, 12, xmm12->as_VMReg()->next(6)); kvn@3929: reg_def XMM12h( SOC, SOC, Op_RegF, 12, xmm12->as_VMReg()->next(7)); kvn@3882: kvn@3882: reg_def XMM13 ( SOC, SOC, Op_RegF, 13, xmm13->as_VMReg()); kvn@3929: reg_def XMM13b( SOC, SOC, Op_RegF, 13, xmm13->as_VMReg()->next(1)); kvn@3929: reg_def XMM13c( SOC, SOC, Op_RegF, 13, xmm13->as_VMReg()->next(2)); kvn@3929: reg_def XMM13d( SOC, SOC, Op_RegF, 13, xmm13->as_VMReg()->next(3)); kvn@3929: reg_def XMM13e( SOC, SOC, Op_RegF, 13, xmm13->as_VMReg()->next(4)); kvn@3929: reg_def XMM13f( SOC, SOC, Op_RegF, 13, xmm13->as_VMReg()->next(5)); kvn@3929: reg_def XMM13g( SOC, SOC, Op_RegF, 13, xmm13->as_VMReg()->next(6)); kvn@3929: reg_def XMM13h( SOC, SOC, Op_RegF, 13, xmm13->as_VMReg()->next(7)); kvn@3882: kvn@3882: reg_def XMM14 ( SOC, SOC, Op_RegF, 14, xmm14->as_VMReg()); kvn@3929: reg_def XMM14b( SOC, SOC, Op_RegF, 14, xmm14->as_VMReg()->next(1)); kvn@3929: reg_def XMM14c( SOC, SOC, Op_RegF, 14, xmm14->as_VMReg()->next(2)); kvn@3929: reg_def XMM14d( SOC, SOC, Op_RegF, 14, xmm14->as_VMReg()->next(3)); kvn@3929: reg_def XMM14e( SOC, SOC, Op_RegF, 14, xmm14->as_VMReg()->next(4)); kvn@3929: reg_def XMM14f( SOC, SOC, Op_RegF, 14, xmm14->as_VMReg()->next(5)); kvn@3929: reg_def XMM14g( SOC, SOC, Op_RegF, 14, xmm14->as_VMReg()->next(6)); kvn@3929: reg_def XMM14h( SOC, SOC, Op_RegF, 14, xmm14->as_VMReg()->next(7)); kvn@3882: kvn@3882: reg_def XMM15 ( SOC, SOC, Op_RegF, 15, xmm15->as_VMReg()); kvn@3929: reg_def XMM15b( SOC, SOC, Op_RegF, 15, xmm15->as_VMReg()->next(1)); kvn@3929: reg_def XMM15c( SOC, SOC, Op_RegF, 15, xmm15->as_VMReg()->next(2)); kvn@3929: reg_def XMM15d( SOC, SOC, Op_RegF, 15, xmm15->as_VMReg()->next(3)); kvn@3929: reg_def XMM15e( SOC, SOC, Op_RegF, 15, xmm15->as_VMReg()->next(4)); kvn@3929: reg_def XMM15f( SOC, SOC, Op_RegF, 15, xmm15->as_VMReg()->next(5)); kvn@3929: reg_def XMM15g( SOC, SOC, Op_RegF, 15, xmm15->as_VMReg()->next(6)); kvn@3929: reg_def XMM15h( SOC, SOC, Op_RegF, 15, xmm15->as_VMReg()->next(7)); kvn@3882: kvn@3882: #endif // _LP64 kvn@3882: kvn@3882: #endif // _WIN64 kvn@3882: kvn@3882: #ifdef _LP64 kvn@3882: reg_def RFLAGS(SOC, SOC, 0, 16, VMRegImpl::Bad()); kvn@3882: #else kvn@3882: reg_def RFLAGS(SOC, SOC, 0, 8, VMRegImpl::Bad()); kvn@3882: #endif // _LP64 kvn@3882: kvn@3882: alloc_class chunk1(XMM0, XMM0b, XMM0c, XMM0d, XMM0e, XMM0f, XMM0g, XMM0h, kvn@3882: XMM1, XMM1b, XMM1c, XMM1d, XMM1e, XMM1f, XMM1g, XMM1h, kvn@3882: XMM2, XMM2b, XMM2c, XMM2d, XMM2e, XMM2f, XMM2g, XMM2h, kvn@3882: XMM3, XMM3b, XMM3c, XMM3d, XMM3e, XMM3f, XMM3g, XMM3h, kvn@3882: XMM4, XMM4b, XMM4c, XMM4d, XMM4e, XMM4f, XMM4g, XMM4h, kvn@3882: XMM5, XMM5b, XMM5c, XMM5d, XMM5e, XMM5f, XMM5g, XMM5h, kvn@3882: XMM6, XMM6b, XMM6c, XMM6d, XMM6e, XMM6f, XMM6g, XMM6h, kvn@3882: XMM7, XMM7b, XMM7c, XMM7d, XMM7e, XMM7f, XMM7g, XMM7h kvn@3882: #ifdef _LP64 kvn@3882: ,XMM8, XMM8b, XMM8c, XMM8d, XMM8e, XMM8f, XMM8g, XMM8h, kvn@3882: XMM9, XMM9b, XMM9c, XMM9d, XMM9e, XMM9f, XMM9g, XMM9h, kvn@3882: XMM10, XMM10b, XMM10c, XMM10d, XMM10e, XMM10f, XMM10g, XMM10h, kvn@3882: XMM11, XMM11b, XMM11c, XMM11d, XMM11e, XMM11f, XMM11g, XMM11h, kvn@3882: XMM12, XMM12b, XMM12c, XMM12d, XMM12e, XMM12f, XMM12g, XMM12h, kvn@3882: XMM13, XMM13b, XMM13c, XMM13d, XMM13e, XMM13f, 
XMM13g, XMM13h, kvn@3882: XMM14, XMM14b, XMM14c, XMM14d, XMM14e, XMM14f, XMM14g, XMM14h, kvn@3882: XMM15, XMM15b, XMM15c, XMM15d, XMM15e, XMM15f, XMM15g, XMM15h kvn@3882: #endif kvn@3882: ); kvn@3882: kvn@3882: // flags allocation class should be last. kvn@3882: alloc_class chunk2(RFLAGS); kvn@3882: kvn@3882: // Singleton class for condition codes kvn@3882: reg_class int_flags(RFLAGS); kvn@3882: kvn@3882: // Class for all float registers kvn@3882: reg_class float_reg(XMM0, kvn@3882: XMM1, kvn@3882: XMM2, kvn@3882: XMM3, kvn@3882: XMM4, kvn@3882: XMM5, kvn@3882: XMM6, kvn@3882: XMM7 kvn@3882: #ifdef _LP64 kvn@3882: ,XMM8, kvn@3882: XMM9, kvn@3882: XMM10, kvn@3882: XMM11, kvn@3882: XMM12, kvn@3882: XMM13, kvn@3882: XMM14, kvn@3882: XMM15 kvn@3882: #endif kvn@3882: ); kvn@3882: kvn@3882: // Class for all double registers kvn@3882: reg_class double_reg(XMM0, XMM0b, kvn@3882: XMM1, XMM1b, kvn@3882: XMM2, XMM2b, kvn@3882: XMM3, XMM3b, kvn@3882: XMM4, XMM4b, kvn@3882: XMM5, XMM5b, kvn@3882: XMM6, XMM6b, kvn@3882: XMM7, XMM7b kvn@3882: #ifdef _LP64 kvn@3882: ,XMM8, XMM8b, kvn@3882: XMM9, XMM9b, kvn@3882: XMM10, XMM10b, kvn@3882: XMM11, XMM11b, kvn@3882: XMM12, XMM12b, kvn@3882: XMM13, XMM13b, kvn@3882: XMM14, XMM14b, kvn@3882: XMM15, XMM15b kvn@3882: #endif kvn@3882: ); kvn@3882: kvn@3882: // Class for all 32bit vector registers kvn@3882: reg_class vectors_reg(XMM0, kvn@3882: XMM1, kvn@3882: XMM2, kvn@3882: XMM3, kvn@3882: XMM4, kvn@3882: XMM5, kvn@3882: XMM6, kvn@3882: XMM7 kvn@3882: #ifdef _LP64 kvn@3882: ,XMM8, kvn@3882: XMM9, kvn@3882: XMM10, kvn@3882: XMM11, kvn@3882: XMM12, kvn@3882: XMM13, kvn@3882: XMM14, kvn@3882: XMM15 kvn@3882: #endif kvn@3882: ); kvn@3882: kvn@3882: // Class for all 64bit vector registers kvn@3882: reg_class vectord_reg(XMM0, XMM0b, kvn@3882: XMM1, XMM1b, kvn@3882: XMM2, XMM2b, kvn@3882: XMM3, XMM3b, kvn@3882: XMM4, XMM4b, kvn@3882: XMM5, XMM5b, kvn@3882: XMM6, XMM6b, kvn@3882: XMM7, XMM7b kvn@3882: #ifdef _LP64 kvn@3882: ,XMM8, XMM8b, kvn@3882: XMM9, XMM9b, kvn@3882: XMM10, XMM10b, kvn@3882: XMM11, XMM11b, kvn@3882: XMM12, XMM12b, kvn@3882: XMM13, XMM13b, kvn@3882: XMM14, XMM14b, kvn@3882: XMM15, XMM15b kvn@3882: #endif kvn@3882: ); kvn@3882: kvn@3882: // Class for all 128bit vector registers kvn@3882: reg_class vectorx_reg(XMM0, XMM0b, XMM0c, XMM0d, kvn@3882: XMM1, XMM1b, XMM1c, XMM1d, kvn@3882: XMM2, XMM2b, XMM2c, XMM2d, kvn@3882: XMM3, XMM3b, XMM3c, XMM3d, kvn@3882: XMM4, XMM4b, XMM4c, XMM4d, kvn@3882: XMM5, XMM5b, XMM5c, XMM5d, kvn@3882: XMM6, XMM6b, XMM6c, XMM6d, kvn@3882: XMM7, XMM7b, XMM7c, XMM7d kvn@3882: #ifdef _LP64 kvn@3882: ,XMM8, XMM8b, XMM8c, XMM8d, kvn@3882: XMM9, XMM9b, XMM9c, XMM9d, kvn@3882: XMM10, XMM10b, XMM10c, XMM10d, kvn@3882: XMM11, XMM11b, XMM11c, XMM11d, kvn@3882: XMM12, XMM12b, XMM12c, XMM12d, kvn@3882: XMM13, XMM13b, XMM13c, XMM13d, kvn@3882: XMM14, XMM14b, XMM14c, XMM14d, kvn@3882: XMM15, XMM15b, XMM15c, XMM15d kvn@3882: #endif kvn@3882: ); kvn@3882: kvn@3882: // Class for all 256bit vector registers kvn@3882: reg_class vectory_reg(XMM0, XMM0b, XMM0c, XMM0d, XMM0e, XMM0f, XMM0g, XMM0h, kvn@3882: XMM1, XMM1b, XMM1c, XMM1d, XMM1e, XMM1f, XMM1g, XMM1h, kvn@3882: XMM2, XMM2b, XMM2c, XMM2d, XMM2e, XMM2f, XMM2g, XMM2h, kvn@3882: XMM3, XMM3b, XMM3c, XMM3d, XMM3e, XMM3f, XMM3g, XMM3h, kvn@3882: XMM4, XMM4b, XMM4c, XMM4d, XMM4e, XMM4f, XMM4g, XMM4h, kvn@3882: XMM5, XMM5b, XMM5c, XMM5d, XMM5e, XMM5f, XMM5g, XMM5h, kvn@3882: XMM6, XMM6b, XMM6c, XMM6d, XMM6e, XMM6f, XMM6g, XMM6h, kvn@3882: XMM7, XMM7b, XMM7c, XMM7d, XMM7e, XMM7f, XMM7g, XMM7h 
kvn@3882: #ifdef _LP64 kvn@3882: ,XMM8, XMM8b, XMM8c, XMM8d, XMM8e, XMM8f, XMM8g, XMM8h, kvn@3882: XMM9, XMM9b, XMM9c, XMM9d, XMM9e, XMM9f, XMM9g, XMM9h, kvn@3882: XMM10, XMM10b, XMM10c, XMM10d, XMM10e, XMM10f, XMM10g, XMM10h, kvn@3882: XMM11, XMM11b, XMM11c, XMM11d, XMM11e, XMM11f, XMM11g, XMM11h, kvn@3882: XMM12, XMM12b, XMM12c, XMM12d, XMM12e, XMM12f, XMM12g, XMM12h, kvn@3882: XMM13, XMM13b, XMM13c, XMM13d, XMM13e, XMM13f, XMM13g, XMM13h, kvn@3882: XMM14, XMM14b, XMM14c, XMM14d, XMM14e, XMM14f, XMM14g, XMM14h, kvn@3882: XMM15, XMM15b, XMM15c, XMM15d, XMM15e, XMM15f, XMM15g, XMM15h kvn@3882: #endif kvn@3882: ); kvn@3882: kvn@3882: %} kvn@3882: kvn@3390: source %{ kvn@3390: // Float masks come from different places depending on platform. kvn@3390: #ifdef _LP64 kvn@3390: static address float_signmask() { return StubRoutines::x86::float_sign_mask(); } kvn@3390: static address float_signflip() { return StubRoutines::x86::float_sign_flip(); } kvn@3390: static address double_signmask() { return StubRoutines::x86::double_sign_mask(); } kvn@3390: static address double_signflip() { return StubRoutines::x86::double_sign_flip(); } kvn@3390: #else kvn@3390: static address float_signmask() { return (address)float_signmask_pool; } kvn@3390: static address float_signflip() { return (address)float_signflip_pool; } kvn@3390: static address double_signmask() { return (address)double_signmask_pool; } kvn@3390: static address double_signflip() { return (address)double_signflip_pool; } kvn@3390: #endif kvn@3577: kvn@3882: kvn@4001: const bool Matcher::match_rule_supported(int opcode) { kvn@4001: if (!has_match_rule(opcode)) kvn@4001: return false; kvn@4001: kvn@4001: switch (opcode) { kvn@4001: case Op_PopCountI: kvn@4001: case Op_PopCountL: kvn@4001: if (!UsePopCountInstruction) kvn@4001: return false; kvn@4103: break; kvn@4001: case Op_MulVI: kvn@4001: if ((UseSSE < 4) && (UseAVX < 1)) // only with SSE4_1 or AVX kvn@4001: return false; kvn@4001: break; roland@4106: case Op_CompareAndSwapL: roland@4106: #ifdef _LP64 roland@4106: case Op_CompareAndSwapP: roland@4106: #endif roland@4106: if (!VM_Version::supports_cx8()) roland@4106: return false; roland@4106: break; kvn@4001: } kvn@4001: kvn@4001: return true; // Per default match rules are supported. kvn@4001: } kvn@4001: kvn@3882: // Max vector size in bytes. 0 if not supported. kvn@3882: const int Matcher::vector_width_in_bytes(BasicType bt) { kvn@3882: assert(is_java_primitive(bt), "only primitive type vectors"); kvn@3882: if (UseSSE < 2) return 0; kvn@3882: // SSE2 supports 128bit vectors for all types. kvn@3882: // AVX2 supports 256bit vectors for all types. kvn@3882: int size = (UseAVX > 1) ? 32 : 16; kvn@3882: // AVX1 supports 256bit vectors only for FLOAT and DOUBLE. kvn@3882: if (UseAVX > 0 && (bt == T_FLOAT || bt == T_DOUBLE)) kvn@3882: size = 32; kvn@3882: // Use flag to limit vector size. kvn@3882: size = MIN2(size,(int)MaxVectorSize); kvn@3882: // Minimum 2 values in vector (or 4 for bytes). kvn@3882: switch (bt) { kvn@3882: case T_DOUBLE: kvn@3882: case T_LONG: kvn@3882: if (size < 16) return 0; kvn@3882: case T_FLOAT: kvn@3882: case T_INT: kvn@3882: if (size < 8) return 0; kvn@3882: case T_BOOLEAN: kvn@3882: case T_BYTE: kvn@3882: case T_CHAR: kvn@3882: case T_SHORT: kvn@3882: if (size < 4) return 0; kvn@3882: break; kvn@3882: default: kvn@3882: ShouldNotReachHere(); kvn@3882: } kvn@3882: return size; kvn@3882: } kvn@3882: kvn@3882: // Limits on vector size (number of elements) loaded into vector. 
kvn@3882: const int Matcher::max_vector_size(const BasicType bt) { kvn@3882: return vector_width_in_bytes(bt)/type2aelembytes(bt); kvn@3882: } kvn@3882: const int Matcher::min_vector_size(const BasicType bt) { kvn@3882: int max_size = max_vector_size(bt); kvn@3882: // Min size which can be loaded into vector is 4 bytes. kvn@3882: int size = (type2aelembytes(bt) == 1) ? 4 : 2; kvn@3882: return MIN2(size,max_size); kvn@3882: } kvn@3882: kvn@3882: // Vector ideal reg corresponding to specified size in bytes kvn@3882: const int Matcher::vector_ideal_reg(int size) { kvn@3882: assert(MaxVectorSize >= size, ""); kvn@3882: switch(size) { kvn@3882: case 4: return Op_VecS; kvn@3882: case 8: return Op_VecD; kvn@3882: case 16: return Op_VecX; kvn@3882: case 32: return Op_VecY; kvn@3882: } kvn@3882: ShouldNotReachHere(); kvn@3882: return 0; kvn@3882: } kvn@3882: kvn@4134: // Only lowest bits of xmm reg are used for vector shift count. kvn@4134: const int Matcher::vector_shift_count_ideal_reg(int size) { kvn@4134: return Op_VecS; kvn@4134: } kvn@4134: kvn@3882: // x86 supports misaligned vector store/load. kvn@3882: const bool Matcher::misaligned_vectors_ok() { kvn@3882: return !AlignVector; // can be changed by flag kvn@3882: } kvn@3882: kvn@3882: // Helper methods for MachSpillCopyNode::implementation(). kvn@3882: static int vec_mov_helper(CodeBuffer *cbuf, bool do_size, int src_lo, int dst_lo, kvn@3882: int src_hi, int dst_hi, uint ireg, outputStream* st) { kvn@3882: // In 64-bit VM size calculation is very complex. Emitting instructions kvn@3882: // into scratch buffer is used to get size in 64-bit VM. kvn@3882: LP64_ONLY( assert(!do_size, "this method calculates size only for 32-bit VM"); ) kvn@3882: assert(ireg == Op_VecS || // 32bit vector kvn@3882: (src_lo & 1) == 0 && (src_lo + 1) == src_hi && kvn@3882: (dst_lo & 1) == 0 && (dst_lo + 1) == dst_hi, kvn@3882: "no non-adjacent vector moves" ); kvn@3882: if (cbuf) { kvn@3882: MacroAssembler _masm(cbuf); kvn@3882: int offset = __ offset(); kvn@3882: switch (ireg) { kvn@3882: case Op_VecS: // copy whole register kvn@3882: case Op_VecD: kvn@3882: case Op_VecX: kvn@3882: __ movdqu(as_XMMRegister(Matcher::_regEncode[dst_lo]), as_XMMRegister(Matcher::_regEncode[src_lo])); kvn@3882: break; kvn@3882: case Op_VecY: kvn@3882: __ vmovdqu(as_XMMRegister(Matcher::_regEncode[dst_lo]), as_XMMRegister(Matcher::_regEncode[src_lo])); kvn@3882: break; kvn@3882: default: kvn@3882: ShouldNotReachHere(); kvn@3882: } kvn@3882: int size = __ offset() - offset; kvn@3882: #ifdef ASSERT kvn@3882: // VEX_2bytes prefix is used if UseAVX > 0, so it takes the same 2 bytes as SIMD prefix. kvn@3882: assert(!do_size || size == 4, "incorrect size calculation"); kvn@3882: #endif kvn@3882: return size; kvn@3882: #ifndef PRODUCT kvn@3882: } else if (!do_size) { kvn@3882: switch (ireg) { kvn@3882: case Op_VecS: kvn@3882: case Op_VecD: kvn@3882: case Op_VecX: kvn@3882: st->print("movdqu %s,%s\t# spill",Matcher::regName[dst_lo],Matcher::regName[src_lo]); kvn@3882: break; kvn@3882: case Op_VecY: kvn@3882: st->print("vmovdqu %s,%s\t# spill",Matcher::regName[dst_lo],Matcher::regName[src_lo]); kvn@3882: break; kvn@3882: default: kvn@3882: ShouldNotReachHere(); kvn@3882: } kvn@3882: #endif kvn@3882: } kvn@3882: // VEX_2bytes prefix is used if UseAVX > 0, and it takes the same 2 bytes as SIMD prefix.
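// For instance, assuming the usual register-to-register encodings, movdqu xmm,xmm (F3 0F 6F /r)
// and its 2-byte-VEX counterpart both encode in 4 bytes, so the fixed size returned below
// covers the SSE and AVX register copies emitted above.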
kvn@3882: return 4; kvn@3882: } kvn@3882: kvn@3882: static int vec_spill_helper(CodeBuffer *cbuf, bool do_size, bool is_load, kvn@3882: int stack_offset, int reg, uint ireg, outputStream* st) { kvn@3882: // In 64-bit VM size calculation is very complex. Emitting instructions kvn@3882: // into scratch buffer is used to get size in 64-bit VM. kvn@3882: LP64_ONLY( assert(!do_size, "this method calculates size only for 32-bit VM"); ) kvn@3882: if (cbuf) { kvn@3882: MacroAssembler _masm(cbuf); kvn@3882: int offset = __ offset(); kvn@3882: if (is_load) { kvn@3882: switch (ireg) { kvn@3882: case Op_VecS: kvn@3882: __ movdl(as_XMMRegister(Matcher::_regEncode[reg]), Address(rsp, stack_offset)); kvn@3882: break; kvn@3882: case Op_VecD: kvn@3882: __ movq(as_XMMRegister(Matcher::_regEncode[reg]), Address(rsp, stack_offset)); kvn@3882: break; kvn@3882: case Op_VecX: kvn@3882: __ movdqu(as_XMMRegister(Matcher::_regEncode[reg]), Address(rsp, stack_offset)); kvn@3882: break; kvn@3882: case Op_VecY: kvn@3882: __ vmovdqu(as_XMMRegister(Matcher::_regEncode[reg]), Address(rsp, stack_offset)); kvn@3882: break; kvn@3882: default: kvn@3882: ShouldNotReachHere(); kvn@3882: } kvn@3882: } else { // store kvn@3882: switch (ireg) { kvn@3882: case Op_VecS: kvn@3882: __ movdl(Address(rsp, stack_offset), as_XMMRegister(Matcher::_regEncode[reg])); kvn@3882: break; kvn@3882: case Op_VecD: kvn@3882: __ movq(Address(rsp, stack_offset), as_XMMRegister(Matcher::_regEncode[reg])); kvn@3882: break; kvn@3882: case Op_VecX: kvn@3882: __ movdqu(Address(rsp, stack_offset), as_XMMRegister(Matcher::_regEncode[reg])); kvn@3882: break; kvn@3882: case Op_VecY: kvn@3882: __ vmovdqu(Address(rsp, stack_offset), as_XMMRegister(Matcher::_regEncode[reg])); kvn@3882: break; kvn@3882: default: kvn@3882: ShouldNotReachHere(); kvn@3882: } kvn@3882: } kvn@3882: int size = __ offset() - offset; kvn@3882: #ifdef ASSERT kvn@3882: int offset_size = (stack_offset == 0) ? 0 : ((stack_offset < 0x80) ? 1 : 4); kvn@3882: // VEX_2bytes prefix is used if UseAVX > 0, so it takes the same 2 bytes as SIMD prefix. 
kvn@3882: assert(!do_size || size == (5+offset_size), "incorrect size calculation"); kvn@3882: #endif kvn@3882: return size; kvn@3882: #ifndef PRODUCT kvn@3882: } else if (!do_size) { kvn@3882: if (is_load) { kvn@3882: switch (ireg) { kvn@3882: case Op_VecS: kvn@3882: st->print("movd %s,[rsp + %d]\t# spill", Matcher::regName[reg], stack_offset); kvn@3882: break; kvn@3882: case Op_VecD: kvn@3882: st->print("movq %s,[rsp + %d]\t# spill", Matcher::regName[reg], stack_offset); kvn@3882: break; kvn@3882: case Op_VecX: kvn@3882: st->print("movdqu %s,[rsp + %d]\t# spill", Matcher::regName[reg], stack_offset); kvn@3882: break; kvn@3882: case Op_VecY: kvn@3882: st->print("vmovdqu %s,[rsp + %d]\t# spill", Matcher::regName[reg], stack_offset); kvn@3882: break; kvn@3882: default: kvn@3882: ShouldNotReachHere(); kvn@3882: } kvn@3882: } else { // store kvn@3882: switch (ireg) { kvn@3882: case Op_VecS: kvn@3882: st->print("movd [rsp + %d],%s\t# spill", stack_offset, Matcher::regName[reg]); kvn@3882: break; kvn@3882: case Op_VecD: kvn@3882: st->print("movq [rsp + %d],%s\t# spill", stack_offset, Matcher::regName[reg]); kvn@3882: break; kvn@3882: case Op_VecX: kvn@3882: st->print("movdqu [rsp + %d],%s\t# spill", stack_offset, Matcher::regName[reg]); kvn@3882: break; kvn@3882: case Op_VecY: kvn@3882: st->print("vmovdqu [rsp + %d],%s\t# spill", stack_offset, Matcher::regName[reg]); kvn@3882: break; kvn@3882: default: kvn@3882: ShouldNotReachHere(); kvn@3882: } kvn@3882: } kvn@3882: #endif kvn@3882: } kvn@3882: int offset_size = (stack_offset == 0) ? 0 : ((stack_offset < 0x80) ? 1 : 4); kvn@3882: // VEX_2bytes prefix is used if UseAVX > 0, so it takes the same 2 bytes as SIMD prefix. kvn@3882: return 5+offset_size; kvn@3882: } kvn@3882: kvn@3882: static inline jfloat replicate4_imm(int con, int width) { kvn@3882: // Load a constant of "width" (in bytes) and replicate it to fill 32bit. kvn@3882: assert(width == 1 || width == 2, "only byte or short types here"); kvn@3882: int bit_width = width * 8; kvn@3882: jint val = con; kvn@3882: val &= (1 << bit_width) - 1; // mask off sign bits kvn@3882: while(bit_width < 32) { kvn@3882: val |= (val << bit_width); kvn@3882: bit_width <<= 1; kvn@3882: } kvn@3882: jfloat fval = *((jfloat*) &val); // coerce to float type kvn@3882: return fval; kvn@3882: } kvn@3882: kvn@3882: static inline jdouble replicate8_imm(int con, int width) { kvn@3882: // Load a constant of "width" (in bytes) and replicate it to fill 64bit.
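// Worked example (illustrative): con = 0xAB with width = 1 starts as val = 0xAB; each pass of
// the loop below doubles the pattern (0xABAB, 0xABABABAB, 0xABABABABABABABAB) until all 64 bits
// are filled, and the result is returned reinterpreted as a jdouble.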
kvn@3882: assert(width == 1 || width == 2 || width == 4, "only byte, short or int types here"); kvn@3882: int bit_width = width * 8; kvn@3882: jlong val = con; kvn@3882: val &= (((jlong) 1) << bit_width) - 1; // mask off sign bits kvn@3882: while(bit_width < 64) { kvn@3882: val |= (val << bit_width); kvn@3882: bit_width <<= 1; kvn@3882: } kvn@3882: jdouble dval = *((jdouble*) &val); // coerce to double type kvn@3882: return dval; kvn@3882: } kvn@3882: kvn@3577: #ifndef PRODUCT kvn@3577: void MachNopNode::format(PhaseRegAlloc*, outputStream* st) const { kvn@3577: st->print("nop \t# %d bytes pad for loops and calls", _count); kvn@3577: } kvn@3577: #endif kvn@3577: kvn@3577: void MachNopNode::emit(CodeBuffer &cbuf, PhaseRegAlloc*) const { kvn@3577: MacroAssembler _masm(&cbuf); kvn@3577: __ nop(_count); kvn@3577: } kvn@3577: kvn@3577: uint MachNopNode::size(PhaseRegAlloc*) const { kvn@3577: return _count; kvn@3577: } kvn@3577: kvn@3577: #ifndef PRODUCT kvn@3577: void MachBreakpointNode::format(PhaseRegAlloc*, outputStream* st) const { kvn@3577: st->print("# breakpoint"); kvn@3577: } kvn@3577: #endif kvn@3577: kvn@3577: void MachBreakpointNode::emit(CodeBuffer &cbuf, PhaseRegAlloc* ra_) const { kvn@3577: MacroAssembler _masm(&cbuf); kvn@3577: __ int3(); kvn@3577: } kvn@3577: kvn@3577: uint MachBreakpointNode::size(PhaseRegAlloc* ra_) const { kvn@3577: return MachNode::size(ra_); kvn@3577: } kvn@3577: kvn@3577: %} kvn@3577: kvn@3577: encode %{ kvn@3577: kvn@3577: enc_class preserve_SP %{ kvn@3577: debug_only(int off0 = cbuf.insts_size()); kvn@3577: MacroAssembler _masm(&cbuf); kvn@3577: // RBP is preserved across all calls, even compiled calls. kvn@3577: // Use it to preserve RSP in places where the callee might change the SP. kvn@3577: __ movptr(rbp_mh_SP_save, rsp); kvn@3577: debug_only(int off1 = cbuf.insts_size()); kvn@3577: assert(off1 - off0 == preserve_SP_size(), "correct size prediction"); kvn@3577: %} kvn@3577: kvn@3577: enc_class restore_SP %{ kvn@3577: MacroAssembler _masm(&cbuf); kvn@3577: __ movptr(rsp, rbp_mh_SP_save); kvn@3577: %} kvn@3577: kvn@3577: enc_class call_epilog %{ kvn@3577: if (VerifyStackAtCalls) { kvn@3577: // Check that stack depth is unchanged: find majik cookie on stack kvn@3577: int framesize = ra_->reg2offset_unchecked(OptoReg::add(ra_->_matcher._old_SP, -3*VMRegImpl::slots_per_word)); kvn@3577: MacroAssembler _masm(&cbuf); kvn@3577: Label L; kvn@3577: __ cmpptr(Address(rsp, framesize), (int32_t)0xbadb100d); kvn@3577: __ jccb(Assembler::equal, L); kvn@3577: // Die if stack mismatch kvn@3577: __ int3(); kvn@3577: __ bind(L); kvn@3577: } kvn@3577: %} kvn@3577: kvn@3390: %} kvn@3390: kvn@3882: kvn@3882: //----------OPERANDS----------------------------------------------------------- kvn@3882: // Operand definitions must precede instruction definitions for correct parsing kvn@3882: // in the ADLC because operands constitute user defined types which are used in kvn@3882: // instruction definitions. 
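// For example, the vecS/vecD/vecX/vecY operands below are thin wrappers over the
// vectors_reg/vectord_reg/vectorx_reg/vectory_reg classes from the register block above;
// instructions then use them as typed register operands (REG_INTER).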
kvn@3882: kvn@3882: // Vectors kvn@3882: operand vecS() %{ kvn@3882: constraint(ALLOC_IN_RC(vectors_reg)); kvn@3882: match(VecS); kvn@3882: kvn@3882: format %{ %} kvn@3882: interface(REG_INTER); kvn@3882: %} kvn@3882: kvn@3882: operand vecD() %{ kvn@3882: constraint(ALLOC_IN_RC(vectord_reg)); kvn@3882: match(VecD); kvn@3882: kvn@3882: format %{ %} kvn@3882: interface(REG_INTER); kvn@3882: %} kvn@3882: kvn@3882: operand vecX() %{ kvn@3882: constraint(ALLOC_IN_RC(vectorx_reg)); kvn@3882: match(VecX); kvn@3882: kvn@3882: format %{ %} kvn@3882: interface(REG_INTER); kvn@3882: %} kvn@3882: kvn@3882: operand vecY() %{ kvn@3882: constraint(ALLOC_IN_RC(vectory_reg)); kvn@3882: match(VecY); kvn@3882: kvn@3882: format %{ %} kvn@3882: interface(REG_INTER); kvn@3882: %} kvn@3882: kvn@3882: kvn@3390: // INSTRUCTIONS -- Platform independent definitions (same for 32- and 64-bit) kvn@3390: kvn@3577: // ============================================================================ kvn@3577: kvn@3577: instruct ShouldNotReachHere() %{ kvn@3577: match(Halt); kvn@3577: format %{ "int3\t# ShouldNotReachHere" %} kvn@3577: ins_encode %{ kvn@3577: __ int3(); kvn@3577: %} kvn@3577: ins_pipe(pipe_slow); kvn@3577: %} kvn@3577: kvn@3577: // ============================================================================ kvn@3577: kvn@3390: instruct addF_reg(regF dst, regF src) %{ kvn@3390: predicate((UseSSE>=1) && (UseAVX == 0)); kvn@3390: match(Set dst (AddF dst src)); kvn@3390: kvn@3390: format %{ "addss $dst, $src" %} kvn@3390: ins_cost(150); kvn@3390: ins_encode %{ kvn@3390: __ addss($dst$$XMMRegister, $src$$XMMRegister); kvn@3390: %} kvn@3390: ins_pipe(pipe_slow); kvn@3390: %} kvn@3390: kvn@3390: instruct addF_mem(regF dst, memory src) %{ kvn@3390: predicate((UseSSE>=1) && (UseAVX == 0)); kvn@3390: match(Set dst (AddF dst (LoadF src))); kvn@3390: kvn@3390: format %{ "addss $dst, $src" %} kvn@3390: ins_cost(150); kvn@3390: ins_encode %{ kvn@3390: __ addss($dst$$XMMRegister, $src$$Address); kvn@3390: %} kvn@3390: ins_pipe(pipe_slow); kvn@3390: %} kvn@3390: kvn@3390: instruct addF_imm(regF dst, immF con) %{ kvn@3390: predicate((UseSSE>=1) && (UseAVX == 0)); kvn@3390: match(Set dst (AddF dst con)); kvn@3390: format %{ "addss $dst, [$constantaddress]\t# load from constant table: float=$con" %} kvn@3390: ins_cost(150); kvn@3390: ins_encode %{ kvn@3390: __ addss($dst$$XMMRegister, $constantaddress($con)); kvn@3390: %} kvn@3390: ins_pipe(pipe_slow); kvn@3390: %} kvn@3390: kvn@3929: instruct addF_reg_reg(regF dst, regF src1, regF src2) %{ kvn@3390: predicate(UseAVX > 0); kvn@3390: match(Set dst (AddF src1 src2)); kvn@3390: kvn@3390: format %{ "vaddss $dst, $src1, $src2" %} kvn@3390: ins_cost(150); kvn@3390: ins_encode %{ kvn@3390: __ vaddss($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister); kvn@3390: %} kvn@3390: ins_pipe(pipe_slow); kvn@3390: %} kvn@3390: kvn@3929: instruct addF_reg_mem(regF dst, regF src1, memory src2) %{ kvn@3390: predicate(UseAVX > 0); kvn@3390: match(Set dst (AddF src1 (LoadF src2))); kvn@3390: kvn@3390: format %{ "vaddss $dst, $src1, $src2" %} kvn@3390: ins_cost(150); kvn@3390: ins_encode %{ kvn@3390: __ vaddss($dst$$XMMRegister, $src1$$XMMRegister, $src2$$Address); kvn@3390: %} kvn@3390: ins_pipe(pipe_slow); kvn@3390: %} kvn@3390: kvn@3929: instruct addF_reg_imm(regF dst, regF src, immF con) %{ kvn@3390: predicate(UseAVX > 0); kvn@3390: match(Set dst (AddF src con)); kvn@3390: kvn@3390: format %{ "vaddss $dst, $src, [$constantaddress]\t# load from constant table: float=$con" %} kvn@3390: 
ins_cost(150); kvn@3390: ins_encode %{ kvn@3390: __ vaddss($dst$$XMMRegister, $src$$XMMRegister, $constantaddress($con)); kvn@3390: %} kvn@3390: ins_pipe(pipe_slow); kvn@3390: %} kvn@3390: kvn@3390: instruct addD_reg(regD dst, regD src) %{ kvn@3390: predicate((UseSSE>=2) && (UseAVX == 0)); kvn@3390: match(Set dst (AddD dst src)); kvn@3390: kvn@3390: format %{ "addsd $dst, $src" %} kvn@3390: ins_cost(150); kvn@3390: ins_encode %{ kvn@3390: __ addsd($dst$$XMMRegister, $src$$XMMRegister); kvn@3390: %} kvn@3390: ins_pipe(pipe_slow); kvn@3390: %} kvn@3390: kvn@3390: instruct addD_mem(regD dst, memory src) %{ kvn@3390: predicate((UseSSE>=2) && (UseAVX == 0)); kvn@3390: match(Set dst (AddD dst (LoadD src))); kvn@3390: kvn@3390: format %{ "addsd $dst, $src" %} kvn@3390: ins_cost(150); kvn@3390: ins_encode %{ kvn@3390: __ addsd($dst$$XMMRegister, $src$$Address); kvn@3390: %} kvn@3390: ins_pipe(pipe_slow); kvn@3390: %} kvn@3390: kvn@3390: instruct addD_imm(regD dst, immD con) %{ kvn@3390: predicate((UseSSE>=2) && (UseAVX == 0)); kvn@3390: match(Set dst (AddD dst con)); kvn@3390: format %{ "addsd $dst, [$constantaddress]\t# load from constant table: double=$con" %} kvn@3390: ins_cost(150); kvn@3390: ins_encode %{ kvn@3390: __ addsd($dst$$XMMRegister, $constantaddress($con)); kvn@3390: %} kvn@3390: ins_pipe(pipe_slow); kvn@3390: %} kvn@3390: kvn@3929: instruct addD_reg_reg(regD dst, regD src1, regD src2) %{ kvn@3390: predicate(UseAVX > 0); kvn@3390: match(Set dst (AddD src1 src2)); kvn@3390: kvn@3390: format %{ "vaddsd $dst, $src1, $src2" %} kvn@3390: ins_cost(150); kvn@3390: ins_encode %{ kvn@3390: __ vaddsd($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister); kvn@3390: %} kvn@3390: ins_pipe(pipe_slow); kvn@3390: %} kvn@3390: kvn@3929: instruct addD_reg_mem(regD dst, regD src1, memory src2) %{ kvn@3390: predicate(UseAVX > 0); kvn@3390: match(Set dst (AddD src1 (LoadD src2))); kvn@3390: kvn@3390: format %{ "vaddsd $dst, $src1, $src2" %} kvn@3390: ins_cost(150); kvn@3390: ins_encode %{ kvn@3390: __ vaddsd($dst$$XMMRegister, $src1$$XMMRegister, $src2$$Address); kvn@3390: %} kvn@3390: ins_pipe(pipe_slow); kvn@3390: %} kvn@3390: kvn@3929: instruct addD_reg_imm(regD dst, regD src, immD con) %{ kvn@3390: predicate(UseAVX > 0); kvn@3390: match(Set dst (AddD src con)); kvn@3390: kvn@3390: format %{ "vaddsd $dst, $src, [$constantaddress]\t# load from constant table: double=$con" %} kvn@3390: ins_cost(150); kvn@3390: ins_encode %{ kvn@3390: __ vaddsd($dst$$XMMRegister, $src$$XMMRegister, $constantaddress($con)); kvn@3390: %} kvn@3390: ins_pipe(pipe_slow); kvn@3390: %} kvn@3390: kvn@3390: instruct subF_reg(regF dst, regF src) %{ kvn@3390: predicate((UseSSE>=1) && (UseAVX == 0)); kvn@3390: match(Set dst (SubF dst src)); kvn@3390: kvn@3390: format %{ "subss $dst, $src" %} kvn@3390: ins_cost(150); kvn@3390: ins_encode %{ kvn@3390: __ subss($dst$$XMMRegister, $src$$XMMRegister); kvn@3390: %} kvn@3390: ins_pipe(pipe_slow); kvn@3390: %} kvn@3390: kvn@3390: instruct subF_mem(regF dst, memory src) %{ kvn@3390: predicate((UseSSE>=1) && (UseAVX == 0)); kvn@3390: match(Set dst (SubF dst (LoadF src))); kvn@3390: kvn@3390: format %{ "subss $dst, $src" %} kvn@3390: ins_cost(150); kvn@3390: ins_encode %{ kvn@3390: __ subss($dst$$XMMRegister, $src$$Address); kvn@3390: %} kvn@3390: ins_pipe(pipe_slow); kvn@3390: %} kvn@3390: kvn@3390: instruct subF_imm(regF dst, immF con) %{ kvn@3390: predicate((UseSSE>=1) && (UseAVX == 0)); kvn@3390: match(Set dst (SubF dst con)); kvn@3390: format %{ "subss $dst, [$constantaddress]\t# 
load from constant table: float=$con" %} kvn@3390: ins_cost(150); kvn@3390: ins_encode %{ kvn@3390: __ subss($dst$$XMMRegister, $constantaddress($con)); kvn@3390: %} kvn@3390: ins_pipe(pipe_slow); kvn@3390: %} kvn@3390: kvn@3929: instruct subF_reg_reg(regF dst, regF src1, regF src2) %{ kvn@3390: predicate(UseAVX > 0); kvn@3390: match(Set dst (SubF src1 src2)); kvn@3390: kvn@3390: format %{ "vsubss $dst, $src1, $src2" %} kvn@3390: ins_cost(150); kvn@3390: ins_encode %{ kvn@3390: __ vsubss($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister); kvn@3390: %} kvn@3390: ins_pipe(pipe_slow); kvn@3390: %} kvn@3390: kvn@3929: instruct subF_reg_mem(regF dst, regF src1, memory src2) %{ kvn@3390: predicate(UseAVX > 0); kvn@3390: match(Set dst (SubF src1 (LoadF src2))); kvn@3390: kvn@3390: format %{ "vsubss $dst, $src1, $src2" %} kvn@3390: ins_cost(150); kvn@3390: ins_encode %{ kvn@3390: __ vsubss($dst$$XMMRegister, $src1$$XMMRegister, $src2$$Address); kvn@3390: %} kvn@3390: ins_pipe(pipe_slow); kvn@3390: %} kvn@3390: kvn@3929: instruct subF_reg_imm(regF dst, regF src, immF con) %{ kvn@3390: predicate(UseAVX > 0); kvn@3390: match(Set dst (SubF src con)); kvn@3390: kvn@3390: format %{ "vsubss $dst, $src, [$constantaddress]\t# load from constant table: float=$con" %} kvn@3390: ins_cost(150); kvn@3390: ins_encode %{ kvn@3390: __ vsubss($dst$$XMMRegister, $src$$XMMRegister, $constantaddress($con)); kvn@3390: %} kvn@3390: ins_pipe(pipe_slow); kvn@3390: %} kvn@3390: kvn@3390: instruct subD_reg(regD dst, regD src) %{ kvn@3390: predicate((UseSSE>=2) && (UseAVX == 0)); kvn@3390: match(Set dst (SubD dst src)); kvn@3390: kvn@3390: format %{ "subsd $dst, $src" %} kvn@3390: ins_cost(150); kvn@3390: ins_encode %{ kvn@3390: __ subsd($dst$$XMMRegister, $src$$XMMRegister); kvn@3390: %} kvn@3390: ins_pipe(pipe_slow); kvn@3390: %} kvn@3390: kvn@3390: instruct subD_mem(regD dst, memory src) %{ kvn@3390: predicate((UseSSE>=2) && (UseAVX == 0)); kvn@3390: match(Set dst (SubD dst (LoadD src))); kvn@3390: kvn@3390: format %{ "subsd $dst, $src" %} kvn@3390: ins_cost(150); kvn@3390: ins_encode %{ kvn@3390: __ subsd($dst$$XMMRegister, $src$$Address); kvn@3390: %} kvn@3390: ins_pipe(pipe_slow); kvn@3390: %} kvn@3390: kvn@3390: instruct subD_imm(regD dst, immD con) %{ kvn@3390: predicate((UseSSE>=2) && (UseAVX == 0)); kvn@3390: match(Set dst (SubD dst con)); kvn@3390: format %{ "subsd $dst, [$constantaddress]\t# load from constant table: double=$con" %} kvn@3390: ins_cost(150); kvn@3390: ins_encode %{ kvn@3390: __ subsd($dst$$XMMRegister, $constantaddress($con)); kvn@3390: %} kvn@3390: ins_pipe(pipe_slow); kvn@3390: %} kvn@3390: kvn@3929: instruct subD_reg_reg(regD dst, regD src1, regD src2) %{ kvn@3390: predicate(UseAVX > 0); kvn@3390: match(Set dst (SubD src1 src2)); kvn@3390: kvn@3390: format %{ "vsubsd $dst, $src1, $src2" %} kvn@3390: ins_cost(150); kvn@3390: ins_encode %{ kvn@3390: __ vsubsd($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister); kvn@3390: %} kvn@3390: ins_pipe(pipe_slow); kvn@3390: %} kvn@3390: kvn@3929: instruct subD_reg_mem(regD dst, regD src1, memory src2) %{ kvn@3390: predicate(UseAVX > 0); kvn@3390: match(Set dst (SubD src1 (LoadD src2))); kvn@3390: kvn@3390: format %{ "vsubsd $dst, $src1, $src2" %} kvn@3390: ins_cost(150); kvn@3390: ins_encode %{ kvn@3390: __ vsubsd($dst$$XMMRegister, $src1$$XMMRegister, $src2$$Address); kvn@3390: %} kvn@3390: ins_pipe(pipe_slow); kvn@3390: %} kvn@3390: kvn@3929: instruct subD_reg_imm(regD dst, regD src, immD con) %{ kvn@3390: predicate(UseAVX > 0); kvn@3390: 
match(Set dst (SubD src con)); kvn@3390: kvn@3390: format %{ "vsubsd $dst, $src, [$constantaddress]\t# load from constant table: double=$con" %} kvn@3390: ins_cost(150); kvn@3390: ins_encode %{ kvn@3390: __ vsubsd($dst$$XMMRegister, $src$$XMMRegister, $constantaddress($con)); kvn@3390: %} kvn@3390: ins_pipe(pipe_slow); kvn@3390: %} kvn@3390: kvn@3390: instruct mulF_reg(regF dst, regF src) %{ kvn@3390: predicate((UseSSE>=1) && (UseAVX == 0)); kvn@3390: match(Set dst (MulF dst src)); kvn@3390: kvn@3390: format %{ "mulss $dst, $src" %} kvn@3390: ins_cost(150); kvn@3390: ins_encode %{ kvn@3390: __ mulss($dst$$XMMRegister, $src$$XMMRegister); kvn@3390: %} kvn@3390: ins_pipe(pipe_slow); kvn@3390: %} kvn@3390: kvn@3390: instruct mulF_mem(regF dst, memory src) %{ kvn@3390: predicate((UseSSE>=1) && (UseAVX == 0)); kvn@3390: match(Set dst (MulF dst (LoadF src))); kvn@3390: kvn@3390: format %{ "mulss $dst, $src" %} kvn@3390: ins_cost(150); kvn@3390: ins_encode %{ kvn@3390: __ mulss($dst$$XMMRegister, $src$$Address); kvn@3390: %} kvn@3390: ins_pipe(pipe_slow); kvn@3390: %} kvn@3390: kvn@3390: instruct mulF_imm(regF dst, immF con) %{ kvn@3390: predicate((UseSSE>=1) && (UseAVX == 0)); kvn@3390: match(Set dst (MulF dst con)); kvn@3390: format %{ "mulss $dst, [$constantaddress]\t# load from constant table: float=$con" %} kvn@3390: ins_cost(150); kvn@3390: ins_encode %{ kvn@3390: __ mulss($dst$$XMMRegister, $constantaddress($con)); kvn@3390: %} kvn@3390: ins_pipe(pipe_slow); kvn@3390: %} kvn@3390: kvn@3929: instruct mulF_reg_reg(regF dst, regF src1, regF src2) %{ kvn@3390: predicate(UseAVX > 0); kvn@3390: match(Set dst (MulF src1 src2)); kvn@3390: kvn@3390: format %{ "vmulss $dst, $src1, $src2" %} kvn@3390: ins_cost(150); kvn@3390: ins_encode %{ kvn@3390: __ vmulss($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister); kvn@3390: %} kvn@3390: ins_pipe(pipe_slow); kvn@3390: %} kvn@3390: kvn@3929: instruct mulF_reg_mem(regF dst, regF src1, memory src2) %{ kvn@3390: predicate(UseAVX > 0); kvn@3390: match(Set dst (MulF src1 (LoadF src2))); kvn@3390: kvn@3390: format %{ "vmulss $dst, $src1, $src2" %} kvn@3390: ins_cost(150); kvn@3390: ins_encode %{ kvn@3390: __ vmulss($dst$$XMMRegister, $src1$$XMMRegister, $src2$$Address); kvn@3390: %} kvn@3390: ins_pipe(pipe_slow); kvn@3390: %} kvn@3390: kvn@3929: instruct mulF_reg_imm(regF dst, regF src, immF con) %{ kvn@3390: predicate(UseAVX > 0); kvn@3390: match(Set dst (MulF src con)); kvn@3390: kvn@3390: format %{ "vmulss $dst, $src, [$constantaddress]\t# load from constant table: float=$con" %} kvn@3390: ins_cost(150); kvn@3390: ins_encode %{ kvn@3390: __ vmulss($dst$$XMMRegister, $src$$XMMRegister, $constantaddress($con)); kvn@3390: %} kvn@3390: ins_pipe(pipe_slow); kvn@3390: %} kvn@3390: kvn@3390: instruct mulD_reg(regD dst, regD src) %{ kvn@3390: predicate((UseSSE>=2) && (UseAVX == 0)); kvn@3390: match(Set dst (MulD dst src)); kvn@3390: kvn@3390: format %{ "mulsd $dst, $src" %} kvn@3390: ins_cost(150); kvn@3390: ins_encode %{ kvn@3390: __ mulsd($dst$$XMMRegister, $src$$XMMRegister); kvn@3390: %} kvn@3390: ins_pipe(pipe_slow); kvn@3390: %} kvn@3390: kvn@3390: instruct mulD_mem(regD dst, memory src) %{ kvn@3390: predicate((UseSSE>=2) && (UseAVX == 0)); kvn@3390: match(Set dst (MulD dst (LoadD src))); kvn@3390: kvn@3390: format %{ "mulsd $dst, $src" %} kvn@3390: ins_cost(150); kvn@3390: ins_encode %{ kvn@3390: __ mulsd($dst$$XMMRegister, $src$$Address); kvn@3390: %} kvn@3390: ins_pipe(pipe_slow); kvn@3390: %} kvn@3390: kvn@3390: instruct mulD_imm(regD dst, immD con) 
%{ kvn@3390: predicate((UseSSE>=2) && (UseAVX == 0)); kvn@3390: match(Set dst (MulD dst con)); kvn@3390: format %{ "mulsd $dst, [$constantaddress]\t# load from constant table: double=$con" %} kvn@3390: ins_cost(150); kvn@3390: ins_encode %{ kvn@3390: __ mulsd($dst$$XMMRegister, $constantaddress($con)); kvn@3390: %} kvn@3390: ins_pipe(pipe_slow); kvn@3390: %} kvn@3390: kvn@3929: instruct mulD_reg_reg(regD dst, regD src1, regD src2) %{ kvn@3390: predicate(UseAVX > 0); kvn@3390: match(Set dst (MulD src1 src2)); kvn@3390: kvn@3390: format %{ "vmulsd $dst, $src1, $src2" %} kvn@3390: ins_cost(150); kvn@3390: ins_encode %{ kvn@3390: __ vmulsd($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister); kvn@3390: %} kvn@3390: ins_pipe(pipe_slow); kvn@3390: %} kvn@3390: kvn@3929: instruct mulD_reg_mem(regD dst, regD src1, memory src2) %{ kvn@3390: predicate(UseAVX > 0); kvn@3390: match(Set dst (MulD src1 (LoadD src2))); kvn@3390: kvn@3390: format %{ "vmulsd $dst, $src1, $src2" %} kvn@3390: ins_cost(150); kvn@3390: ins_encode %{ kvn@3390: __ vmulsd($dst$$XMMRegister, $src1$$XMMRegister, $src2$$Address); kvn@3390: %} kvn@3390: ins_pipe(pipe_slow); kvn@3390: %} kvn@3390: kvn@3929: instruct mulD_reg_imm(regD dst, regD src, immD con) %{ kvn@3390: predicate(UseAVX > 0); kvn@3390: match(Set dst (MulD src con)); kvn@3390: kvn@3390: format %{ "vmulsd $dst, $src, [$constantaddress]\t# load from constant table: double=$con" %} kvn@3390: ins_cost(150); kvn@3390: ins_encode %{ kvn@3390: __ vmulsd($dst$$XMMRegister, $src$$XMMRegister, $constantaddress($con)); kvn@3390: %} kvn@3390: ins_pipe(pipe_slow); kvn@3390: %} kvn@3390: kvn@3390: instruct divF_reg(regF dst, regF src) %{ kvn@3390: predicate((UseSSE>=1) && (UseAVX == 0)); kvn@3390: match(Set dst (DivF dst src)); kvn@3390: kvn@3390: format %{ "divss $dst, $src" %} kvn@3390: ins_cost(150); kvn@3390: ins_encode %{ kvn@3390: __ divss($dst$$XMMRegister, $src$$XMMRegister); kvn@3390: %} kvn@3390: ins_pipe(pipe_slow); kvn@3390: %} kvn@3390: kvn@3390: instruct divF_mem(regF dst, memory src) %{ kvn@3390: predicate((UseSSE>=1) && (UseAVX == 0)); kvn@3390: match(Set dst (DivF dst (LoadF src))); kvn@3390: kvn@3390: format %{ "divss $dst, $src" %} kvn@3390: ins_cost(150); kvn@3390: ins_encode %{ kvn@3390: __ divss($dst$$XMMRegister, $src$$Address); kvn@3390: %} kvn@3390: ins_pipe(pipe_slow); kvn@3390: %} kvn@3390: kvn@3390: instruct divF_imm(regF dst, immF con) %{ kvn@3390: predicate((UseSSE>=1) && (UseAVX == 0)); kvn@3390: match(Set dst (DivF dst con)); kvn@3390: format %{ "divss $dst, [$constantaddress]\t# load from constant table: float=$con" %} kvn@3390: ins_cost(150); kvn@3390: ins_encode %{ kvn@3390: __ divss($dst$$XMMRegister, $constantaddress($con)); kvn@3390: %} kvn@3390: ins_pipe(pipe_slow); kvn@3390: %} kvn@3390: kvn@3929: instruct divF_reg_reg(regF dst, regF src1, regF src2) %{ kvn@3390: predicate(UseAVX > 0); kvn@3390: match(Set dst (DivF src1 src2)); kvn@3390: kvn@3390: format %{ "vdivss $dst, $src1, $src2" %} kvn@3390: ins_cost(150); kvn@3390: ins_encode %{ kvn@3390: __ vdivss($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister); kvn@3390: %} kvn@3390: ins_pipe(pipe_slow); kvn@3390: %} kvn@3390: kvn@3929: instruct divF_reg_mem(regF dst, regF src1, memory src2) %{ kvn@3390: predicate(UseAVX > 0); kvn@3390: match(Set dst (DivF src1 (LoadF src2))); kvn@3390: kvn@3390: format %{ "vdivss $dst, $src1, $src2" %} kvn@3390: ins_cost(150); kvn@3390: ins_encode %{ kvn@3390: __ vdivss($dst$$XMMRegister, $src1$$XMMRegister, $src2$$Address); kvn@3390: %} kvn@3390: 
ins_pipe(pipe_slow); kvn@3390: %} kvn@3390: kvn@3929: instruct divF_reg_imm(regF dst, regF src, immF con) %{ kvn@3390: predicate(UseAVX > 0); kvn@3390: match(Set dst (DivF src con)); kvn@3390: kvn@3390: format %{ "vdivss $dst, $src, [$constantaddress]\t# load from constant table: float=$con" %} kvn@3390: ins_cost(150); kvn@3390: ins_encode %{ kvn@3390: __ vdivss($dst$$XMMRegister, $src$$XMMRegister, $constantaddress($con)); kvn@3390: %} kvn@3390: ins_pipe(pipe_slow); kvn@3390: %} kvn@3390: kvn@3390: instruct divD_reg(regD dst, regD src) %{ kvn@3390: predicate((UseSSE>=2) && (UseAVX == 0)); kvn@3390: match(Set dst (DivD dst src)); kvn@3390: kvn@3390: format %{ "divsd $dst, $src" %} kvn@3390: ins_cost(150); kvn@3390: ins_encode %{ kvn@3390: __ divsd($dst$$XMMRegister, $src$$XMMRegister); kvn@3390: %} kvn@3390: ins_pipe(pipe_slow); kvn@3390: %} kvn@3390: kvn@3390: instruct divD_mem(regD dst, memory src) %{ kvn@3390: predicate((UseSSE>=2) && (UseAVX == 0)); kvn@3390: match(Set dst (DivD dst (LoadD src))); kvn@3390: kvn@3390: format %{ "divsd $dst, $src" %} kvn@3390: ins_cost(150); kvn@3390: ins_encode %{ kvn@3390: __ divsd($dst$$XMMRegister, $src$$Address); kvn@3390: %} kvn@3390: ins_pipe(pipe_slow); kvn@3390: %} kvn@3390: kvn@3390: instruct divD_imm(regD dst, immD con) %{ kvn@3390: predicate((UseSSE>=2) && (UseAVX == 0)); kvn@3390: match(Set dst (DivD dst con)); kvn@3390: format %{ "divsd $dst, [$constantaddress]\t# load from constant table: double=$con" %} kvn@3390: ins_cost(150); kvn@3390: ins_encode %{ kvn@3390: __ divsd($dst$$XMMRegister, $constantaddress($con)); kvn@3390: %} kvn@3390: ins_pipe(pipe_slow); kvn@3390: %} kvn@3390: kvn@3929: instruct divD_reg_reg(regD dst, regD src1, regD src2) %{ kvn@3390: predicate(UseAVX > 0); kvn@3390: match(Set dst (DivD src1 src2)); kvn@3390: kvn@3390: format %{ "vdivsd $dst, $src1, $src2" %} kvn@3390: ins_cost(150); kvn@3390: ins_encode %{ kvn@3390: __ vdivsd($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister); kvn@3390: %} kvn@3390: ins_pipe(pipe_slow); kvn@3390: %} kvn@3390: kvn@3929: instruct divD_reg_mem(regD dst, regD src1, memory src2) %{ kvn@3390: predicate(UseAVX > 0); kvn@3390: match(Set dst (DivD src1 (LoadD src2))); kvn@3390: kvn@3390: format %{ "vdivsd $dst, $src1, $src2" %} kvn@3390: ins_cost(150); kvn@3390: ins_encode %{ kvn@3390: __ vdivsd($dst$$XMMRegister, $src1$$XMMRegister, $src2$$Address); kvn@3390: %} kvn@3390: ins_pipe(pipe_slow); kvn@3390: %} kvn@3390: kvn@3929: instruct divD_reg_imm(regD dst, regD src, immD con) %{ kvn@3390: predicate(UseAVX > 0); kvn@3390: match(Set dst (DivD src con)); kvn@3390: kvn@3390: format %{ "vdivsd $dst, $src, [$constantaddress]\t# load from constant table: double=$con" %} kvn@3390: ins_cost(150); kvn@3390: ins_encode %{ kvn@3390: __ vdivsd($dst$$XMMRegister, $src$$XMMRegister, $constantaddress($con)); kvn@3390: %} kvn@3390: ins_pipe(pipe_slow); kvn@3390: %} kvn@3390: kvn@3390: instruct absF_reg(regF dst) %{ kvn@3390: predicate((UseSSE>=1) && (UseAVX == 0)); kvn@3390: match(Set dst (AbsF dst)); kvn@3390: ins_cost(150); kvn@3390: format %{ "andps $dst, [0x7fffffff]\t# abs float by sign masking" %} kvn@3390: ins_encode %{ kvn@3390: __ andps($dst$$XMMRegister, ExternalAddress(float_signmask())); kvn@3390: %} kvn@3390: ins_pipe(pipe_slow); kvn@3390: %} kvn@3390: kvn@3929: instruct absF_reg_reg(regF dst, regF src) %{ kvn@3390: predicate(UseAVX > 0); kvn@3390: match(Set dst (AbsF src)); kvn@3390: ins_cost(150); kvn@3390: format %{ "vandps $dst, $src, [0x7fffffff]\t# abs float by sign masking" %} 
kvn@3390: ins_encode %{ kvn@4001: bool vector256 = false; kvn@3390: __ vandps($dst$$XMMRegister, $src$$XMMRegister, kvn@4001: ExternalAddress(float_signmask()), vector256); kvn@3390: %} kvn@3390: ins_pipe(pipe_slow); kvn@3390: %} kvn@3390: kvn@3390: instruct absD_reg(regD dst) %{ kvn@3390: predicate((UseSSE>=2) && (UseAVX == 0)); kvn@3390: match(Set dst (AbsD dst)); kvn@3390: ins_cost(150); kvn@3390: format %{ "andpd $dst, [0x7fffffffffffffff]\t" kvn@3390: "# abs double by sign masking" %} kvn@3390: ins_encode %{ kvn@3390: __ andpd($dst$$XMMRegister, ExternalAddress(double_signmask())); kvn@3390: %} kvn@3390: ins_pipe(pipe_slow); kvn@3390: %} kvn@3390: kvn@3929: instruct absD_reg_reg(regD dst, regD src) %{ kvn@3390: predicate(UseAVX > 0); kvn@3390: match(Set dst (AbsD src)); kvn@3390: ins_cost(150); kvn@3390: format %{ "vandpd $dst, $src, [0x7fffffffffffffff]\t" kvn@3390: "# abs double by sign masking" %} kvn@3390: ins_encode %{ kvn@4001: bool vector256 = false; kvn@3390: __ vandpd($dst$$XMMRegister, $src$$XMMRegister, kvn@4001: ExternalAddress(double_signmask()), vector256); kvn@3390: %} kvn@3390: ins_pipe(pipe_slow); kvn@3390: %} kvn@3390: kvn@3390: instruct negF_reg(regF dst) %{ kvn@3390: predicate((UseSSE>=1) && (UseAVX == 0)); kvn@3390: match(Set dst (NegF dst)); kvn@3390: ins_cost(150); kvn@3390: format %{ "xorps $dst, [0x80000000]\t# neg float by sign flipping" %} kvn@3390: ins_encode %{ kvn@3390: __ xorps($dst$$XMMRegister, ExternalAddress(float_signflip())); kvn@3390: %} kvn@3390: ins_pipe(pipe_slow); kvn@3390: %} kvn@3390: kvn@3929: instruct negF_reg_reg(regF dst, regF src) %{ kvn@3390: predicate(UseAVX > 0); kvn@3390: match(Set dst (NegF src)); kvn@3390: ins_cost(150); kvn@3390: format %{ "vxorps $dst, $src, [0x80000000]\t# neg float by sign flipping" %} kvn@3390: ins_encode %{ kvn@4001: bool vector256 = false; kvn@3390: __ vxorps($dst$$XMMRegister, $src$$XMMRegister, kvn@4001: ExternalAddress(float_signflip()), vector256); kvn@3390: %} kvn@3390: ins_pipe(pipe_slow); kvn@3390: %} kvn@3390: kvn@3390: instruct negD_reg(regD dst) %{ kvn@3390: predicate((UseSSE>=2) && (UseAVX == 0)); kvn@3390: match(Set dst (NegD dst)); kvn@3390: ins_cost(150); kvn@3390: format %{ "xorpd $dst, [0x8000000000000000]\t" kvn@3390: "# neg double by sign flipping" %} kvn@3390: ins_encode %{ kvn@3390: __ xorpd($dst$$XMMRegister, ExternalAddress(double_signflip())); kvn@3390: %} kvn@3390: ins_pipe(pipe_slow); kvn@3390: %} kvn@3390: kvn@3929: instruct negD_reg_reg(regD dst, regD src) %{ kvn@3390: predicate(UseAVX > 0); kvn@3390: match(Set dst (NegD src)); kvn@3390: ins_cost(150); kvn@3390: format %{ "vxorpd $dst, $src, [0x8000000000000000]\t" kvn@3390: "# neg double by sign flipping" %} kvn@3390: ins_encode %{ kvn@4001: bool vector256 = false; kvn@3390: __ vxorpd($dst$$XMMRegister, $src$$XMMRegister, kvn@4001: ExternalAddress(double_signflip()), vector256); kvn@3390: %} kvn@3390: ins_pipe(pipe_slow); kvn@3390: %} kvn@3390: kvn@3390: instruct sqrtF_reg(regF dst, regF src) %{ kvn@3390: predicate(UseSSE>=1); kvn@3390: match(Set dst (ConvD2F (SqrtD (ConvF2D src)))); kvn@3390: kvn@3390: format %{ "sqrtss $dst, $src" %} kvn@3390: ins_cost(150); kvn@3390: ins_encode %{ kvn@3390: __ sqrtss($dst$$XMMRegister, $src$$XMMRegister); kvn@3390: %} kvn@3390: ins_pipe(pipe_slow); kvn@3390: %} kvn@3390: kvn@3390: instruct sqrtF_mem(regF dst, memory src) %{ kvn@3390: predicate(UseSSE>=1); kvn@3390: match(Set dst (ConvD2F (SqrtD (ConvF2D (LoadF src))))); kvn@3390: kvn@3390: format %{ "sqrtss $dst, $src" %} kvn@3390: 
ins_cost(150); kvn@3390: ins_encode %{ kvn@3390: __ sqrtss($dst$$XMMRegister, $src$$Address); kvn@3390: %} kvn@3390: ins_pipe(pipe_slow); kvn@3390: %} kvn@3390: kvn@3390: instruct sqrtF_imm(regF dst, immF con) %{ kvn@3390: predicate(UseSSE>=1); kvn@3390: match(Set dst (ConvD2F (SqrtD (ConvF2D con)))); kvn@3390: format %{ "sqrtss $dst, [$constantaddress]\t# load from constant table: float=$con" %} kvn@3390: ins_cost(150); kvn@3390: ins_encode %{ kvn@3390: __ sqrtss($dst$$XMMRegister, $constantaddress($con)); kvn@3390: %} kvn@3390: ins_pipe(pipe_slow); kvn@3390: %} kvn@3390: kvn@3390: instruct sqrtD_reg(regD dst, regD src) %{ kvn@3390: predicate(UseSSE>=2); kvn@3390: match(Set dst (SqrtD src)); kvn@3390: kvn@3390: format %{ "sqrtsd $dst, $src" %} kvn@3390: ins_cost(150); kvn@3390: ins_encode %{ kvn@3390: __ sqrtsd($dst$$XMMRegister, $src$$XMMRegister); kvn@3390: %} kvn@3390: ins_pipe(pipe_slow); kvn@3390: %} kvn@3390: kvn@3390: instruct sqrtD_mem(regD dst, memory src) %{ kvn@3390: predicate(UseSSE>=2); kvn@3390: match(Set dst (SqrtD (LoadD src))); kvn@3390: kvn@3390: format %{ "sqrtsd $dst, $src" %} kvn@3390: ins_cost(150); kvn@3390: ins_encode %{ kvn@3390: __ sqrtsd($dst$$XMMRegister, $src$$Address); kvn@3390: %} kvn@3390: ins_pipe(pipe_slow); kvn@3390: %} kvn@3390: kvn@3390: instruct sqrtD_imm(regD dst, immD con) %{ kvn@3390: predicate(UseSSE>=2); kvn@3390: match(Set dst (SqrtD con)); kvn@3390: format %{ "sqrtsd $dst, [$constantaddress]\t# load from constant table: double=$con" %} kvn@3390: ins_cost(150); kvn@3390: ins_encode %{ kvn@3390: __ sqrtsd($dst$$XMMRegister, $constantaddress($con)); kvn@3390: %} kvn@3390: ins_pipe(pipe_slow); kvn@3390: %} kvn@3390: kvn@3882: kvn@3882: // ====================VECTOR INSTRUCTIONS===================================== kvn@3882: kvn@3882: // Load vectors (4 bytes long) kvn@3882: instruct loadV4(vecS dst, memory mem) %{ kvn@3882: predicate(n->as_LoadVector()->memory_size() == 4); kvn@3882: match(Set dst (LoadVector mem)); kvn@3882: ins_cost(125); kvn@3882: format %{ "movd $dst,$mem\t! load vector (4 bytes)" %} kvn@3882: ins_encode %{ kvn@3882: __ movdl($dst$$XMMRegister, $mem$$Address); kvn@3882: %} kvn@3882: ins_pipe( pipe_slow ); kvn@3882: %} kvn@3882: kvn@3882: // Load vectors (8 bytes long) kvn@3882: instruct loadV8(vecD dst, memory mem) %{ kvn@3882: predicate(n->as_LoadVector()->memory_size() == 8); kvn@3882: match(Set dst (LoadVector mem)); kvn@3882: ins_cost(125); kvn@3882: format %{ "movq $dst,$mem\t! load vector (8 bytes)" %} kvn@3882: ins_encode %{ kvn@3882: __ movq($dst$$XMMRegister, $mem$$Address); kvn@3882: %} kvn@3882: ins_pipe( pipe_slow ); kvn@3882: %} kvn@3882: kvn@3882: // Load vectors (16 bytes long) kvn@3882: instruct loadV16(vecX dst, memory mem) %{ kvn@3882: predicate(n->as_LoadVector()->memory_size() == 16); kvn@3882: match(Set dst (LoadVector mem)); kvn@3882: ins_cost(125); kvn@3882: format %{ "movdqu $dst,$mem\t! load vector (16 bytes)" %} kvn@3882: ins_encode %{ kvn@3882: __ movdqu($dst$$XMMRegister, $mem$$Address); kvn@3882: %} kvn@3882: ins_pipe( pipe_slow ); kvn@3882: %} kvn@3882: kvn@3882: // Load vectors (32 bytes long) kvn@3882: instruct loadV32(vecY dst, memory mem) %{ kvn@3882: predicate(n->as_LoadVector()->memory_size() == 32); kvn@3882: match(Set dst (LoadVector mem)); kvn@3882: ins_cost(125); kvn@3882: format %{ "vmovdqu $dst,$mem\t! 
load vector (32 bytes)" %} kvn@3882: ins_encode %{ kvn@3882: __ vmovdqu($dst$$XMMRegister, $mem$$Address); kvn@3882: %} kvn@3882: ins_pipe( pipe_slow ); kvn@3882: %} kvn@3882: kvn@3882: // Store vectors kvn@3882: instruct storeV4(memory mem, vecS src) %{ kvn@3882: predicate(n->as_StoreVector()->memory_size() == 4); kvn@3882: match(Set mem (StoreVector mem src)); kvn@3882: ins_cost(145); kvn@3882: format %{ "movd $mem,$src\t! store vector (4 bytes)" %} kvn@3882: ins_encode %{ kvn@3882: __ movdl($mem$$Address, $src$$XMMRegister); kvn@3882: %} kvn@3882: ins_pipe( pipe_slow ); kvn@3882: %} kvn@3882: kvn@3882: instruct storeV8(memory mem, vecD src) %{ kvn@3882: predicate(n->as_StoreVector()->memory_size() == 8); kvn@3882: match(Set mem (StoreVector mem src)); kvn@3882: ins_cost(145); kvn@3882: format %{ "movq $mem,$src\t! store vector (8 bytes)" %} kvn@3882: ins_encode %{ kvn@3882: __ movq($mem$$Address, $src$$XMMRegister); kvn@3882: %} kvn@3882: ins_pipe( pipe_slow ); kvn@3882: %} kvn@3882: kvn@3882: instruct storeV16(memory mem, vecX src) %{ kvn@3882: predicate(n->as_StoreVector()->memory_size() == 16); kvn@3882: match(Set mem (StoreVector mem src)); kvn@3882: ins_cost(145); kvn@3882: format %{ "movdqu $mem,$src\t! store vector (16 bytes)" %} kvn@3882: ins_encode %{ kvn@3882: __ movdqu($mem$$Address, $src$$XMMRegister); kvn@3882: %} kvn@3882: ins_pipe( pipe_slow ); kvn@3882: %} kvn@3882: kvn@3882: instruct storeV32(memory mem, vecY src) %{ kvn@3882: predicate(n->as_StoreVector()->memory_size() == 32); kvn@3882: match(Set mem (StoreVector mem src)); kvn@3882: ins_cost(145); kvn@3882: format %{ "vmovdqu $mem,$src\t! store vector (32 bytes)" %} kvn@3882: ins_encode %{ kvn@3882: __ vmovdqu($mem$$Address, $src$$XMMRegister); kvn@3882: %} kvn@3882: ins_pipe( pipe_slow ); kvn@3882: %} kvn@3882: kvn@3882: // Replicate byte scalar to be vector kvn@3882: instruct Repl4B(vecS dst, rRegI src) %{ kvn@3882: predicate(n->as_Vector()->length() == 4); kvn@3882: match(Set dst (ReplicateB src)); kvn@3882: format %{ "movd $dst,$src\n\t" kvn@3882: "punpcklbw $dst,$dst\n\t" kvn@3882: "pshuflw $dst,$dst,0x00\t! replicate4B" %} kvn@3882: ins_encode %{ kvn@3882: __ movdl($dst$$XMMRegister, $src$$Register); kvn@3882: __ punpcklbw($dst$$XMMRegister, $dst$$XMMRegister); kvn@3882: __ pshuflw($dst$$XMMRegister, $dst$$XMMRegister, 0x00); kvn@3882: %} kvn@3882: ins_pipe( pipe_slow ); kvn@3882: %} kvn@3882: kvn@3882: instruct Repl8B(vecD dst, rRegI src) %{ kvn@3882: predicate(n->as_Vector()->length() == 8); kvn@3882: match(Set dst (ReplicateB src)); kvn@3882: format %{ "movd $dst,$src\n\t" kvn@3882: "punpcklbw $dst,$dst\n\t" kvn@3882: "pshuflw $dst,$dst,0x00\t! replicate8B" %} kvn@3882: ins_encode %{ kvn@3882: __ movdl($dst$$XMMRegister, $src$$Register); kvn@3882: __ punpcklbw($dst$$XMMRegister, $dst$$XMMRegister); kvn@3882: __ pshuflw($dst$$XMMRegister, $dst$$XMMRegister, 0x00); kvn@3882: %} kvn@3882: ins_pipe( pipe_slow ); kvn@3882: %} kvn@3882: kvn@3882: instruct Repl16B(vecX dst, rRegI src) %{ kvn@3882: predicate(n->as_Vector()->length() == 16); kvn@3882: match(Set dst (ReplicateB src)); kvn@3882: format %{ "movd $dst,$src\n\t" kvn@3882: "punpcklbw $dst,$dst\n\t" kvn@3882: "pshuflw $dst,$dst,0x00\n\t" kvn@3929: "punpcklqdq $dst,$dst\t! 
replicate16B" %} kvn@3882: ins_encode %{ kvn@3882: __ movdl($dst$$XMMRegister, $src$$Register); kvn@3882: __ punpcklbw($dst$$XMMRegister, $dst$$XMMRegister); kvn@3882: __ pshuflw($dst$$XMMRegister, $dst$$XMMRegister, 0x00); kvn@3929: __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister); kvn@3882: %} kvn@3882: ins_pipe( pipe_slow ); kvn@3882: %} kvn@3882: kvn@3882: instruct Repl32B(vecY dst, rRegI src) %{ kvn@3882: predicate(n->as_Vector()->length() == 32); kvn@3882: match(Set dst (ReplicateB src)); kvn@3882: format %{ "movd $dst,$src\n\t" kvn@3882: "punpcklbw $dst,$dst\n\t" kvn@3882: "pshuflw $dst,$dst,0x00\n\t" kvn@3929: "punpcklqdq $dst,$dst\n\t" kvn@3929: "vinserti128h $dst,$dst,$dst\t! replicate32B" %} kvn@3882: ins_encode %{ kvn@3882: __ movdl($dst$$XMMRegister, $src$$Register); kvn@3882: __ punpcklbw($dst$$XMMRegister, $dst$$XMMRegister); kvn@3882: __ pshuflw($dst$$XMMRegister, $dst$$XMMRegister, 0x00); kvn@3929: __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister); kvn@3929: __ vinserti128h($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister); kvn@3882: %} kvn@3882: ins_pipe( pipe_slow ); kvn@3882: %} kvn@3882: kvn@3882: // Replicate byte scalar immediate to be vector by loading from const table. kvn@3882: instruct Repl4B_imm(vecS dst, immI con) %{ kvn@3882: predicate(n->as_Vector()->length() == 4); kvn@3882: match(Set dst (ReplicateB con)); kvn@3929: format %{ "movdl $dst,[$constantaddress]\t! replicate4B($con)" %} kvn@3882: ins_encode %{ kvn@3929: __ movdl($dst$$XMMRegister, $constantaddress(replicate4_imm($con$$constant, 1))); kvn@3882: %} kvn@3882: ins_pipe( pipe_slow ); kvn@3882: %} kvn@3882: kvn@3882: instruct Repl8B_imm(vecD dst, immI con) %{ kvn@3882: predicate(n->as_Vector()->length() == 8); kvn@3882: match(Set dst (ReplicateB con)); kvn@3929: format %{ "movq $dst,[$constantaddress]\t! replicate8B($con)" %} kvn@3882: ins_encode %{ kvn@3929: __ movq($dst$$XMMRegister, $constantaddress(replicate8_imm($con$$constant, 1))); kvn@3882: %} kvn@3882: ins_pipe( pipe_slow ); kvn@3882: %} kvn@3882: kvn@3882: instruct Repl16B_imm(vecX dst, immI con) %{ kvn@3882: predicate(n->as_Vector()->length() == 16); kvn@3882: match(Set dst (ReplicateB con)); kvn@3929: format %{ "movq $dst,[$constantaddress]\n\t" kvn@3929: "punpcklqdq $dst,$dst\t! replicate16B($con)" %} kvn@3882: ins_encode %{ kvn@3929: __ movq($dst$$XMMRegister, $constantaddress(replicate8_imm($con$$constant, 1))); kvn@3929: __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister); kvn@3882: %} kvn@3882: ins_pipe( pipe_slow ); kvn@3882: %} kvn@3882: kvn@3882: instruct Repl32B_imm(vecY dst, immI con) %{ kvn@3882: predicate(n->as_Vector()->length() == 32); kvn@3882: match(Set dst (ReplicateB con)); kvn@3929: format %{ "movq $dst,[$constantaddress]\n\t" kvn@3929: "punpcklqdq $dst,$dst\n\t" kvn@3929: "vinserti128h $dst,$dst,$dst\t! lreplicate32B($con)" %} kvn@3882: ins_encode %{ kvn@3929: __ movq($dst$$XMMRegister, $constantaddress(replicate8_imm($con$$constant, 1))); kvn@3929: __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister); kvn@3929: __ vinserti128h($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister); kvn@3882: %} kvn@3882: ins_pipe( pipe_slow ); kvn@3882: %} kvn@3882: kvn@3882: // Replicate byte scalar zero to be vector kvn@3882: instruct Repl4B_zero(vecS dst, immI0 zero) %{ kvn@3882: predicate(n->as_Vector()->length() == 4); kvn@3882: match(Set dst (ReplicateB zero)); kvn@3882: format %{ "pxor $dst,$dst\t! 
replicate4B zero" %} kvn@3882: ins_encode %{ kvn@3882: __ pxor($dst$$XMMRegister, $dst$$XMMRegister); kvn@3882: %} kvn@3882: ins_pipe( fpu_reg_reg ); kvn@3882: %} kvn@3882: kvn@3882: instruct Repl8B_zero(vecD dst, immI0 zero) %{ kvn@3882: predicate(n->as_Vector()->length() == 8); kvn@3882: match(Set dst (ReplicateB zero)); kvn@3882: format %{ "pxor $dst,$dst\t! replicate8B zero" %} kvn@3882: ins_encode %{ kvn@3882: __ pxor($dst$$XMMRegister, $dst$$XMMRegister); kvn@3882: %} kvn@3882: ins_pipe( fpu_reg_reg ); kvn@3882: %} kvn@3882: kvn@3882: instruct Repl16B_zero(vecX dst, immI0 zero) %{ kvn@3882: predicate(n->as_Vector()->length() == 16); kvn@3882: match(Set dst (ReplicateB zero)); kvn@3882: format %{ "pxor $dst,$dst\t! replicate16B zero" %} kvn@3882: ins_encode %{ kvn@3882: __ pxor($dst$$XMMRegister, $dst$$XMMRegister); kvn@3882: %} kvn@3882: ins_pipe( fpu_reg_reg ); kvn@3882: %} kvn@3882: kvn@3882: instruct Repl32B_zero(vecY dst, immI0 zero) %{ kvn@3882: predicate(n->as_Vector()->length() == 32); kvn@3882: match(Set dst (ReplicateB zero)); kvn@3929: format %{ "vpxor $dst,$dst,$dst\t! replicate32B zero" %} kvn@3882: ins_encode %{ kvn@3882: // Use vxorpd since AVX does not have vpxor for 256-bit (AVX2 will have it). kvn@3882: bool vector256 = true; kvn@3929: __ vpxor($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister, vector256); kvn@3882: %} kvn@3882: ins_pipe( fpu_reg_reg ); kvn@3882: %} kvn@3882: kvn@3882: // Replicate char/short (2 byte) scalar to be vector kvn@3882: instruct Repl2S(vecS dst, rRegI src) %{ kvn@3882: predicate(n->as_Vector()->length() == 2); kvn@3882: match(Set dst (ReplicateS src)); kvn@3882: format %{ "movd $dst,$src\n\t" kvn@3882: "pshuflw $dst,$dst,0x00\t! replicate2S" %} kvn@3882: ins_encode %{ kvn@3882: __ movdl($dst$$XMMRegister, $src$$Register); kvn@3882: __ pshuflw($dst$$XMMRegister, $dst$$XMMRegister, 0x00); kvn@3882: %} kvn@3882: ins_pipe( fpu_reg_reg ); kvn@3882: %} kvn@3882: kvn@3882: instruct Repl4S(vecD dst, rRegI src) %{ kvn@3882: predicate(n->as_Vector()->length() == 4); kvn@3882: match(Set dst (ReplicateS src)); kvn@3882: format %{ "movd $dst,$src\n\t" kvn@3882: "pshuflw $dst,$dst,0x00\t! replicate4S" %} kvn@3882: ins_encode %{ kvn@3882: __ movdl($dst$$XMMRegister, $src$$Register); kvn@3882: __ pshuflw($dst$$XMMRegister, $dst$$XMMRegister, 0x00); kvn@3882: %} kvn@3882: ins_pipe( fpu_reg_reg ); kvn@3882: %} kvn@3882: kvn@3882: instruct Repl8S(vecX dst, rRegI src) %{ kvn@3882: predicate(n->as_Vector()->length() == 8); kvn@3882: match(Set dst (ReplicateS src)); kvn@3882: format %{ "movd $dst,$src\n\t" kvn@3882: "pshuflw $dst,$dst,0x00\n\t" kvn@3929: "punpcklqdq $dst,$dst\t! replicate8S" %} kvn@3882: ins_encode %{ kvn@3882: __ movdl($dst$$XMMRegister, $src$$Register); kvn@3882: __ pshuflw($dst$$XMMRegister, $dst$$XMMRegister, 0x00); kvn@3929: __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister); kvn@3882: %} kvn@3882: ins_pipe( pipe_slow ); kvn@3882: %} kvn@3882: kvn@3882: instruct Repl16S(vecY dst, rRegI src) %{ kvn@3882: predicate(n->as_Vector()->length() == 16); kvn@3882: match(Set dst (ReplicateS src)); kvn@3882: format %{ "movd $dst,$src\n\t" kvn@3882: "pshuflw $dst,$dst,0x00\n\t" kvn@3929: "punpcklqdq $dst,$dst\n\t" kvn@3929: "vinserti128h $dst,$dst,$dst\t! 
replicate16S" %} kvn@3882: ins_encode %{ kvn@3882: __ movdl($dst$$XMMRegister, $src$$Register); kvn@3882: __ pshuflw($dst$$XMMRegister, $dst$$XMMRegister, 0x00); kvn@3929: __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister); kvn@3929: __ vinserti128h($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister); kvn@3882: %} kvn@3882: ins_pipe( pipe_slow ); kvn@3882: %} kvn@3882: kvn@3882: // Replicate char/short (2 byte) scalar immediate to be vector by loading from const table. kvn@3882: instruct Repl2S_imm(vecS dst, immI con) %{ kvn@3882: predicate(n->as_Vector()->length() == 2); kvn@3882: match(Set dst (ReplicateS con)); kvn@3929: format %{ "movdl $dst,[$constantaddress]\t! replicate2S($con)" %} kvn@3882: ins_encode %{ kvn@3929: __ movdl($dst$$XMMRegister, $constantaddress(replicate4_imm($con$$constant, 2))); kvn@3882: %} kvn@3882: ins_pipe( fpu_reg_reg ); kvn@3882: %} kvn@3882: kvn@3882: instruct Repl4S_imm(vecD dst, immI con) %{ kvn@3882: predicate(n->as_Vector()->length() == 4); kvn@3882: match(Set dst (ReplicateS con)); kvn@3929: format %{ "movq $dst,[$constantaddress]\t! replicate4S($con)" %} kvn@3882: ins_encode %{ kvn@3929: __ movq($dst$$XMMRegister, $constantaddress(replicate8_imm($con$$constant, 2))); kvn@3882: %} kvn@3882: ins_pipe( fpu_reg_reg ); kvn@3882: %} kvn@3882: kvn@3882: instruct Repl8S_imm(vecX dst, immI con) %{ kvn@3882: predicate(n->as_Vector()->length() == 8); kvn@3882: match(Set dst (ReplicateS con)); kvn@3929: format %{ "movq $dst,[$constantaddress]\n\t" kvn@3929: "punpcklqdq $dst,$dst\t! replicate8S($con)" %} kvn@3882: ins_encode %{ kvn@3929: __ movq($dst$$XMMRegister, $constantaddress(replicate8_imm($con$$constant, 2))); kvn@3929: __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister); kvn@3882: %} kvn@3882: ins_pipe( pipe_slow ); kvn@3882: %} kvn@3882: kvn@3882: instruct Repl16S_imm(vecY dst, immI con) %{ kvn@3882: predicate(n->as_Vector()->length() == 16); kvn@3882: match(Set dst (ReplicateS con)); kvn@3929: format %{ "movq $dst,[$constantaddress]\n\t" kvn@3929: "punpcklqdq $dst,$dst\n\t" kvn@3929: "vinserti128h $dst,$dst,$dst\t! replicate16S($con)" %} kvn@3882: ins_encode %{ kvn@3929: __ movq($dst$$XMMRegister, $constantaddress(replicate8_imm($con$$constant, 2))); kvn@3929: __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister); kvn@3929: __ vinserti128h($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister); kvn@3882: %} kvn@3882: ins_pipe( pipe_slow ); kvn@3882: %} kvn@3882: kvn@3882: // Replicate char/short (2 byte) scalar zero to be vector kvn@3882: instruct Repl2S_zero(vecS dst, immI0 zero) %{ kvn@3882: predicate(n->as_Vector()->length() == 2); kvn@3882: match(Set dst (ReplicateS zero)); kvn@3882: format %{ "pxor $dst,$dst\t! replicate2S zero" %} kvn@3882: ins_encode %{ kvn@3882: __ pxor($dst$$XMMRegister, $dst$$XMMRegister); kvn@3882: %} kvn@3882: ins_pipe( fpu_reg_reg ); kvn@3882: %} kvn@3882: kvn@3882: instruct Repl4S_zero(vecD dst, immI0 zero) %{ kvn@3882: predicate(n->as_Vector()->length() == 4); kvn@3882: match(Set dst (ReplicateS zero)); kvn@3882: format %{ "pxor $dst,$dst\t! replicate4S zero" %} kvn@3882: ins_encode %{ kvn@3882: __ pxor($dst$$XMMRegister, $dst$$XMMRegister); kvn@3882: %} kvn@3882: ins_pipe( fpu_reg_reg ); kvn@3882: %} kvn@3882: kvn@3882: instruct Repl8S_zero(vecX dst, immI0 zero) %{ kvn@3882: predicate(n->as_Vector()->length() == 8); kvn@3882: match(Set dst (ReplicateS zero)); kvn@3882: format %{ "pxor $dst,$dst\t! 
replicate8S zero" %} kvn@3882: ins_encode %{ kvn@3882: __ pxor($dst$$XMMRegister, $dst$$XMMRegister); kvn@3882: %} kvn@3882: ins_pipe( fpu_reg_reg ); kvn@3882: %} kvn@3882: kvn@3882: instruct Repl16S_zero(vecY dst, immI0 zero) %{ kvn@3882: predicate(n->as_Vector()->length() == 16); kvn@3882: match(Set dst (ReplicateS zero)); kvn@3929: format %{ "vpxor $dst,$dst,$dst\t! replicate16S zero" %} kvn@3882: ins_encode %{ kvn@3882: // Use vxorpd since AVX does not have vpxor for 256-bit (AVX2 will have it). kvn@3882: bool vector256 = true; kvn@3929: __ vpxor($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister, vector256); kvn@3882: %} kvn@3882: ins_pipe( fpu_reg_reg ); kvn@3882: %} kvn@3882: kvn@3882: // Replicate integer (4 byte) scalar to be vector kvn@3882: instruct Repl2I(vecD dst, rRegI src) %{ kvn@3882: predicate(n->as_Vector()->length() == 2); kvn@3882: match(Set dst (ReplicateI src)); kvn@3882: format %{ "movd $dst,$src\n\t" kvn@3882: "pshufd $dst,$dst,0x00\t! replicate2I" %} kvn@3882: ins_encode %{ kvn@3882: __ movdl($dst$$XMMRegister, $src$$Register); kvn@3882: __ pshufd($dst$$XMMRegister, $dst$$XMMRegister, 0x00); kvn@3882: %} kvn@3882: ins_pipe( fpu_reg_reg ); kvn@3882: %} kvn@3882: kvn@3882: instruct Repl4I(vecX dst, rRegI src) %{ kvn@3882: predicate(n->as_Vector()->length() == 4); kvn@3882: match(Set dst (ReplicateI src)); kvn@3882: format %{ "movd $dst,$src\n\t" kvn@3882: "pshufd $dst,$dst,0x00\t! replicate4I" %} kvn@3882: ins_encode %{ kvn@3882: __ movdl($dst$$XMMRegister, $src$$Register); kvn@3882: __ pshufd($dst$$XMMRegister, $dst$$XMMRegister, 0x00); kvn@3882: %} kvn@3882: ins_pipe( pipe_slow ); kvn@3882: %} kvn@3882: kvn@3882: instruct Repl8I(vecY dst, rRegI src) %{ kvn@3882: predicate(n->as_Vector()->length() == 8); kvn@3882: match(Set dst (ReplicateI src)); kvn@3882: format %{ "movd $dst,$src\n\t" kvn@3882: "pshufd $dst,$dst,0x00\n\t" kvn@3929: "vinserti128h $dst,$dst,$dst\t! replicate8I" %} kvn@3882: ins_encode %{ kvn@3882: __ movdl($dst$$XMMRegister, $src$$Register); kvn@3882: __ pshufd($dst$$XMMRegister, $dst$$XMMRegister, 0x00); kvn@3929: __ vinserti128h($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister); kvn@3882: %} kvn@3882: ins_pipe( pipe_slow ); kvn@3882: %} kvn@3882: kvn@3882: // Replicate integer (4 byte) scalar immediate to be vector by loading from const table. kvn@3882: instruct Repl2I_imm(vecD dst, immI con) %{ kvn@3882: predicate(n->as_Vector()->length() == 2); kvn@3882: match(Set dst (ReplicateI con)); kvn@3929: format %{ "movq $dst,[$constantaddress]\t! replicate2I($con)" %} kvn@3882: ins_encode %{ kvn@3929: __ movq($dst$$XMMRegister, $constantaddress(replicate8_imm($con$$constant, 4))); kvn@3882: %} kvn@3882: ins_pipe( fpu_reg_reg ); kvn@3882: %} kvn@3882: kvn@3882: instruct Repl4I_imm(vecX dst, immI con) %{ kvn@3882: predicate(n->as_Vector()->length() == 4); kvn@3882: match(Set dst (ReplicateI con)); kvn@3929: format %{ "movq $dst,[$constantaddress]\t! replicate4I($con)\n\t" kvn@3929: "punpcklqdq $dst,$dst" %} kvn@3882: ins_encode %{ kvn@3929: __ movq($dst$$XMMRegister, $constantaddress(replicate8_imm($con$$constant, 4))); kvn@3929: __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister); kvn@3882: %} kvn@3882: ins_pipe( pipe_slow ); kvn@3882: %} kvn@3882: kvn@3882: instruct Repl8I_imm(vecY dst, immI con) %{ kvn@3882: predicate(n->as_Vector()->length() == 8); kvn@3882: match(Set dst (ReplicateI con)); kvn@3929: format %{ "movq $dst,[$constantaddress]\t! 
replicate8I($con)\n\t" kvn@3929: "punpcklqdq $dst,$dst\n\t" kvn@3929: "vinserti128h $dst,$dst,$dst" %} kvn@3882: ins_encode %{ kvn@3929: __ movq($dst$$XMMRegister, $constantaddress(replicate8_imm($con$$constant, 4))); kvn@3929: __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister); kvn@3929: __ vinserti128h($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister); kvn@3882: %} kvn@3882: ins_pipe( pipe_slow ); kvn@3882: %} kvn@3882: kvn@3882: // An integer can be loaded into an XMM register directly from memory. kvn@3882: instruct Repl2I_mem(vecD dst, memory mem) %{ kvn@3882: predicate(n->as_Vector()->length() == 2); kvn@3929: match(Set dst (ReplicateI (LoadI mem))); kvn@3882: format %{ "movd $dst,$mem\n\t" kvn@3882: "pshufd $dst,$dst,0x00\t! replicate2I" %} kvn@3882: ins_encode %{ kvn@3882: __ movdl($dst$$XMMRegister, $mem$$Address); kvn@3882: __ pshufd($dst$$XMMRegister, $dst$$XMMRegister, 0x00); kvn@3882: %} kvn@3882: ins_pipe( fpu_reg_reg ); kvn@3882: %} kvn@3882: kvn@3882: instruct Repl4I_mem(vecX dst, memory mem) %{ kvn@3882: predicate(n->as_Vector()->length() == 4); kvn@3929: match(Set dst (ReplicateI (LoadI mem))); kvn@3882: format %{ "movd $dst,$mem\n\t" kvn@3882: "pshufd $dst,$dst,0x00\t! replicate4I" %} kvn@3882: ins_encode %{ kvn@3882: __ movdl($dst$$XMMRegister, $mem$$Address); kvn@3882: __ pshufd($dst$$XMMRegister, $dst$$XMMRegister, 0x00); kvn@3882: %} kvn@3882: ins_pipe( pipe_slow ); kvn@3882: %} kvn@3882: kvn@3882: instruct Repl8I_mem(vecY dst, memory mem) %{ kvn@3882: predicate(n->as_Vector()->length() == 8); kvn@3929: match(Set dst (ReplicateI (LoadI mem))); kvn@3882: format %{ "movd $dst,$mem\n\t" kvn@3882: "pshufd $dst,$dst,0x00\n\t" kvn@3929: "vinserti128h $dst,$dst,$dst\t! replicate8I" %} kvn@3882: ins_encode %{ kvn@3882: __ movdl($dst$$XMMRegister, $mem$$Address); kvn@3882: __ pshufd($dst$$XMMRegister, $dst$$XMMRegister, 0x00); kvn@3929: __ vinserti128h($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister); kvn@3882: %} kvn@3882: ins_pipe( pipe_slow ); kvn@3882: %} kvn@3882: kvn@3882: // Replicate integer (4 byte) scalar zero to be vector kvn@3882: instruct Repl2I_zero(vecD dst, immI0 zero) %{ kvn@3882: predicate(n->as_Vector()->length() == 2); kvn@3882: match(Set dst (ReplicateI zero)); kvn@3882: format %{ "pxor $dst,$dst\t! replicate2I zero" %} kvn@3882: ins_encode %{ kvn@3882: __ pxor($dst$$XMMRegister, $dst$$XMMRegister); kvn@3882: %} kvn@3882: ins_pipe( fpu_reg_reg ); kvn@3882: %} kvn@3882: kvn@3882: instruct Repl4I_zero(vecX dst, immI0 zero) %{ kvn@3882: predicate(n->as_Vector()->length() == 4); kvn@3882: match(Set dst (ReplicateI zero)); kvn@3882: format %{ "pxor $dst,$dst\t! replicate4I zero" %} kvn@3882: ins_encode %{ kvn@3882: __ pxor($dst$$XMMRegister, $dst$$XMMRegister); kvn@3882: %} kvn@3882: ins_pipe( fpu_reg_reg ); kvn@3882: %} kvn@3882: kvn@3882: instruct Repl8I_zero(vecY dst, immI0 zero) %{ kvn@3882: predicate(n->as_Vector()->length() == 8); kvn@3882: match(Set dst (ReplicateI zero)); kvn@3929: format %{ "vpxor $dst,$dst,$dst\t! replicate8I zero" %} kvn@3882: ins_encode %{ kvn@3882: // Use vpxor to clear all 256 bits; its 256-bit form requires AVX2 (plain AVX only has vxorps/vxorpd at 256 bits).
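  // XORing a register with itself is the canonical zeroing idiom; the hardware
  // recognizes it as dependency-breaking, so no prior value of $dst is actually read.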
kvn@3882: bool vector256 = true; kvn@3929: __ vpxor($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister, vector256); kvn@3882: %} kvn@3882: ins_pipe( fpu_reg_reg ); kvn@3882: %} kvn@3882: kvn@3882: // Replicate long (8 byte) scalar to be vector kvn@3882: #ifdef _LP64 kvn@3882: instruct Repl2L(vecX dst, rRegL src) %{ kvn@3882: predicate(n->as_Vector()->length() == 2); kvn@3882: match(Set dst (ReplicateL src)); kvn@3882: format %{ "movdq $dst,$src\n\t" kvn@3929: "punpcklqdq $dst,$dst\t! replicate2L" %} kvn@3882: ins_encode %{ kvn@3882: __ movdq($dst$$XMMRegister, $src$$Register); kvn@3929: __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister); kvn@3882: %} kvn@3882: ins_pipe( pipe_slow ); kvn@3882: %} kvn@3882: kvn@3882: instruct Repl4L(vecY dst, rRegL src) %{ kvn@3882: predicate(n->as_Vector()->length() == 4); kvn@3882: match(Set dst (ReplicateL src)); kvn@3882: format %{ "movdq $dst,$src\n\t" kvn@3929: "punpcklqdq $dst,$dst\n\t" kvn@3929: "vinserti128h $dst,$dst,$dst\t! replicate4L" %} kvn@3882: ins_encode %{ kvn@3882: __ movdq($dst$$XMMRegister, $src$$Register); kvn@3929: __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister); kvn@3929: __ vinserti128h($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister); kvn@3882: %} kvn@3882: ins_pipe( pipe_slow ); kvn@3882: %} kvn@3882: #else // _LP64 kvn@3882: instruct Repl2L(vecX dst, eRegL src, regD tmp) %{ kvn@3882: predicate(n->as_Vector()->length() == 2); kvn@3882: match(Set dst (ReplicateL src)); kvn@3882: effect(TEMP dst, USE src, TEMP tmp); kvn@3882: format %{ "movdl $dst,$src.lo\n\t" kvn@3882: "movdl $tmp,$src.hi\n\t" kvn@3882: "punpckldq $dst,$tmp\n\t" kvn@3929: "punpcklqdq $dst,$dst\t! replicate2L"%} kvn@3882: ins_encode %{ kvn@3882: __ movdl($dst$$XMMRegister, $src$$Register); kvn@3882: __ movdl($tmp$$XMMRegister, HIGH_FROM_LOW($src$$Register)); kvn@3882: __ punpckldq($dst$$XMMRegister, $tmp$$XMMRegister); kvn@3929: __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister); kvn@3882: %} kvn@3882: ins_pipe( pipe_slow ); kvn@3882: %} kvn@3882: kvn@3882: instruct Repl4L(vecY dst, eRegL src, regD tmp) %{ kvn@3882: predicate(n->as_Vector()->length() == 4); kvn@3882: match(Set dst (ReplicateL src)); kvn@3882: effect(TEMP dst, USE src, TEMP tmp); kvn@3882: format %{ "movdl $dst,$src.lo\n\t" kvn@3882: "movdl $tmp,$src.hi\n\t" kvn@3882: "punpckldq $dst,$tmp\n\t" kvn@3929: "punpcklqdq $dst,$dst\n\t" kvn@3929: "vinserti128h $dst,$dst,$dst\t! replicate4L" %} kvn@3882: ins_encode %{ kvn@3882: __ movdl($dst$$XMMRegister, $src$$Register); kvn@3882: __ movdl($tmp$$XMMRegister, HIGH_FROM_LOW($src$$Register)); kvn@3882: __ punpckldq($dst$$XMMRegister, $tmp$$XMMRegister); kvn@3929: __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister); kvn@3929: __ vinserti128h($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister); kvn@3882: %} kvn@3882: ins_pipe( pipe_slow ); kvn@3882: %} kvn@3882: #endif // _LP64 kvn@3882: kvn@3882: // Replicate long (8 byte) scalar immediate to be vector by loading from const table. kvn@3882: instruct Repl2L_imm(vecX dst, immL con) %{ kvn@3882: predicate(n->as_Vector()->length() == 2); kvn@3882: match(Set dst (ReplicateL con)); kvn@3929: format %{ "movq $dst,[$constantaddress]\n\t" kvn@3929: "punpcklqdq $dst,$dst\t! 
replicate2L($con)" %} kvn@3882: ins_encode %{ kvn@3929: __ movq($dst$$XMMRegister, $constantaddress($con)); kvn@3929: __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister); kvn@3882: %} kvn@3882: ins_pipe( pipe_slow ); kvn@3882: %} kvn@3882: kvn@3882: instruct Repl4L_imm(vecY dst, immL con) %{ kvn@3882: predicate(n->as_Vector()->length() == 4); kvn@3882: match(Set dst (ReplicateL con)); kvn@3929: format %{ "movq $dst,[$constantaddress]\n\t" kvn@3929: "punpcklqdq $dst,$dst\n\t" kvn@3929: "vinserti128h $dst,$dst,$dst\t! replicate4L($con)" %} kvn@3882: ins_encode %{ kvn@3929: __ movq($dst$$XMMRegister, $constantaddress($con)); kvn@3929: __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister); kvn@3929: __ vinserti128h($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister); kvn@3882: %} kvn@3882: ins_pipe( pipe_slow ); kvn@3882: %} kvn@3882: kvn@3882: // Long could be loaded into xmm register directly from memory. kvn@3882: instruct Repl2L_mem(vecX dst, memory mem) %{ kvn@3882: predicate(n->as_Vector()->length() == 2); kvn@3929: match(Set dst (ReplicateL (LoadL mem))); kvn@3882: format %{ "movq $dst,$mem\n\t" kvn@3929: "punpcklqdq $dst,$dst\t! replicate2L" %} kvn@3882: ins_encode %{ kvn@3882: __ movq($dst$$XMMRegister, $mem$$Address); kvn@3929: __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister); kvn@3882: %} kvn@3882: ins_pipe( pipe_slow ); kvn@3882: %} kvn@3882: kvn@3882: instruct Repl4L_mem(vecY dst, memory mem) %{ kvn@3882: predicate(n->as_Vector()->length() == 4); kvn@3929: match(Set dst (ReplicateL (LoadL mem))); kvn@3882: format %{ "movq $dst,$mem\n\t" kvn@3929: "punpcklqdq $dst,$dst\n\t" kvn@3929: "vinserti128h $dst,$dst,$dst\t! replicate4L" %} kvn@3882: ins_encode %{ kvn@3882: __ movq($dst$$XMMRegister, $mem$$Address); kvn@3929: __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister); kvn@3929: __ vinserti128h($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister); kvn@3882: %} kvn@3882: ins_pipe( pipe_slow ); kvn@3882: %} kvn@3882: kvn@3882: // Replicate long (8 byte) scalar zero to be vector kvn@3882: instruct Repl2L_zero(vecX dst, immL0 zero) %{ kvn@3882: predicate(n->as_Vector()->length() == 2); kvn@3882: match(Set dst (ReplicateL zero)); kvn@3882: format %{ "pxor $dst,$dst\t! replicate2L zero" %} kvn@3882: ins_encode %{ kvn@3882: __ pxor($dst$$XMMRegister, $dst$$XMMRegister); kvn@3882: %} kvn@3882: ins_pipe( fpu_reg_reg ); kvn@3882: %} kvn@3882: kvn@3882: instruct Repl4L_zero(vecY dst, immL0 zero) %{ kvn@3882: predicate(n->as_Vector()->length() == 4); kvn@3882: match(Set dst (ReplicateL zero)); kvn@3929: format %{ "vpxor $dst,$dst,$dst\t! replicate4L zero" %} kvn@3882: ins_encode %{ kvn@3882: // Use vxorpd since AVX does not have vpxor for 256-bit (AVX2 will have it). kvn@3882: bool vector256 = true; kvn@3929: __ vpxor($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister, vector256); kvn@3882: %} kvn@3882: ins_pipe( fpu_reg_reg ); kvn@3882: %} kvn@3882: kvn@3882: // Replicate float (4 byte) scalar to be vector kvn@3882: instruct Repl2F(vecD dst, regF src) %{ kvn@3882: predicate(n->as_Vector()->length() == 2); kvn@3882: match(Set dst (ReplicateF src)); kvn@3882: format %{ "pshufd $dst,$dst,0x00\t! replicate2F" %} kvn@3882: ins_encode %{ kvn@3882: __ pshufd($dst$$XMMRegister, $src$$XMMRegister, 0x00); kvn@3882: %} kvn@3882: ins_pipe( fpu_reg_reg ); kvn@3882: %} kvn@3882: kvn@3882: instruct Repl4F(vecX dst, regF src) %{ kvn@3882: predicate(n->as_Vector()->length() == 4); kvn@3882: match(Set dst (ReplicateF src)); kvn@3882: format %{ "pshufd $dst,$dst,0x00\t! 
replicate4F" %} kvn@3882: ins_encode %{ kvn@3882: __ pshufd($dst$$XMMRegister, $src$$XMMRegister, 0x00); kvn@3882: %} kvn@3882: ins_pipe( pipe_slow ); kvn@3882: %} kvn@3882: kvn@3882: instruct Repl8F(vecY dst, regF src) %{ kvn@3882: predicate(n->as_Vector()->length() == 8); kvn@3882: match(Set dst (ReplicateF src)); kvn@3882: format %{ "pshufd $dst,$src,0x00\n\t" kvn@3882: "vinsertf128h $dst,$dst,$dst\t! replicate8F" %} kvn@3882: ins_encode %{ kvn@3882: __ pshufd($dst$$XMMRegister, $src$$XMMRegister, 0x00); kvn@3882: __ vinsertf128h($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister); kvn@3882: %} kvn@3882: ins_pipe( pipe_slow ); kvn@3882: %} kvn@3882: kvn@3882: // Replicate float (4 byte) scalar zero to be vector kvn@3882: instruct Repl2F_zero(vecD dst, immF0 zero) %{ kvn@3882: predicate(n->as_Vector()->length() == 2); kvn@3882: match(Set dst (ReplicateF zero)); kvn@3882: format %{ "xorps $dst,$dst\t! replicate2F zero" %} kvn@3882: ins_encode %{ kvn@3882: __ xorps($dst$$XMMRegister, $dst$$XMMRegister); kvn@3882: %} kvn@3882: ins_pipe( fpu_reg_reg ); kvn@3882: %} kvn@3882: kvn@3882: instruct Repl4F_zero(vecX dst, immF0 zero) %{ kvn@3882: predicate(n->as_Vector()->length() == 4); kvn@3882: match(Set dst (ReplicateF zero)); kvn@3882: format %{ "xorps $dst,$dst\t! replicate4F zero" %} kvn@3882: ins_encode %{ kvn@3882: __ xorps($dst$$XMMRegister, $dst$$XMMRegister); kvn@3882: %} kvn@3882: ins_pipe( fpu_reg_reg ); kvn@3882: %} kvn@3882: kvn@3882: instruct Repl8F_zero(vecY dst, immF0 zero) %{ kvn@3882: predicate(n->as_Vector()->length() == 8); kvn@3882: match(Set dst (ReplicateF zero)); kvn@3882: format %{ "vxorps $dst,$dst,$dst\t! replicate8F zero" %} kvn@3882: ins_encode %{ kvn@3882: bool vector256 = true; kvn@3882: __ vxorps($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister, vector256); kvn@3882: %} kvn@3882: ins_pipe( fpu_reg_reg ); kvn@3882: %} kvn@3882: kvn@3882: // Replicate double (8 bytes) scalar to be vector kvn@3882: instruct Repl2D(vecX dst, regD src) %{ kvn@3882: predicate(n->as_Vector()->length() == 2); kvn@3882: match(Set dst (ReplicateD src)); kvn@3882: format %{ "pshufd $dst,$src,0x44\t! replicate2D" %} kvn@3882: ins_encode %{ kvn@3882: __ pshufd($dst$$XMMRegister, $src$$XMMRegister, 0x44); kvn@3882: %} kvn@3882: ins_pipe( pipe_slow ); kvn@3882: %} kvn@3882: kvn@3882: instruct Repl4D(vecY dst, regD src) %{ kvn@3882: predicate(n->as_Vector()->length() == 4); kvn@3882: match(Set dst (ReplicateD src)); kvn@3882: format %{ "pshufd $dst,$src,0x44\n\t" kvn@3882: "vinsertf128h $dst,$dst,$dst\t! replicate4D" %} kvn@3882: ins_encode %{ kvn@3882: __ pshufd($dst$$XMMRegister, $src$$XMMRegister, 0x44); kvn@3882: __ vinsertf128h($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister); kvn@3882: %} kvn@3882: ins_pipe( pipe_slow ); kvn@3882: %} kvn@3882: kvn@3882: // Replicate double (8 byte) scalar zero to be vector kvn@3882: instruct Repl2D_zero(vecX dst, immD0 zero) %{ kvn@3882: predicate(n->as_Vector()->length() == 2); kvn@3882: match(Set dst (ReplicateD zero)); kvn@3882: format %{ "xorpd $dst,$dst\t! replicate2D zero" %} kvn@3882: ins_encode %{ kvn@3882: __ xorpd($dst$$XMMRegister, $dst$$XMMRegister); kvn@3882: %} kvn@3882: ins_pipe( fpu_reg_reg ); kvn@3882: %} kvn@3882: kvn@3882: instruct Repl4D_zero(vecY dst, immD0 zero) %{ kvn@3882: predicate(n->as_Vector()->length() == 4); kvn@3882: match(Set dst (ReplicateD zero)); kvn@3882: format %{ "vxorpd $dst,$dst,$dst,vect256\t! 
replicate4D zero" %} kvn@3882: ins_encode %{ kvn@3882: bool vector256 = true; kvn@3882: __ vxorpd($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister, vector256); kvn@3882: %} kvn@3882: ins_pipe( fpu_reg_reg ); kvn@3882: %} kvn@3882: kvn@4001: // ====================VECTOR ARITHMETIC======================================= kvn@4001: kvn@4001: // --------------------------------- ADD -------------------------------------- kvn@4001: kvn@4001: // Bytes vector add kvn@4001: instruct vadd4B(vecS dst, vecS src) %{ kvn@4001: predicate(n->as_Vector()->length() == 4); kvn@4001: match(Set dst (AddVB dst src)); kvn@4001: format %{ "paddb $dst,$src\t! add packed4B" %} kvn@4001: ins_encode %{ kvn@4001: __ paddb($dst$$XMMRegister, $src$$XMMRegister); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4001: instruct vadd4B_reg(vecS dst, vecS src1, vecS src2) %{ kvn@4001: predicate(UseAVX > 0 && n->as_Vector()->length() == 4); kvn@4001: match(Set dst (AddVB src1 src2)); kvn@4001: format %{ "vpaddb $dst,$src1,$src2\t! add packed4B" %} kvn@4001: ins_encode %{ kvn@4001: bool vector256 = false; kvn@4001: __ vpaddb($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4001: instruct vadd8B(vecD dst, vecD src) %{ kvn@4001: predicate(n->as_Vector()->length() == 8); kvn@4001: match(Set dst (AddVB dst src)); kvn@4001: format %{ "paddb $dst,$src\t! add packed8B" %} kvn@4001: ins_encode %{ kvn@4001: __ paddb($dst$$XMMRegister, $src$$XMMRegister); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4001: instruct vadd8B_reg(vecD dst, vecD src1, vecD src2) %{ kvn@4001: predicate(UseAVX > 0 && n->as_Vector()->length() == 8); kvn@4001: match(Set dst (AddVB src1 src2)); kvn@4001: format %{ "vpaddb $dst,$src1,$src2\t! add packed8B" %} kvn@4001: ins_encode %{ kvn@4001: bool vector256 = false; kvn@4001: __ vpaddb($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4001: instruct vadd16B(vecX dst, vecX src) %{ kvn@4001: predicate(n->as_Vector()->length() == 16); kvn@4001: match(Set dst (AddVB dst src)); kvn@4001: format %{ "paddb $dst,$src\t! add packed16B" %} kvn@4001: ins_encode %{ kvn@4001: __ paddb($dst$$XMMRegister, $src$$XMMRegister); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4001: instruct vadd16B_reg(vecX dst, vecX src1, vecX src2) %{ kvn@4001: predicate(UseAVX > 0 && n->as_Vector()->length() == 16); kvn@4001: match(Set dst (AddVB src1 src2)); kvn@4001: format %{ "vpaddb $dst,$src1,$src2\t! add packed16B" %} kvn@4001: ins_encode %{ kvn@4001: bool vector256 = false; kvn@4001: __ vpaddb($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4001: instruct vadd16B_mem(vecX dst, vecX src, memory mem) %{ kvn@4001: predicate(UseAVX > 0 && n->as_Vector()->length() == 16); kvn@4001: match(Set dst (AddVB src (LoadVector mem))); kvn@4001: format %{ "vpaddb $dst,$src,$mem\t! 
add packed16B" %} kvn@4001: ins_encode %{ kvn@4001: bool vector256 = false; kvn@4001: __ vpaddb($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector256); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4001: instruct vadd32B_reg(vecY dst, vecY src1, vecY src2) %{ kvn@4001: predicate(UseAVX > 1 && n->as_Vector()->length() == 32); kvn@4001: match(Set dst (AddVB src1 src2)); kvn@4001: format %{ "vpaddb $dst,$src1,$src2\t! add packed32B" %} kvn@4001: ins_encode %{ kvn@4001: bool vector256 = true; kvn@4001: __ vpaddb($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4001: instruct vadd32B_mem(vecY dst, vecY src, memory mem) %{ kvn@4001: predicate(UseAVX > 1 && n->as_Vector()->length() == 32); kvn@4001: match(Set dst (AddVB src (LoadVector mem))); kvn@4001: format %{ "vpaddb $dst,$src,$mem\t! add packed32B" %} kvn@4001: ins_encode %{ kvn@4001: bool vector256 = true; kvn@4001: __ vpaddb($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector256); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4001: // Shorts/Chars vector add kvn@4001: instruct vadd2S(vecS dst, vecS src) %{ kvn@4001: predicate(n->as_Vector()->length() == 2); kvn@4001: match(Set dst (AddVS dst src)); kvn@4001: format %{ "paddw $dst,$src\t! add packed2S" %} kvn@4001: ins_encode %{ kvn@4001: __ paddw($dst$$XMMRegister, $src$$XMMRegister); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4001: instruct vadd2S_reg(vecS dst, vecS src1, vecS src2) %{ kvn@4001: predicate(UseAVX > 0 && n->as_Vector()->length() == 2); kvn@4001: match(Set dst (AddVS src1 src2)); kvn@4001: format %{ "vpaddw $dst,$src1,$src2\t! add packed2S" %} kvn@4001: ins_encode %{ kvn@4001: bool vector256 = false; kvn@4001: __ vpaddw($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4001: instruct vadd4S(vecD dst, vecD src) %{ kvn@4001: predicate(n->as_Vector()->length() == 4); kvn@4001: match(Set dst (AddVS dst src)); kvn@4001: format %{ "paddw $dst,$src\t! add packed4S" %} kvn@4001: ins_encode %{ kvn@4001: __ paddw($dst$$XMMRegister, $src$$XMMRegister); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4001: instruct vadd4S_reg(vecD dst, vecD src1, vecD src2) %{ kvn@4001: predicate(UseAVX > 0 && n->as_Vector()->length() == 4); kvn@4001: match(Set dst (AddVS src1 src2)); kvn@4001: format %{ "vpaddw $dst,$src1,$src2\t! add packed4S" %} kvn@4001: ins_encode %{ kvn@4001: bool vector256 = false; kvn@4001: __ vpaddw($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4001: instruct vadd8S(vecX dst, vecX src) %{ kvn@4001: predicate(n->as_Vector()->length() == 8); kvn@4001: match(Set dst (AddVS dst src)); kvn@4001: format %{ "paddw $dst,$src\t! add packed8S" %} kvn@4001: ins_encode %{ kvn@4001: __ paddw($dst$$XMMRegister, $src$$XMMRegister); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4001: instruct vadd8S_reg(vecX dst, vecX src1, vecX src2) %{ kvn@4001: predicate(UseAVX > 0 && n->as_Vector()->length() == 8); kvn@4001: match(Set dst (AddVS src1 src2)); kvn@4001: format %{ "vpaddw $dst,$src1,$src2\t! 
add packed8S" %} kvn@4001: ins_encode %{ kvn@4001: bool vector256 = false; kvn@4001: __ vpaddw($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4001: instruct vadd8S_mem(vecX dst, vecX src, memory mem) %{ kvn@4001: predicate(UseAVX > 0 && n->as_Vector()->length() == 8); kvn@4001: match(Set dst (AddVS src (LoadVector mem))); kvn@4001: format %{ "vpaddw $dst,$src,$mem\t! add packed8S" %} kvn@4001: ins_encode %{ kvn@4001: bool vector256 = false; kvn@4001: __ vpaddw($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector256); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4001: instruct vadd16S_reg(vecY dst, vecY src1, vecY src2) %{ kvn@4001: predicate(UseAVX > 1 && n->as_Vector()->length() == 16); kvn@4001: match(Set dst (AddVS src1 src2)); kvn@4001: format %{ "vpaddw $dst,$src1,$src2\t! add packed16S" %} kvn@4001: ins_encode %{ kvn@4001: bool vector256 = true; kvn@4001: __ vpaddw($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4001: instruct vadd16S_mem(vecY dst, vecY src, memory mem) %{ kvn@4001: predicate(UseAVX > 1 && n->as_Vector()->length() == 16); kvn@4001: match(Set dst (AddVS src (LoadVector mem))); kvn@4001: format %{ "vpaddw $dst,$src,$mem\t! add packed16S" %} kvn@4001: ins_encode %{ kvn@4001: bool vector256 = true; kvn@4001: __ vpaddw($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector256); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4001: // Integers vector add kvn@4001: instruct vadd2I(vecD dst, vecD src) %{ kvn@4001: predicate(n->as_Vector()->length() == 2); kvn@4001: match(Set dst (AddVI dst src)); kvn@4001: format %{ "paddd $dst,$src\t! add packed2I" %} kvn@4001: ins_encode %{ kvn@4001: __ paddd($dst$$XMMRegister, $src$$XMMRegister); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4001: instruct vadd2I_reg(vecD dst, vecD src1, vecD src2) %{ kvn@4001: predicate(UseAVX > 0 && n->as_Vector()->length() == 2); kvn@4001: match(Set dst (AddVI src1 src2)); kvn@4001: format %{ "vpaddd $dst,$src1,$src2\t! add packed2I" %} kvn@4001: ins_encode %{ kvn@4001: bool vector256 = false; kvn@4001: __ vpaddd($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4001: instruct vadd4I(vecX dst, vecX src) %{ kvn@4001: predicate(n->as_Vector()->length() == 4); kvn@4001: match(Set dst (AddVI dst src)); kvn@4001: format %{ "paddd $dst,$src\t! add packed4I" %} kvn@4001: ins_encode %{ kvn@4001: __ paddd($dst$$XMMRegister, $src$$XMMRegister); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4001: instruct vadd4I_reg(vecX dst, vecX src1, vecX src2) %{ kvn@4001: predicate(UseAVX > 0 && n->as_Vector()->length() == 4); kvn@4001: match(Set dst (AddVI src1 src2)); kvn@4001: format %{ "vpaddd $dst,$src1,$src2\t! add packed4I" %} kvn@4001: ins_encode %{ kvn@4001: bool vector256 = false; kvn@4001: __ vpaddd($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4001: instruct vadd4I_mem(vecX dst, vecX src, memory mem) %{ kvn@4001: predicate(UseAVX > 0 && n->as_Vector()->length() == 4); kvn@4001: match(Set dst (AddVI src (LoadVector mem))); kvn@4001: format %{ "vpaddd $dst,$src,$mem\t! 
add packed4I" %} kvn@4001: ins_encode %{ kvn@4001: bool vector256 = false; kvn@4001: __ vpaddd($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector256); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4001: instruct vadd8I_reg(vecY dst, vecY src1, vecY src2) %{ kvn@4001: predicate(UseAVX > 1 && n->as_Vector()->length() == 8); kvn@4001: match(Set dst (AddVI src1 src2)); kvn@4001: format %{ "vpaddd $dst,$src1,$src2\t! add packed8I" %} kvn@4001: ins_encode %{ kvn@4001: bool vector256 = true; kvn@4001: __ vpaddd($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4001: instruct vadd8I_mem(vecY dst, vecY src, memory mem) %{ kvn@4001: predicate(UseAVX > 1 && n->as_Vector()->length() == 8); kvn@4001: match(Set dst (AddVI src (LoadVector mem))); kvn@4001: format %{ "vpaddd $dst,$src,$mem\t! add packed8I" %} kvn@4001: ins_encode %{ kvn@4001: bool vector256 = true; kvn@4001: __ vpaddd($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector256); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4001: // Longs vector add kvn@4001: instruct vadd2L(vecX dst, vecX src) %{ kvn@4001: predicate(n->as_Vector()->length() == 2); kvn@4001: match(Set dst (AddVL dst src)); kvn@4001: format %{ "paddq $dst,$src\t! add packed2L" %} kvn@4001: ins_encode %{ kvn@4001: __ paddq($dst$$XMMRegister, $src$$XMMRegister); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4001: instruct vadd2L_reg(vecX dst, vecX src1, vecX src2) %{ kvn@4001: predicate(UseAVX > 0 && n->as_Vector()->length() == 2); kvn@4001: match(Set dst (AddVL src1 src2)); kvn@4001: format %{ "vpaddq $dst,$src1,$src2\t! add packed2L" %} kvn@4001: ins_encode %{ kvn@4001: bool vector256 = false; kvn@4001: __ vpaddq($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4001: instruct vadd2L_mem(vecX dst, vecX src, memory mem) %{ kvn@4001: predicate(UseAVX > 0 && n->as_Vector()->length() == 2); kvn@4001: match(Set dst (AddVL src (LoadVector mem))); kvn@4001: format %{ "vpaddq $dst,$src,$mem\t! add packed2L" %} kvn@4001: ins_encode %{ kvn@4001: bool vector256 = false; kvn@4001: __ vpaddq($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector256); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4001: instruct vadd4L_reg(vecY dst, vecY src1, vecY src2) %{ kvn@4001: predicate(UseAVX > 1 && n->as_Vector()->length() == 4); kvn@4001: match(Set dst (AddVL src1 src2)); kvn@4001: format %{ "vpaddq $dst,$src1,$src2\t! add packed4L" %} kvn@4001: ins_encode %{ kvn@4001: bool vector256 = true; kvn@4001: __ vpaddq($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4001: instruct vadd4L_mem(vecY dst, vecY src, memory mem) %{ kvn@4001: predicate(UseAVX > 1 && n->as_Vector()->length() == 4); kvn@4001: match(Set dst (AddVL src (LoadVector mem))); kvn@4001: format %{ "vpaddq $dst,$src,$mem\t! 
add packed4L" %} kvn@4001: ins_encode %{ kvn@4001: bool vector256 = true; kvn@4001: __ vpaddq($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector256); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4001: // Floats vector add kvn@4001: instruct vadd2F(vecD dst, vecD src) %{ kvn@4001: predicate(n->as_Vector()->length() == 2); kvn@4001: match(Set dst (AddVF dst src)); kvn@4001: format %{ "addps $dst,$src\t! add packed2F" %} kvn@4001: ins_encode %{ kvn@4001: __ addps($dst$$XMMRegister, $src$$XMMRegister); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4001: instruct vadd2F_reg(vecD dst, vecD src1, vecD src2) %{ kvn@4001: predicate(UseAVX > 0 && n->as_Vector()->length() == 2); kvn@4001: match(Set dst (AddVF src1 src2)); kvn@4001: format %{ "vaddps $dst,$src1,$src2\t! add packed2F" %} kvn@4001: ins_encode %{ kvn@4001: bool vector256 = false; kvn@4001: __ vaddps($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4001: instruct vadd4F(vecX dst, vecX src) %{ kvn@4001: predicate(n->as_Vector()->length() == 4); kvn@4001: match(Set dst (AddVF dst src)); kvn@4001: format %{ "addps $dst,$src\t! add packed4F" %} kvn@4001: ins_encode %{ kvn@4001: __ addps($dst$$XMMRegister, $src$$XMMRegister); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4001: instruct vadd4F_reg(vecX dst, vecX src1, vecX src2) %{ kvn@4001: predicate(UseAVX > 0 && n->as_Vector()->length() == 4); kvn@4001: match(Set dst (AddVF src1 src2)); kvn@4001: format %{ "vaddps $dst,$src1,$src2\t! add packed4F" %} kvn@4001: ins_encode %{ kvn@4001: bool vector256 = false; kvn@4001: __ vaddps($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4001: instruct vadd4F_mem(vecX dst, vecX src, memory mem) %{ kvn@4001: predicate(UseAVX > 0 && n->as_Vector()->length() == 4); kvn@4001: match(Set dst (AddVF src (LoadVector mem))); kvn@4001: format %{ "vaddps $dst,$src,$mem\t! add packed4F" %} kvn@4001: ins_encode %{ kvn@4001: bool vector256 = false; kvn@4001: __ vaddps($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector256); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4001: instruct vadd8F_reg(vecY dst, vecY src1, vecY src2) %{ kvn@4001: predicate(UseAVX > 0 && n->as_Vector()->length() == 8); kvn@4001: match(Set dst (AddVF src1 src2)); kvn@4001: format %{ "vaddps $dst,$src1,$src2\t! add packed8F" %} kvn@4001: ins_encode %{ kvn@4001: bool vector256 = true; kvn@4001: __ vaddps($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4001: instruct vadd8F_mem(vecY dst, vecY src, memory mem) %{ kvn@4001: predicate(UseAVX > 0 && n->as_Vector()->length() == 8); kvn@4001: match(Set dst (AddVF src (LoadVector mem))); kvn@4001: format %{ "vaddps $dst,$src,$mem\t! add packed8F" %} kvn@4001: ins_encode %{ kvn@4001: bool vector256 = true; kvn@4001: __ vaddps($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector256); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4001: // Doubles vector add kvn@4001: instruct vadd2D(vecX dst, vecX src) %{ kvn@4001: predicate(n->as_Vector()->length() == 2); kvn@4001: match(Set dst (AddVD dst src)); kvn@4001: format %{ "addpd $dst,$src\t! 
add packed2D" %} kvn@4001: ins_encode %{ kvn@4001: __ addpd($dst$$XMMRegister, $src$$XMMRegister); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4001: instruct vadd2D_reg(vecX dst, vecX src1, vecX src2) %{ kvn@4001: predicate(UseAVX > 0 && n->as_Vector()->length() == 2); kvn@4001: match(Set dst (AddVD src1 src2)); kvn@4001: format %{ "vaddpd $dst,$src1,$src2\t! add packed2D" %} kvn@4001: ins_encode %{ kvn@4001: bool vector256 = false; kvn@4001: __ vaddpd($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4001: instruct vadd2D_mem(vecX dst, vecX src, memory mem) %{ kvn@4001: predicate(UseAVX > 0 && n->as_Vector()->length() == 2); kvn@4001: match(Set dst (AddVD src (LoadVector mem))); kvn@4001: format %{ "vaddpd $dst,$src,$mem\t! add packed2D" %} kvn@4001: ins_encode %{ kvn@4001: bool vector256 = false; kvn@4001: __ vaddpd($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector256); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4001: instruct vadd4D_reg(vecY dst, vecY src1, vecY src2) %{ kvn@4001: predicate(UseAVX > 0 && n->as_Vector()->length() == 4); kvn@4001: match(Set dst (AddVD src1 src2)); kvn@4001: format %{ "vaddpd $dst,$src1,$src2\t! add packed4D" %} kvn@4001: ins_encode %{ kvn@4001: bool vector256 = true; kvn@4001: __ vaddpd($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4001: instruct vadd4D_mem(vecY dst, vecY src, memory mem) %{ kvn@4001: predicate(UseAVX > 0 && n->as_Vector()->length() == 4); kvn@4001: match(Set dst (AddVD src (LoadVector mem))); kvn@4001: format %{ "vaddpd $dst,$src,$mem\t! add packed4D" %} kvn@4001: ins_encode %{ kvn@4001: bool vector256 = true; kvn@4001: __ vaddpd($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector256); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4001: // --------------------------------- SUB -------------------------------------- kvn@4001: kvn@4001: // Bytes vector sub kvn@4001: instruct vsub4B(vecS dst, vecS src) %{ kvn@4001: predicate(n->as_Vector()->length() == 4); kvn@4001: match(Set dst (SubVB dst src)); kvn@4001: format %{ "psubb $dst,$src\t! sub packed4B" %} kvn@4001: ins_encode %{ kvn@4001: __ psubb($dst$$XMMRegister, $src$$XMMRegister); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4001: instruct vsub4B_reg(vecS dst, vecS src1, vecS src2) %{ kvn@4001: predicate(UseAVX > 0 && n->as_Vector()->length() == 4); kvn@4001: match(Set dst (SubVB src1 src2)); kvn@4001: format %{ "vpsubb $dst,$src1,$src2\t! sub packed4B" %} kvn@4001: ins_encode %{ kvn@4001: bool vector256 = false; kvn@4001: __ vpsubb($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4001: instruct vsub8B(vecD dst, vecD src) %{ kvn@4001: predicate(n->as_Vector()->length() == 8); kvn@4001: match(Set dst (SubVB dst src)); kvn@4001: format %{ "psubb $dst,$src\t! sub packed8B" %} kvn@4001: ins_encode %{ kvn@4001: __ psubb($dst$$XMMRegister, $src$$XMMRegister); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4001: instruct vsub8B_reg(vecD dst, vecD src1, vecD src2) %{ kvn@4001: predicate(UseAVX > 0 && n->as_Vector()->length() == 8); kvn@4001: match(Set dst (SubVB src1 src2)); kvn@4001: format %{ "vpsubb $dst,$src1,$src2\t! 
sub packed8B" %} kvn@4001: ins_encode %{ kvn@4001: bool vector256 = false; kvn@4001: __ vpsubb($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4001: instruct vsub16B(vecX dst, vecX src) %{ kvn@4001: predicate(n->as_Vector()->length() == 16); kvn@4001: match(Set dst (SubVB dst src)); kvn@4001: format %{ "psubb $dst,$src\t! sub packed16B" %} kvn@4001: ins_encode %{ kvn@4001: __ psubb($dst$$XMMRegister, $src$$XMMRegister); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4001: instruct vsub16B_reg(vecX dst, vecX src1, vecX src2) %{ kvn@4001: predicate(UseAVX > 0 && n->as_Vector()->length() == 16); kvn@4001: match(Set dst (SubVB src1 src2)); kvn@4001: format %{ "vpsubb $dst,$src1,$src2\t! sub packed16B" %} kvn@4001: ins_encode %{ kvn@4001: bool vector256 = false; kvn@4001: __ vpsubb($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4001: instruct vsub16B_mem(vecX dst, vecX src, memory mem) %{ kvn@4001: predicate(UseAVX > 0 && n->as_Vector()->length() == 16); kvn@4001: match(Set dst (SubVB src (LoadVector mem))); kvn@4001: format %{ "vpsubb $dst,$src,$mem\t! sub packed16B" %} kvn@4001: ins_encode %{ kvn@4001: bool vector256 = false; kvn@4001: __ vpsubb($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector256); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4001: instruct vsub32B_reg(vecY dst, vecY src1, vecY src2) %{ kvn@4001: predicate(UseAVX > 1 && n->as_Vector()->length() == 32); kvn@4001: match(Set dst (SubVB src1 src2)); kvn@4001: format %{ "vpsubb $dst,$src1,$src2\t! sub packed32B" %} kvn@4001: ins_encode %{ kvn@4001: bool vector256 = true; kvn@4001: __ vpsubb($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4001: instruct vsub32B_mem(vecY dst, vecY src, memory mem) %{ kvn@4001: predicate(UseAVX > 1 && n->as_Vector()->length() == 32); kvn@4001: match(Set dst (SubVB src (LoadVector mem))); kvn@4001: format %{ "vpsubb $dst,$src,$mem\t! sub packed32B" %} kvn@4001: ins_encode %{ kvn@4001: bool vector256 = true; kvn@4001: __ vpsubb($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector256); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4001: // Shorts/Chars vector sub kvn@4001: instruct vsub2S(vecS dst, vecS src) %{ kvn@4001: predicate(n->as_Vector()->length() == 2); kvn@4001: match(Set dst (SubVS dst src)); kvn@4001: format %{ "psubw $dst,$src\t! sub packed2S" %} kvn@4001: ins_encode %{ kvn@4001: __ psubw($dst$$XMMRegister, $src$$XMMRegister); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4001: instruct vsub2S_reg(vecS dst, vecS src1, vecS src2) %{ kvn@4001: predicate(UseAVX > 0 && n->as_Vector()->length() == 2); kvn@4001: match(Set dst (SubVS src1 src2)); kvn@4001: format %{ "vpsubw $dst,$src1,$src2\t! sub packed2S" %} kvn@4001: ins_encode %{ kvn@4001: bool vector256 = false; kvn@4001: __ vpsubw($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4001: instruct vsub4S(vecD dst, vecD src) %{ kvn@4001: predicate(n->as_Vector()->length() == 4); kvn@4001: match(Set dst (SubVS dst src)); kvn@4001: format %{ "psubw $dst,$src\t! 
sub packed4S" %} kvn@4001: ins_encode %{ kvn@4001: __ psubw($dst$$XMMRegister, $src$$XMMRegister); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4001: instruct vsub4S_reg(vecD dst, vecD src1, vecD src2) %{ kvn@4001: predicate(UseAVX > 0 && n->as_Vector()->length() == 4); kvn@4001: match(Set dst (SubVS src1 src2)); kvn@4001: format %{ "vpsubw $dst,$src1,$src2\t! sub packed4S" %} kvn@4001: ins_encode %{ kvn@4001: bool vector256 = false; kvn@4001: __ vpsubw($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4001: instruct vsub8S(vecX dst, vecX src) %{ kvn@4001: predicate(n->as_Vector()->length() == 8); kvn@4001: match(Set dst (SubVS dst src)); kvn@4001: format %{ "psubw $dst,$src\t! sub packed8S" %} kvn@4001: ins_encode %{ kvn@4001: __ psubw($dst$$XMMRegister, $src$$XMMRegister); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4001: instruct vsub8S_reg(vecX dst, vecX src1, vecX src2) %{ kvn@4001: predicate(UseAVX > 0 && n->as_Vector()->length() == 8); kvn@4001: match(Set dst (SubVS src1 src2)); kvn@4001: format %{ "vpsubw $dst,$src1,$src2\t! sub packed8S" %} kvn@4001: ins_encode %{ kvn@4001: bool vector256 = false; kvn@4001: __ vpsubw($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4001: instruct vsub8S_mem(vecX dst, vecX src, memory mem) %{ kvn@4001: predicate(UseAVX > 0 && n->as_Vector()->length() == 8); kvn@4001: match(Set dst (SubVS src (LoadVector mem))); kvn@4001: format %{ "vpsubw $dst,$src,$mem\t! sub packed8S" %} kvn@4001: ins_encode %{ kvn@4001: bool vector256 = false; kvn@4001: __ vpsubw($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector256); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4001: instruct vsub16S_reg(vecY dst, vecY src1, vecY src2) %{ kvn@4001: predicate(UseAVX > 1 && n->as_Vector()->length() == 16); kvn@4001: match(Set dst (SubVS src1 src2)); kvn@4001: format %{ "vpsubw $dst,$src1,$src2\t! sub packed16S" %} kvn@4001: ins_encode %{ kvn@4001: bool vector256 = true; kvn@4001: __ vpsubw($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4001: instruct vsub16S_mem(vecY dst, vecY src, memory mem) %{ kvn@4001: predicate(UseAVX > 1 && n->as_Vector()->length() == 16); kvn@4001: match(Set dst (SubVS src (LoadVector mem))); kvn@4001: format %{ "vpsubw $dst,$src,$mem\t! sub packed16S" %} kvn@4001: ins_encode %{ kvn@4001: bool vector256 = true; kvn@4001: __ vpsubw($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector256); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4001: // Integers vector sub kvn@4001: instruct vsub2I(vecD dst, vecD src) %{ kvn@4001: predicate(n->as_Vector()->length() == 2); kvn@4001: match(Set dst (SubVI dst src)); kvn@4001: format %{ "psubd $dst,$src\t! sub packed2I" %} kvn@4001: ins_encode %{ kvn@4001: __ psubd($dst$$XMMRegister, $src$$XMMRegister); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4001: instruct vsub2I_reg(vecD dst, vecD src1, vecD src2) %{ kvn@4001: predicate(UseAVX > 0 && n->as_Vector()->length() == 2); kvn@4001: match(Set dst (SubVI src1 src2)); kvn@4001: format %{ "vpsubd $dst,$src1,$src2\t! 
sub packed2I" %} kvn@4001: ins_encode %{ kvn@4001: bool vector256 = false; kvn@4001: __ vpsubd($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4001: instruct vsub4I(vecX dst, vecX src) %{ kvn@4001: predicate(n->as_Vector()->length() == 4); kvn@4001: match(Set dst (SubVI dst src)); kvn@4001: format %{ "psubd $dst,$src\t! sub packed4I" %} kvn@4001: ins_encode %{ kvn@4001: __ psubd($dst$$XMMRegister, $src$$XMMRegister); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4001: instruct vsub4I_reg(vecX dst, vecX src1, vecX src2) %{ kvn@4001: predicate(UseAVX > 0 && n->as_Vector()->length() == 4); kvn@4001: match(Set dst (SubVI src1 src2)); kvn@4001: format %{ "vpsubd $dst,$src1,$src2\t! sub packed4I" %} kvn@4001: ins_encode %{ kvn@4001: bool vector256 = false; kvn@4001: __ vpsubd($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4001: instruct vsub4I_mem(vecX dst, vecX src, memory mem) %{ kvn@4001: predicate(UseAVX > 0 && n->as_Vector()->length() == 4); kvn@4001: match(Set dst (SubVI src (LoadVector mem))); kvn@4001: format %{ "vpsubd $dst,$src,$mem\t! sub packed4I" %} kvn@4001: ins_encode %{ kvn@4001: bool vector256 = false; kvn@4001: __ vpsubd($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector256); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4001: instruct vsub8I_reg(vecY dst, vecY src1, vecY src2) %{ kvn@4001: predicate(UseAVX > 1 && n->as_Vector()->length() == 8); kvn@4001: match(Set dst (SubVI src1 src2)); kvn@4001: format %{ "vpsubd $dst,$src1,$src2\t! sub packed8I" %} kvn@4001: ins_encode %{ kvn@4001: bool vector256 = true; kvn@4001: __ vpsubd($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4001: instruct vsub8I_mem(vecY dst, vecY src, memory mem) %{ kvn@4001: predicate(UseAVX > 1 && n->as_Vector()->length() == 8); kvn@4001: match(Set dst (SubVI src (LoadVector mem))); kvn@4001: format %{ "vpsubd $dst,$src,$mem\t! sub packed8I" %} kvn@4001: ins_encode %{ kvn@4001: bool vector256 = true; kvn@4001: __ vpsubd($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector256); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4001: // Longs vector sub kvn@4001: instruct vsub2L(vecX dst, vecX src) %{ kvn@4001: predicate(n->as_Vector()->length() == 2); kvn@4001: match(Set dst (SubVL dst src)); kvn@4001: format %{ "psubq $dst,$src\t! sub packed2L" %} kvn@4001: ins_encode %{ kvn@4001: __ psubq($dst$$XMMRegister, $src$$XMMRegister); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4001: instruct vsub2L_reg(vecX dst, vecX src1, vecX src2) %{ kvn@4001: predicate(UseAVX > 0 && n->as_Vector()->length() == 2); kvn@4001: match(Set dst (SubVL src1 src2)); kvn@4001: format %{ "vpsubq $dst,$src1,$src2\t! sub packed2L" %} kvn@4001: ins_encode %{ kvn@4001: bool vector256 = false; kvn@4001: __ vpsubq($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4001: instruct vsub2L_mem(vecX dst, vecX src, memory mem) %{ kvn@4001: predicate(UseAVX > 0 && n->as_Vector()->length() == 2); kvn@4001: match(Set dst (SubVL src (LoadVector mem))); kvn@4001: format %{ "vpsubq $dst,$src,$mem\t! 
sub packed2L" %} kvn@4001: ins_encode %{ kvn@4001: bool vector256 = false; kvn@4001: __ vpsubq($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector256); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4001: instruct vsub4L_reg(vecY dst, vecY src1, vecY src2) %{ kvn@4001: predicate(UseAVX > 1 && n->as_Vector()->length() == 4); kvn@4001: match(Set dst (SubVL src1 src2)); kvn@4001: format %{ "vpsubq $dst,$src1,$src2\t! sub packed4L" %} kvn@4001: ins_encode %{ kvn@4001: bool vector256 = true; kvn@4001: __ vpsubq($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4001: instruct vsub4L_mem(vecY dst, vecY src, memory mem) %{ kvn@4001: predicate(UseAVX > 1 && n->as_Vector()->length() == 4); kvn@4001: match(Set dst (SubVL src (LoadVector mem))); kvn@4001: format %{ "vpsubq $dst,$src,$mem\t! sub packed4L" %} kvn@4001: ins_encode %{ kvn@4001: bool vector256 = true; kvn@4001: __ vpsubq($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector256); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4001: // Floats vector sub kvn@4001: instruct vsub2F(vecD dst, vecD src) %{ kvn@4001: predicate(n->as_Vector()->length() == 2); kvn@4001: match(Set dst (SubVF dst src)); kvn@4001: format %{ "subps $dst,$src\t! sub packed2F" %} kvn@4001: ins_encode %{ kvn@4001: __ subps($dst$$XMMRegister, $src$$XMMRegister); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4001: instruct vsub2F_reg(vecD dst, vecD src1, vecD src2) %{ kvn@4001: predicate(UseAVX > 0 && n->as_Vector()->length() == 2); kvn@4001: match(Set dst (SubVF src1 src2)); kvn@4001: format %{ "vsubps $dst,$src1,$src2\t! sub packed2F" %} kvn@4001: ins_encode %{ kvn@4001: bool vector256 = false; kvn@4001: __ vsubps($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4001: instruct vsub4F(vecX dst, vecX src) %{ kvn@4001: predicate(n->as_Vector()->length() == 4); kvn@4001: match(Set dst (SubVF dst src)); kvn@4001: format %{ "subps $dst,$src\t! sub packed4F" %} kvn@4001: ins_encode %{ kvn@4001: __ subps($dst$$XMMRegister, $src$$XMMRegister); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4001: instruct vsub4F_reg(vecX dst, vecX src1, vecX src2) %{ kvn@4001: predicate(UseAVX > 0 && n->as_Vector()->length() == 4); kvn@4001: match(Set dst (SubVF src1 src2)); kvn@4001: format %{ "vsubps $dst,$src1,$src2\t! sub packed4F" %} kvn@4001: ins_encode %{ kvn@4001: bool vector256 = false; kvn@4001: __ vsubps($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4001: instruct vsub4F_mem(vecX dst, vecX src, memory mem) %{ kvn@4001: predicate(UseAVX > 0 && n->as_Vector()->length() == 4); kvn@4001: match(Set dst (SubVF src (LoadVector mem))); kvn@4001: format %{ "vsubps $dst,$src,$mem\t! sub packed4F" %} kvn@4001: ins_encode %{ kvn@4001: bool vector256 = false; kvn@4001: __ vsubps($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector256); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4001: instruct vsub8F_reg(vecY dst, vecY src1, vecY src2) %{ kvn@4001: predicate(UseAVX > 0 && n->as_Vector()->length() == 8); kvn@4001: match(Set dst (SubVF src1 src2)); kvn@4001: format %{ "vsubps $dst,$src1,$src2\t! 
sub packed8F" %} kvn@4001: ins_encode %{ kvn@4001: bool vector256 = true; kvn@4001: __ vsubps($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4001: instruct vsub8F_mem(vecY dst, vecY src, memory mem) %{ kvn@4001: predicate(UseAVX > 0 && n->as_Vector()->length() == 8); kvn@4001: match(Set dst (SubVF src (LoadVector mem))); kvn@4001: format %{ "vsubps $dst,$src,$mem\t! sub packed8F" %} kvn@4001: ins_encode %{ kvn@4001: bool vector256 = true; kvn@4001: __ vsubps($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector256); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4001: // Doubles vector sub kvn@4001: instruct vsub2D(vecX dst, vecX src) %{ kvn@4001: predicate(n->as_Vector()->length() == 2); kvn@4001: match(Set dst (SubVD dst src)); kvn@4001: format %{ "subpd $dst,$src\t! sub packed2D" %} kvn@4001: ins_encode %{ kvn@4001: __ subpd($dst$$XMMRegister, $src$$XMMRegister); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4001: instruct vsub2D_reg(vecX dst, vecX src1, vecX src2) %{ kvn@4001: predicate(UseAVX > 0 && n->as_Vector()->length() == 2); kvn@4001: match(Set dst (SubVD src1 src2)); kvn@4001: format %{ "vsubpd $dst,$src1,$src2\t! sub packed2D" %} kvn@4001: ins_encode %{ kvn@4001: bool vector256 = false; kvn@4001: __ vsubpd($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4001: instruct vsub2D_mem(vecX dst, vecX src, memory mem) %{ kvn@4001: predicate(UseAVX > 0 && n->as_Vector()->length() == 2); kvn@4001: match(Set dst (SubVD src (LoadVector mem))); kvn@4001: format %{ "vsubpd $dst,$src,$mem\t! sub packed2D" %} kvn@4001: ins_encode %{ kvn@4001: bool vector256 = false; kvn@4001: __ vsubpd($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector256); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4001: instruct vsub4D_reg(vecY dst, vecY src1, vecY src2) %{ kvn@4001: predicate(UseAVX > 0 && n->as_Vector()->length() == 4); kvn@4001: match(Set dst (SubVD src1 src2)); kvn@4001: format %{ "vsubpd $dst,$src1,$src2\t! sub packed4D" %} kvn@4001: ins_encode %{ kvn@4001: bool vector256 = true; kvn@4001: __ vsubpd($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4001: instruct vsub4D_mem(vecY dst, vecY src, memory mem) %{ kvn@4001: predicate(UseAVX > 0 && n->as_Vector()->length() == 4); kvn@4001: match(Set dst (SubVD src (LoadVector mem))); kvn@4001: format %{ "vsubpd $dst,$src,$mem\t! sub packed4D" %} kvn@4001: ins_encode %{ kvn@4001: bool vector256 = true; kvn@4001: __ vsubpd($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector256); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4001: // --------------------------------- MUL -------------------------------------- kvn@4001: kvn@4001: // Shorts/Chars vector mul kvn@4001: instruct vmul2S(vecS dst, vecS src) %{ kvn@4001: predicate(n->as_Vector()->length() == 2); kvn@4001: match(Set dst (MulVS dst src)); kvn@4001: format %{ "pmullw $dst,$src\t! 
mul packed2S" %} kvn@4001: ins_encode %{ kvn@4001: __ pmullw($dst$$XMMRegister, $src$$XMMRegister); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4001: instruct vmul2S_reg(vecS dst, vecS src1, vecS src2) %{ kvn@4001: predicate(UseAVX > 0 && n->as_Vector()->length() == 2); kvn@4001: match(Set dst (MulVS src1 src2)); kvn@4001: format %{ "vpmullw $dst,$src1,$src2\t! mul packed2S" %} kvn@4001: ins_encode %{ kvn@4001: bool vector256 = false; kvn@4001: __ vpmullw($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4001: instruct vmul4S(vecD dst, vecD src) %{ kvn@4001: predicate(n->as_Vector()->length() == 4); kvn@4001: match(Set dst (MulVS dst src)); kvn@4001: format %{ "pmullw $dst,$src\t! mul packed4S" %} kvn@4001: ins_encode %{ kvn@4001: __ pmullw($dst$$XMMRegister, $src$$XMMRegister); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4001: instruct vmul4S_reg(vecD dst, vecD src1, vecD src2) %{ kvn@4001: predicate(UseAVX > 0 && n->as_Vector()->length() == 4); kvn@4001: match(Set dst (MulVS src1 src2)); kvn@4001: format %{ "vpmullw $dst,$src1,$src2\t! mul packed4S" %} kvn@4001: ins_encode %{ kvn@4001: bool vector256 = false; kvn@4001: __ vpmullw($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4001: instruct vmul8S(vecX dst, vecX src) %{ kvn@4001: predicate(n->as_Vector()->length() == 8); kvn@4001: match(Set dst (MulVS dst src)); kvn@4001: format %{ "pmullw $dst,$src\t! mul packed8S" %} kvn@4001: ins_encode %{ kvn@4001: __ pmullw($dst$$XMMRegister, $src$$XMMRegister); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4001: instruct vmul8S_reg(vecX dst, vecX src1, vecX src2) %{ kvn@4001: predicate(UseAVX > 0 && n->as_Vector()->length() == 8); kvn@4001: match(Set dst (MulVS src1 src2)); kvn@4001: format %{ "vpmullw $dst,$src1,$src2\t! mul packed8S" %} kvn@4001: ins_encode %{ kvn@4001: bool vector256 = false; kvn@4001: __ vpmullw($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4001: instruct vmul8S_mem(vecX dst, vecX src, memory mem) %{ kvn@4001: predicate(UseAVX > 0 && n->as_Vector()->length() == 8); kvn@4001: match(Set dst (MulVS src (LoadVector mem))); kvn@4001: format %{ "vpmullw $dst,$src,$mem\t! mul packed8S" %} kvn@4001: ins_encode %{ kvn@4001: bool vector256 = false; kvn@4001: __ vpmullw($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector256); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4001: instruct vmul16S_reg(vecY dst, vecY src1, vecY src2) %{ kvn@4001: predicate(UseAVX > 1 && n->as_Vector()->length() == 16); kvn@4001: match(Set dst (MulVS src1 src2)); kvn@4001: format %{ "vpmullw $dst,$src1,$src2\t! mul packed16S" %} kvn@4001: ins_encode %{ kvn@4001: bool vector256 = true; kvn@4001: __ vpmullw($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4001: instruct vmul16S_mem(vecY dst, vecY src, memory mem) %{ kvn@4001: predicate(UseAVX > 1 && n->as_Vector()->length() == 16); kvn@4001: match(Set dst (MulVS src (LoadVector mem))); kvn@4001: format %{ "vpmullw $dst,$src,$mem\t! 
mul packed16S" %} kvn@4001: ins_encode %{ kvn@4001: bool vector256 = true; kvn@4001: __ vpmullw($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector256); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4001: // Integers vector mul (sse4_1) kvn@4001: instruct vmul2I(vecD dst, vecD src) %{ kvn@4001: predicate(UseSSE > 3 && n->as_Vector()->length() == 2); kvn@4001: match(Set dst (MulVI dst src)); kvn@4001: format %{ "pmulld $dst,$src\t! mul packed2I" %} kvn@4001: ins_encode %{ kvn@4001: __ pmulld($dst$$XMMRegister, $src$$XMMRegister); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4001: instruct vmul2I_reg(vecD dst, vecD src1, vecD src2) %{ kvn@4001: predicate(UseAVX > 0 && n->as_Vector()->length() == 2); kvn@4001: match(Set dst (MulVI src1 src2)); kvn@4001: format %{ "vpmulld $dst,$src1,$src2\t! mul packed2I" %} kvn@4001: ins_encode %{ kvn@4001: bool vector256 = false; kvn@4001: __ vpmulld($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4001: instruct vmul4I(vecX dst, vecX src) %{ kvn@4001: predicate(UseSSE > 3 && n->as_Vector()->length() == 4); kvn@4001: match(Set dst (MulVI dst src)); kvn@4001: format %{ "pmulld $dst,$src\t! mul packed4I" %} kvn@4001: ins_encode %{ kvn@4001: __ pmulld($dst$$XMMRegister, $src$$XMMRegister); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4001: instruct vmul4I_reg(vecX dst, vecX src1, vecX src2) %{ kvn@4001: predicate(UseAVX > 0 && n->as_Vector()->length() == 4); kvn@4001: match(Set dst (MulVI src1 src2)); kvn@4001: format %{ "vpmulld $dst,$src1,$src2\t! mul packed4I" %} kvn@4001: ins_encode %{ kvn@4001: bool vector256 = false; kvn@4001: __ vpmulld($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4001: instruct vmul4I_mem(vecX dst, vecX src, memory mem) %{ kvn@4001: predicate(UseAVX > 0 && n->as_Vector()->length() == 4); kvn@4001: match(Set dst (MulVI src (LoadVector mem))); kvn@4001: format %{ "vpmulld $dst,$src,$mem\t! mul packed4I" %} kvn@4001: ins_encode %{ kvn@4001: bool vector256 = false; kvn@4001: __ vpmulld($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector256); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4001: instruct vmul8I_reg(vecY dst, vecY src1, vecY src2) %{ kvn@4001: predicate(UseAVX > 1 && n->as_Vector()->length() == 8); kvn@4001: match(Set dst (MulVI src1 src2)); kvn@4001: format %{ "vpmulld $dst,$src1,$src2\t! mul packed8I" %} kvn@4001: ins_encode %{ kvn@4001: bool vector256 = true; kvn@4001: __ vpmulld($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4001: instruct vmul8I_mem(vecY dst, vecY src, memory mem) %{ kvn@4001: predicate(UseAVX > 1 && n->as_Vector()->length() == 8); kvn@4001: match(Set dst (MulVI src (LoadVector mem))); kvn@4001: format %{ "vpmulld $dst,$src,$mem\t! mul packed8I" %} kvn@4001: ins_encode %{ kvn@4001: bool vector256 = true; kvn@4001: __ vpmulld($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector256); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4001: // Floats vector mul kvn@4001: instruct vmul2F(vecD dst, vecD src) %{ kvn@4001: predicate(n->as_Vector()->length() == 2); kvn@4001: match(Set dst (MulVF dst src)); kvn@4001: format %{ "mulps $dst,$src\t! 
mul packed2F" %} kvn@4001: ins_encode %{ kvn@4001: __ mulps($dst$$XMMRegister, $src$$XMMRegister); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4001: instruct vmul2F_reg(vecD dst, vecD src1, vecD src2) %{ kvn@4001: predicate(UseAVX > 0 && n->as_Vector()->length() == 2); kvn@4001: match(Set dst (MulVF src1 src2)); kvn@4001: format %{ "vmulps $dst,$src1,$src2\t! mul packed2F" %} kvn@4001: ins_encode %{ kvn@4001: bool vector256 = false; kvn@4001: __ vmulps($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4001: instruct vmul4F(vecX dst, vecX src) %{ kvn@4001: predicate(n->as_Vector()->length() == 4); kvn@4001: match(Set dst (MulVF dst src)); kvn@4001: format %{ "mulps $dst,$src\t! mul packed4F" %} kvn@4001: ins_encode %{ kvn@4001: __ mulps($dst$$XMMRegister, $src$$XMMRegister); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4001: instruct vmul4F_reg(vecX dst, vecX src1, vecX src2) %{ kvn@4001: predicate(UseAVX > 0 && n->as_Vector()->length() == 4); kvn@4001: match(Set dst (MulVF src1 src2)); kvn@4001: format %{ "vmulps $dst,$src1,$src2\t! mul packed4F" %} kvn@4001: ins_encode %{ kvn@4001: bool vector256 = false; kvn@4001: __ vmulps($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4001: instruct vmul4F_mem(vecX dst, vecX src, memory mem) %{ kvn@4001: predicate(UseAVX > 0 && n->as_Vector()->length() == 4); kvn@4001: match(Set dst (MulVF src (LoadVector mem))); kvn@4001: format %{ "vmulps $dst,$src,$mem\t! mul packed4F" %} kvn@4001: ins_encode %{ kvn@4001: bool vector256 = false; kvn@4001: __ vmulps($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector256); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4001: instruct vmul8F_reg(vecY dst, vecY src1, vecY src2) %{ kvn@4001: predicate(UseAVX > 0 && n->as_Vector()->length() == 8); kvn@4001: match(Set dst (MulVF src1 src2)); kvn@4001: format %{ "vmulps $dst,$src1,$src2\t! mul packed8F" %} kvn@4001: ins_encode %{ kvn@4001: bool vector256 = true; kvn@4001: __ vmulps($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4001: instruct vmul8F_mem(vecY dst, vecY src, memory mem) %{ kvn@4001: predicate(UseAVX > 0 && n->as_Vector()->length() == 8); kvn@4001: match(Set dst (MulVF src (LoadVector mem))); kvn@4001: format %{ "vmulps $dst,$src,$mem\t! mul packed8F" %} kvn@4001: ins_encode %{ kvn@4001: bool vector256 = true; kvn@4001: __ vmulps($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector256); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4001: // Doubles vector mul kvn@4001: instruct vmul2D(vecX dst, vecX src) %{ kvn@4001: predicate(n->as_Vector()->length() == 2); kvn@4001: match(Set dst (MulVD dst src)); kvn@4001: format %{ "mulpd $dst,$src\t! mul packed2D" %} kvn@4001: ins_encode %{ kvn@4001: __ mulpd($dst$$XMMRegister, $src$$XMMRegister); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4001: instruct vmul2D_reg(vecX dst, vecX src1, vecX src2) %{ kvn@4001: predicate(UseAVX > 0 && n->as_Vector()->length() == 2); kvn@4001: match(Set dst (MulVD src1 src2)); kvn@4001: format %{ "vmulpd $dst,$src1,$src2\t! 
mul packed2D" %} kvn@4001: ins_encode %{ kvn@4001: bool vector256 = false; kvn@4001: __ vmulpd($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4001: instruct vmul2D_mem(vecX dst, vecX src, memory mem) %{ kvn@4001: predicate(UseAVX > 0 && n->as_Vector()->length() == 2); kvn@4001: match(Set dst (MulVD src (LoadVector mem))); kvn@4001: format %{ "vmulpd $dst,$src,$mem\t! mul packed2D" %} kvn@4001: ins_encode %{ kvn@4001: bool vector256 = false; kvn@4001: __ vmulpd($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector256); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4001: instruct vmul4D_reg(vecY dst, vecY src1, vecY src2) %{ kvn@4001: predicate(UseAVX > 0 && n->as_Vector()->length() == 4); kvn@4001: match(Set dst (MulVD src1 src2)); kvn@4001: format %{ "vmulpd $dst,$src1,$src2\t! mul packed4D" %} kvn@4001: ins_encode %{ kvn@4001: bool vector256 = true; kvn@4001: __ vmulpd($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4001: instruct vmul4D_mem(vecY dst, vecY src, memory mem) %{ kvn@4001: predicate(UseAVX > 0 && n->as_Vector()->length() == 4); kvn@4001: match(Set dst (MulVD src (LoadVector mem))); kvn@4001: format %{ "vmulpd $dst,$src,$mem\t! mul packed4D" %} kvn@4001: ins_encode %{ kvn@4001: bool vector256 = true; kvn@4001: __ vmulpd($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector256); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4001: // --------------------------------- DIV -------------------------------------- kvn@4001: kvn@4001: // Floats vector div kvn@4001: instruct vdiv2F(vecD dst, vecD src) %{ kvn@4001: predicate(n->as_Vector()->length() == 2); kvn@4001: match(Set dst (DivVF dst src)); kvn@4001: format %{ "divps $dst,$src\t! div packed2F" %} kvn@4001: ins_encode %{ kvn@4001: __ divps($dst$$XMMRegister, $src$$XMMRegister); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4001: instruct vdiv2F_reg(vecD dst, vecD src1, vecD src2) %{ kvn@4001: predicate(UseAVX > 0 && n->as_Vector()->length() == 2); kvn@4001: match(Set dst (DivVF src1 src2)); kvn@4001: format %{ "vdivps $dst,$src1,$src2\t! div packed2F" %} kvn@4001: ins_encode %{ kvn@4001: bool vector256 = false; kvn@4001: __ vdivps($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4001: instruct vdiv4F(vecX dst, vecX src) %{ kvn@4001: predicate(n->as_Vector()->length() == 4); kvn@4001: match(Set dst (DivVF dst src)); kvn@4001: format %{ "divps $dst,$src\t! div packed4F" %} kvn@4001: ins_encode %{ kvn@4001: __ divps($dst$$XMMRegister, $src$$XMMRegister); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4001: instruct vdiv4F_reg(vecX dst, vecX src1, vecX src2) %{ kvn@4001: predicate(UseAVX > 0 && n->as_Vector()->length() == 4); kvn@4001: match(Set dst (DivVF src1 src2)); kvn@4001: format %{ "vdivps $dst,$src1,$src2\t! 
div packed4F" %} kvn@4001: ins_encode %{ kvn@4001: bool vector256 = false; kvn@4001: __ vdivps($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4001: instruct vdiv4F_mem(vecX dst, vecX src, memory mem) %{ kvn@4001: predicate(UseAVX > 0 && n->as_Vector()->length() == 4); kvn@4001: match(Set dst (DivVF src (LoadVector mem))); kvn@4001: format %{ "vdivps $dst,$src,$mem\t! div packed4F" %} kvn@4001: ins_encode %{ kvn@4001: bool vector256 = false; kvn@4001: __ vdivps($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector256); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4001: instruct vdiv8F_reg(vecY dst, vecY src1, vecY src2) %{ kvn@4001: predicate(UseAVX > 0 && n->as_Vector()->length() == 8); kvn@4001: match(Set dst (DivVF src1 src2)); kvn@4001: format %{ "vdivps $dst,$src1,$src2\t! div packed8F" %} kvn@4001: ins_encode %{ kvn@4001: bool vector256 = true; kvn@4001: __ vdivps($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4001: instruct vdiv8F_mem(vecY dst, vecY src, memory mem) %{ kvn@4001: predicate(UseAVX > 0 && n->as_Vector()->length() == 8); kvn@4001: match(Set dst (DivVF src (LoadVector mem))); kvn@4001: format %{ "vdivps $dst,$src,$mem\t! div packed8F" %} kvn@4001: ins_encode %{ kvn@4001: bool vector256 = true; kvn@4001: __ vdivps($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector256); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4001: // Doubles vector div kvn@4001: instruct vdiv2D(vecX dst, vecX src) %{ kvn@4001: predicate(n->as_Vector()->length() == 2); kvn@4001: match(Set dst (DivVD dst src)); kvn@4001: format %{ "divpd $dst,$src\t! div packed2D" %} kvn@4001: ins_encode %{ kvn@4001: __ divpd($dst$$XMMRegister, $src$$XMMRegister); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4001: instruct vdiv2D_reg(vecX dst, vecX src1, vecX src2) %{ kvn@4001: predicate(UseAVX > 0 && n->as_Vector()->length() == 2); kvn@4001: match(Set dst (DivVD src1 src2)); kvn@4001: format %{ "vdivpd $dst,$src1,$src2\t! div packed2D" %} kvn@4001: ins_encode %{ kvn@4001: bool vector256 = false; kvn@4001: __ vdivpd($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4001: instruct vdiv2D_mem(vecX dst, vecX src, memory mem) %{ kvn@4001: predicate(UseAVX > 0 && n->as_Vector()->length() == 2); kvn@4001: match(Set dst (DivVD src (LoadVector mem))); kvn@4001: format %{ "vdivpd $dst,$src,$mem\t! div packed2D" %} kvn@4001: ins_encode %{ kvn@4001: bool vector256 = false; kvn@4001: __ vdivpd($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector256); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4001: instruct vdiv4D_reg(vecY dst, vecY src1, vecY src2) %{ kvn@4001: predicate(UseAVX > 0 && n->as_Vector()->length() == 4); kvn@4001: match(Set dst (DivVD src1 src2)); kvn@4001: format %{ "vdivpd $dst,$src1,$src2\t! 
div packed4D" %} kvn@4001: ins_encode %{ kvn@4001: bool vector256 = true; kvn@4001: __ vdivpd($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4001: instruct vdiv4D_mem(vecY dst, vecY src, memory mem) %{ kvn@4001: predicate(UseAVX > 0 && n->as_Vector()->length() == 4); kvn@4001: match(Set dst (DivVD src (LoadVector mem))); kvn@4001: format %{ "vdivpd $dst,$src,$mem\t! div packed4D" %} kvn@4001: ins_encode %{ kvn@4001: bool vector256 = true; kvn@4001: __ vdivpd($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector256); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4134: // ------------------------------ Shift --------------------------------------- kvn@4134: kvn@4134: // Left and right shift count vectors are the same on x86 kvn@4134: // (only lowest bits of xmm reg are used for count). kvn@4134: instruct vshiftcnt(vecS dst, rRegI cnt) %{ kvn@4134: match(Set dst (LShiftCntV cnt)); kvn@4134: match(Set dst (RShiftCntV cnt)); kvn@4134: format %{ "movd $dst,$cnt\t! load shift count" %} kvn@4134: ins_encode %{ kvn@4134: __ movdl($dst$$XMMRegister, $cnt$$Register); kvn@4134: %} kvn@4134: ins_pipe( pipe_slow ); kvn@4134: %} kvn@4134: kvn@4001: // ------------------------------ LeftShift ----------------------------------- kvn@4001: kvn@4001: // Shorts/Chars vector left shift kvn@4134: instruct vsll2S(vecS dst, vecS shift) %{ kvn@4001: predicate(n->as_Vector()->length() == 2); kvn@4001: match(Set dst (LShiftVS dst shift)); kvn@4001: format %{ "psllw $dst,$shift\t! left shift packed2S" %} kvn@4001: ins_encode %{ kvn@4001: __ psllw($dst$$XMMRegister, $shift$$XMMRegister); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4001: instruct vsll2S_imm(vecS dst, immI8 shift) %{ kvn@4001: predicate(n->as_Vector()->length() == 2); kvn@4001: match(Set dst (LShiftVS dst shift)); kvn@4001: format %{ "psllw $dst,$shift\t! left shift packed2S" %} kvn@4001: ins_encode %{ kvn@4001: __ psllw($dst$$XMMRegister, (int)$shift$$constant); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4134: instruct vsll2S_reg(vecS dst, vecS src, vecS shift) %{ kvn@4001: predicate(UseAVX > 0 && n->as_Vector()->length() == 2); kvn@4001: match(Set dst (LShiftVS src shift)); kvn@4001: format %{ "vpsllw $dst,$src,$shift\t! left shift packed2S" %} kvn@4001: ins_encode %{ kvn@4001: bool vector256 = false; kvn@4001: __ vpsllw($dst$$XMMRegister, $src$$XMMRegister, $shift$$XMMRegister, vector256); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4001: instruct vsll2S_reg_imm(vecS dst, vecS src, immI8 shift) %{ kvn@4001: predicate(UseAVX > 0 && n->as_Vector()->length() == 2); kvn@4001: match(Set dst (LShiftVS src shift)); kvn@4001: format %{ "vpsllw $dst,$src,$shift\t! left shift packed2S" %} kvn@4001: ins_encode %{ kvn@4001: bool vector256 = false; kvn@4001: __ vpsllw($dst$$XMMRegister, $src$$XMMRegister, (int)$shift$$constant, vector256); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4134: instruct vsll4S(vecD dst, vecS shift) %{ kvn@4001: predicate(n->as_Vector()->length() == 4); kvn@4001: match(Set dst (LShiftVS dst shift)); kvn@4001: format %{ "psllw $dst,$shift\t! 
left shift packed4S" %} kvn@4001: ins_encode %{ kvn@4001: __ psllw($dst$$XMMRegister, $shift$$XMMRegister); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4001: instruct vsll4S_imm(vecD dst, immI8 shift) %{ kvn@4001: predicate(n->as_Vector()->length() == 4); kvn@4001: match(Set dst (LShiftVS dst shift)); kvn@4001: format %{ "psllw $dst,$shift\t! left shift packed4S" %} kvn@4001: ins_encode %{ kvn@4001: __ psllw($dst$$XMMRegister, (int)$shift$$constant); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4134: instruct vsll4S_reg(vecD dst, vecD src, vecS shift) %{ kvn@4001: predicate(UseAVX > 0 && n->as_Vector()->length() == 4); kvn@4001: match(Set dst (LShiftVS src shift)); kvn@4001: format %{ "vpsllw $dst,$src,$shift\t! left shift packed4S" %} kvn@4001: ins_encode %{ kvn@4001: bool vector256 = false; kvn@4001: __ vpsllw($dst$$XMMRegister, $src$$XMMRegister, $shift$$XMMRegister, vector256); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4001: instruct vsll4S_reg_imm(vecD dst, vecD src, immI8 shift) %{ kvn@4001: predicate(UseAVX > 0 && n->as_Vector()->length() == 4); kvn@4001: match(Set dst (LShiftVS src shift)); kvn@4001: format %{ "vpsllw $dst,$src,$shift\t! left shift packed4S" %} kvn@4001: ins_encode %{ kvn@4001: bool vector256 = false; kvn@4001: __ vpsllw($dst$$XMMRegister, $src$$XMMRegister, (int)$shift$$constant, vector256); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4134: instruct vsll8S(vecX dst, vecS shift) %{ kvn@4001: predicate(n->as_Vector()->length() == 8); kvn@4001: match(Set dst (LShiftVS dst shift)); kvn@4001: format %{ "psllw $dst,$shift\t! left shift packed8S" %} kvn@4001: ins_encode %{ kvn@4001: __ psllw($dst$$XMMRegister, $shift$$XMMRegister); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4001: instruct vsll8S_imm(vecX dst, immI8 shift) %{ kvn@4001: predicate(n->as_Vector()->length() == 8); kvn@4001: match(Set dst (LShiftVS dst shift)); kvn@4001: format %{ "psllw $dst,$shift\t! left shift packed8S" %} kvn@4001: ins_encode %{ kvn@4001: __ psllw($dst$$XMMRegister, (int)$shift$$constant); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4134: instruct vsll8S_reg(vecX dst, vecX src, vecS shift) %{ kvn@4001: predicate(UseAVX > 0 && n->as_Vector()->length() == 8); kvn@4001: match(Set dst (LShiftVS src shift)); kvn@4001: format %{ "vpsllw $dst,$src,$shift\t! left shift packed8S" %} kvn@4001: ins_encode %{ kvn@4001: bool vector256 = false; kvn@4001: __ vpsllw($dst$$XMMRegister, $src$$XMMRegister, $shift$$XMMRegister, vector256); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4001: instruct vsll8S_reg_imm(vecX dst, vecX src, immI8 shift) %{ kvn@4001: predicate(UseAVX > 0 && n->as_Vector()->length() == 8); kvn@4001: match(Set dst (LShiftVS src shift)); kvn@4001: format %{ "vpsllw $dst,$src,$shift\t! left shift packed8S" %} kvn@4001: ins_encode %{ kvn@4001: bool vector256 = false; kvn@4001: __ vpsllw($dst$$XMMRegister, $src$$XMMRegister, (int)$shift$$constant, vector256); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4134: instruct vsll16S_reg(vecY dst, vecY src, vecS shift) %{ kvn@4001: predicate(UseAVX > 1 && n->as_Vector()->length() == 16); kvn@4001: match(Set dst (LShiftVS src shift)); kvn@4001: format %{ "vpsllw $dst,$src,$shift\t! 
left shift packed16S" %} kvn@4001: ins_encode %{ kvn@4001: bool vector256 = true; kvn@4001: __ vpsllw($dst$$XMMRegister, $src$$XMMRegister, $shift$$XMMRegister, vector256); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4001: instruct vsll16S_reg_imm(vecY dst, vecY src, immI8 shift) %{ kvn@4001: predicate(UseAVX > 1 && n->as_Vector()->length() == 16); kvn@4001: match(Set dst (LShiftVS src shift)); kvn@4001: format %{ "vpsllw $dst,$src,$shift\t! left shift packed16S" %} kvn@4001: ins_encode %{ kvn@4001: bool vector256 = true; kvn@4001: __ vpsllw($dst$$XMMRegister, $src$$XMMRegister, (int)$shift$$constant, vector256); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4001: // Integers vector left shift kvn@4134: instruct vsll2I(vecD dst, vecS shift) %{ kvn@4001: predicate(n->as_Vector()->length() == 2); kvn@4001: match(Set dst (LShiftVI dst shift)); kvn@4001: format %{ "pslld $dst,$shift\t! left shift packed2I" %} kvn@4001: ins_encode %{ kvn@4001: __ pslld($dst$$XMMRegister, $shift$$XMMRegister); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4001: instruct vsll2I_imm(vecD dst, immI8 shift) %{ kvn@4001: predicate(n->as_Vector()->length() == 2); kvn@4001: match(Set dst (LShiftVI dst shift)); kvn@4001: format %{ "pslld $dst,$shift\t! left shift packed2I" %} kvn@4001: ins_encode %{ kvn@4001: __ pslld($dst$$XMMRegister, (int)$shift$$constant); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4134: instruct vsll2I_reg(vecD dst, vecD src, vecS shift) %{ kvn@4001: predicate(UseAVX > 0 && n->as_Vector()->length() == 2); kvn@4001: match(Set dst (LShiftVI src shift)); kvn@4001: format %{ "vpslld $dst,$src,$shift\t! left shift packed2I" %} kvn@4001: ins_encode %{ kvn@4001: bool vector256 = false; kvn@4001: __ vpslld($dst$$XMMRegister, $src$$XMMRegister, $shift$$XMMRegister, vector256); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4001: instruct vsll2I_reg_imm(vecD dst, vecD src, immI8 shift) %{ kvn@4001: predicate(UseAVX > 0 && n->as_Vector()->length() == 2); kvn@4001: match(Set dst (LShiftVI src shift)); kvn@4001: format %{ "vpslld $dst,$src,$shift\t! left shift packed2I" %} kvn@4001: ins_encode %{ kvn@4001: bool vector256 = false; kvn@4001: __ vpslld($dst$$XMMRegister, $src$$XMMRegister, (int)$shift$$constant, vector256); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4134: instruct vsll4I(vecX dst, vecS shift) %{ kvn@4001: predicate(n->as_Vector()->length() == 4); kvn@4001: match(Set dst (LShiftVI dst shift)); kvn@4001: format %{ "pslld $dst,$shift\t! left shift packed4I" %} kvn@4001: ins_encode %{ kvn@4001: __ pslld($dst$$XMMRegister, $shift$$XMMRegister); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4001: instruct vsll4I_imm(vecX dst, immI8 shift) %{ kvn@4001: predicate(n->as_Vector()->length() == 4); kvn@4001: match(Set dst (LShiftVI dst shift)); kvn@4001: format %{ "pslld $dst,$shift\t! left shift packed4I" %} kvn@4001: ins_encode %{ kvn@4001: __ pslld($dst$$XMMRegister, (int)$shift$$constant); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4134: instruct vsll4I_reg(vecX dst, vecX src, vecS shift) %{ kvn@4001: predicate(UseAVX > 0 && n->as_Vector()->length() == 4); kvn@4001: match(Set dst (LShiftVI src shift)); kvn@4001: format %{ "vpslld $dst,$src,$shift\t! 
left shift packed4I" %} kvn@4001: ins_encode %{ kvn@4001: bool vector256 = false; kvn@4001: __ vpslld($dst$$XMMRegister, $src$$XMMRegister, $shift$$XMMRegister, vector256); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4001: instruct vsll4I_reg_imm(vecX dst, vecX src, immI8 shift) %{ kvn@4001: predicate(UseAVX > 0 && n->as_Vector()->length() == 4); kvn@4001: match(Set dst (LShiftVI src shift)); kvn@4001: format %{ "vpslld $dst,$src,$shift\t! left shift packed4I" %} kvn@4001: ins_encode %{ kvn@4001: bool vector256 = false; kvn@4001: __ vpslld($dst$$XMMRegister, $src$$XMMRegister, (int)$shift$$constant, vector256); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4134: instruct vsll8I_reg(vecY dst, vecY src, vecS shift) %{ kvn@4001: predicate(UseAVX > 1 && n->as_Vector()->length() == 8); kvn@4001: match(Set dst (LShiftVI src shift)); kvn@4001: format %{ "vpslld $dst,$src,$shift\t! left shift packed8I" %} kvn@4001: ins_encode %{ kvn@4001: bool vector256 = true; kvn@4001: __ vpslld($dst$$XMMRegister, $src$$XMMRegister, $shift$$XMMRegister, vector256); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4001: instruct vsll8I_reg_imm(vecY dst, vecY src, immI8 shift) %{ kvn@4001: predicate(UseAVX > 1 && n->as_Vector()->length() == 8); kvn@4001: match(Set dst (LShiftVI src shift)); kvn@4001: format %{ "vpslld $dst,$src,$shift\t! left shift packed8I" %} kvn@4001: ins_encode %{ kvn@4001: bool vector256 = true; kvn@4001: __ vpslld($dst$$XMMRegister, $src$$XMMRegister, (int)$shift$$constant, vector256); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4001: // Longs vector left shift kvn@4134: instruct vsll2L(vecX dst, vecS shift) %{ kvn@4001: predicate(n->as_Vector()->length() == 2); kvn@4001: match(Set dst (LShiftVL dst shift)); kvn@4001: format %{ "psllq $dst,$shift\t! left shift packed2L" %} kvn@4001: ins_encode %{ kvn@4001: __ psllq($dst$$XMMRegister, $shift$$XMMRegister); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4001: instruct vsll2L_imm(vecX dst, immI8 shift) %{ kvn@4001: predicate(n->as_Vector()->length() == 2); kvn@4001: match(Set dst (LShiftVL dst shift)); kvn@4001: format %{ "psllq $dst,$shift\t! left shift packed2L" %} kvn@4001: ins_encode %{ kvn@4001: __ psllq($dst$$XMMRegister, (int)$shift$$constant); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4134: instruct vsll2L_reg(vecX dst, vecX src, vecS shift) %{ kvn@4001: predicate(UseAVX > 0 && n->as_Vector()->length() == 2); kvn@4001: match(Set dst (LShiftVL src shift)); kvn@4001: format %{ "vpsllq $dst,$src,$shift\t! left shift packed2L" %} kvn@4001: ins_encode %{ kvn@4001: bool vector256 = false; kvn@4001: __ vpsllq($dst$$XMMRegister, $src$$XMMRegister, $shift$$XMMRegister, vector256); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4001: instruct vsll2L_reg_imm(vecX dst, vecX src, immI8 shift) %{ kvn@4001: predicate(UseAVX > 0 && n->as_Vector()->length() == 2); kvn@4001: match(Set dst (LShiftVL src shift)); kvn@4001: format %{ "vpsllq $dst,$src,$shift\t! 
left shift packed2L" %} kvn@4001: ins_encode %{ kvn@4001: bool vector256 = false; kvn@4001: __ vpsllq($dst$$XMMRegister, $src$$XMMRegister, (int)$shift$$constant, vector256); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4134: instruct vsll4L_reg(vecY dst, vecY src, vecS shift) %{ kvn@4001: predicate(UseAVX > 1 && n->as_Vector()->length() == 4); kvn@4001: match(Set dst (LShiftVL src shift)); kvn@4001: format %{ "vpsllq $dst,$src,$shift\t! left shift packed4L" %} kvn@4001: ins_encode %{ kvn@4001: bool vector256 = true; kvn@4001: __ vpsllq($dst$$XMMRegister, $src$$XMMRegister, $shift$$XMMRegister, vector256); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4001: instruct vsll4L_reg_imm(vecY dst, vecY src, immI8 shift) %{ kvn@4001: predicate(UseAVX > 1 && n->as_Vector()->length() == 4); kvn@4001: match(Set dst (LShiftVL src shift)); kvn@4001: format %{ "vpsllq $dst,$src,$shift\t! left shift packed4L" %} kvn@4001: ins_encode %{ kvn@4001: bool vector256 = true; kvn@4001: __ vpsllq($dst$$XMMRegister, $src$$XMMRegister, (int)$shift$$constant, vector256); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4001: // ----------------------- LogicalRightShift ----------------------------------- kvn@4001: kvn@4001: // Shorts/Chars vector logical right shift produces an incorrect Java result kvn@4001: // for negative data because Java code converts a short value to int with kvn@4001: // sign extension before the shift: e.g. for short -1 and shift 3, Java computes kvn@4001: // 0xFFFFFFFF >>> 3 = 0x1FFFFFFF (low 16 bits 0xFFFF), but psrlw would give 0x1FFF. kvn@4001: kvn@4001: // Integers vector logical right shift kvn@4134: instruct vsrl2I(vecD dst, vecS shift) %{ kvn@4001: predicate(n->as_Vector()->length() == 2); kvn@4001: match(Set dst (URShiftVI dst shift)); kvn@4001: format %{ "psrld $dst,$shift\t! logical right shift packed2I" %} kvn@4001: ins_encode %{ kvn@4001: __ psrld($dst$$XMMRegister, $shift$$XMMRegister); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4001: instruct vsrl2I_imm(vecD dst, immI8 shift) %{ kvn@4001: predicate(n->as_Vector()->length() == 2); kvn@4001: match(Set dst (URShiftVI dst shift)); kvn@4001: format %{ "psrld $dst,$shift\t! logical right shift packed2I" %} kvn@4001: ins_encode %{ kvn@4001: __ psrld($dst$$XMMRegister, (int)$shift$$constant); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4134: instruct vsrl2I_reg(vecD dst, vecD src, vecS shift) %{ kvn@4001: predicate(UseAVX > 0 && n->as_Vector()->length() == 2); kvn@4001: match(Set dst (URShiftVI src shift)); kvn@4001: format %{ "vpsrld $dst,$src,$shift\t! logical right shift packed2I" %} kvn@4001: ins_encode %{ kvn@4001: bool vector256 = false; kvn@4001: __ vpsrld($dst$$XMMRegister, $src$$XMMRegister, $shift$$XMMRegister, vector256); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4001: instruct vsrl2I_reg_imm(vecD dst, vecD src, immI8 shift) %{ kvn@4001: predicate(UseAVX > 0 && n->as_Vector()->length() == 2); kvn@4001: match(Set dst (URShiftVI src shift)); kvn@4001: format %{ "vpsrld $dst,$src,$shift\t! logical right shift packed2I" %} kvn@4001: ins_encode %{ kvn@4001: bool vector256 = false; kvn@4001: __ vpsrld($dst$$XMMRegister, $src$$XMMRegister, (int)$shift$$constant, vector256); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4134: instruct vsrl4I(vecX dst, vecS shift) %{ kvn@4001: predicate(n->as_Vector()->length() == 4); kvn@4001: match(Set dst (URShiftVI dst shift)); kvn@4001: format %{ "psrld $dst,$shift\t! 
logical right shift packed4I" %} kvn@4001: ins_encode %{ kvn@4001: __ psrld($dst$$XMMRegister, $shift$$XMMRegister); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4001: instruct vsrl4I_imm(vecX dst, immI8 shift) %{ kvn@4001: predicate(n->as_Vector()->length() == 4); kvn@4001: match(Set dst (URShiftVI dst shift)); kvn@4001: format %{ "psrld $dst,$shift\t! logical right shift packed4I" %} kvn@4001: ins_encode %{ kvn@4001: __ psrld($dst$$XMMRegister, (int)$shift$$constant); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4134: instruct vsrl4I_reg(vecX dst, vecX src, vecS shift) %{ kvn@4001: predicate(UseAVX > 0 && n->as_Vector()->length() == 4); kvn@4001: match(Set dst (URShiftVI src shift)); kvn@4001: format %{ "vpsrld $dst,$src,$shift\t! logical right shift packed4I" %} kvn@4001: ins_encode %{ kvn@4001: bool vector256 = false; kvn@4001: __ vpsrld($dst$$XMMRegister, $src$$XMMRegister, $shift$$XMMRegister, vector256); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4001: instruct vsrl4I_reg_imm(vecX dst, vecX src, immI8 shift) %{ kvn@4001: predicate(UseAVX > 0 && n->as_Vector()->length() == 4); kvn@4001: match(Set dst (URShiftVI src shift)); kvn@4001: format %{ "vpsrld $dst,$src,$shift\t! logical right shift packed4I" %} kvn@4001: ins_encode %{ kvn@4001: bool vector256 = false; kvn@4001: __ vpsrld($dst$$XMMRegister, $src$$XMMRegister, (int)$shift$$constant, vector256); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4134: instruct vsrl8I_reg(vecY dst, vecY src, vecS shift) %{ kvn@4001: predicate(UseAVX > 1 && n->as_Vector()->length() == 8); kvn@4001: match(Set dst (URShiftVI src shift)); kvn@4001: format %{ "vpsrld $dst,$src,$shift\t! logical right shift packed8I" %} kvn@4001: ins_encode %{ kvn@4001: bool vector256 = true; kvn@4001: __ vpsrld($dst$$XMMRegister, $src$$XMMRegister, $shift$$XMMRegister, vector256); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4001: instruct vsrl8I_reg_imm(vecY dst, vecY src, immI8 shift) %{ kvn@4001: predicate(UseAVX > 1 && n->as_Vector()->length() == 8); kvn@4001: match(Set dst (URShiftVI src shift)); kvn@4001: format %{ "vpsrld $dst,$src,$shift\t! logical right shift packed8I" %} kvn@4001: ins_encode %{ kvn@4001: bool vector256 = true; kvn@4001: __ vpsrld($dst$$XMMRegister, $src$$XMMRegister, (int)$shift$$constant, vector256); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4001: // Longs vector logical right shift kvn@4134: instruct vsrl2L(vecX dst, vecS shift) %{ kvn@4001: predicate(n->as_Vector()->length() == 2); kvn@4001: match(Set dst (URShiftVL dst shift)); kvn@4001: format %{ "psrlq $dst,$shift\t! logical right shift packed2L" %} kvn@4001: ins_encode %{ kvn@4001: __ psrlq($dst$$XMMRegister, $shift$$XMMRegister); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4001: instruct vsrl2L_imm(vecX dst, immI8 shift) %{ kvn@4001: predicate(n->as_Vector()->length() == 2); kvn@4001: match(Set dst (URShiftVL dst shift)); kvn@4001: format %{ "psrlq $dst,$shift\t! logical right shift packed2L" %} kvn@4001: ins_encode %{ kvn@4001: __ psrlq($dst$$XMMRegister, (int)$shift$$constant); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4134: instruct vsrl2L_reg(vecX dst, vecX src, vecS shift) %{ kvn@4001: predicate(UseAVX > 0 && n->as_Vector()->length() == 2); kvn@4001: match(Set dst (URShiftVL src shift)); kvn@4001: format %{ "vpsrlq $dst,$src,$shift\t! 
logical right shift packed2L" %} kvn@4001: ins_encode %{ kvn@4001: bool vector256 = false; kvn@4001: __ vpsrlq($dst$$XMMRegister, $src$$XMMRegister, $shift$$XMMRegister, vector256); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4001: instruct vsrl2L_reg_imm(vecX dst, vecX src, immI8 shift) %{ kvn@4001: predicate(UseAVX > 0 && n->as_Vector()->length() == 2); kvn@4001: match(Set dst (URShiftVL src shift)); kvn@4001: format %{ "vpsrlq $dst,$src,$shift\t! logical right shift packed2L" %} kvn@4001: ins_encode %{ kvn@4001: bool vector256 = false; kvn@4001: __ vpsrlq($dst$$XMMRegister, $src$$XMMRegister, (int)$shift$$constant, vector256); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4134: instruct vsrl4L_reg(vecY dst, vecY src, vecS shift) %{ kvn@4001: predicate(UseAVX > 1 && n->as_Vector()->length() == 4); kvn@4001: match(Set dst (URShiftVL src shift)); kvn@4001: format %{ "vpsrlq $dst,$src,$shift\t! logical right shift packed4L" %} kvn@4001: ins_encode %{ kvn@4001: bool vector256 = true; kvn@4001: __ vpsrlq($dst$$XMMRegister, $src$$XMMRegister, $shift$$XMMRegister, vector256); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4001: instruct vsrl4L_reg_imm(vecY dst, vecY src, immI8 shift) %{ kvn@4001: predicate(UseAVX > 1 && n->as_Vector()->length() == 4); kvn@4001: match(Set dst (URShiftVL src shift)); kvn@4001: format %{ "vpsrlq $dst,$src,$shift\t! logical right shift packed4L" %} kvn@4001: ins_encode %{ kvn@4001: bool vector256 = true; kvn@4001: __ vpsrlq($dst$$XMMRegister, $src$$XMMRegister, (int)$shift$$constant, vector256); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4001: // ------------------- ArithmeticRightShift ----------------------------------- kvn@4001: kvn@4001: // Shorts/Chars vector arithmetic right shift kvn@4134: instruct vsra2S(vecS dst, vecS shift) %{ kvn@4001: predicate(n->as_Vector()->length() == 2); kvn@4001: match(Set dst (RShiftVS dst shift)); kvn@4001: format %{ "psraw $dst,$shift\t! arithmetic right shift packed2S" %} kvn@4001: ins_encode %{ kvn@4001: __ psraw($dst$$XMMRegister, $shift$$XMMRegister); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4001: instruct vsra2S_imm(vecS dst, immI8 shift) %{ kvn@4001: predicate(n->as_Vector()->length() == 2); kvn@4001: match(Set dst (RShiftVS dst shift)); kvn@4001: format %{ "psraw $dst,$shift\t! arithmetic right shift packed2S" %} kvn@4001: ins_encode %{ kvn@4001: __ psraw($dst$$XMMRegister, (int)$shift$$constant); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4134: instruct vsra2S_reg(vecS dst, vecS src, vecS shift) %{ kvn@4001: predicate(UseAVX > 0 && n->as_Vector()->length() == 2); kvn@4001: match(Set dst (RShiftVS src shift)); kvn@4001: format %{ "vpsraw $dst,$src,$shift\t! arithmetic right shift packed2S" %} kvn@4001: ins_encode %{ kvn@4001: bool vector256 = false; kvn@4001: __ vpsraw($dst$$XMMRegister, $src$$XMMRegister, $shift$$XMMRegister, vector256); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4001: instruct vsra2S_reg_imm(vecS dst, vecS src, immI8 shift) %{ kvn@4001: predicate(UseAVX > 0 && n->as_Vector()->length() == 2); kvn@4001: match(Set dst (RShiftVS src shift)); kvn@4001: format %{ "vpsraw $dst,$src,$shift\t! 
arithmetic right shift packed2S" %} kvn@4001: ins_encode %{ kvn@4001: bool vector256 = false; kvn@4001: __ vpsraw($dst$$XMMRegister, $src$$XMMRegister, (int)$shift$$constant, vector256); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4134: instruct vsra4S(vecD dst, vecS shift) %{ kvn@4001: predicate(n->as_Vector()->length() == 4); kvn@4001: match(Set dst (RShiftVS dst shift)); kvn@4001: format %{ "psraw $dst,$shift\t! arithmetic right shift packed4S" %} kvn@4001: ins_encode %{ kvn@4001: __ psraw($dst$$XMMRegister, $shift$$XMMRegister); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4001: instruct vsra4S_imm(vecD dst, immI8 shift) %{ kvn@4001: predicate(n->as_Vector()->length() == 4); kvn@4001: match(Set dst (RShiftVS dst shift)); kvn@4001: format %{ "psraw $dst,$shift\t! arithmetic right shift packed4S" %} kvn@4001: ins_encode %{ kvn@4001: __ psraw($dst$$XMMRegister, (int)$shift$$constant); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4134: instruct vsra4S_reg(vecD dst, vecD src, vecS shift) %{ kvn@4001: predicate(UseAVX > 0 && n->as_Vector()->length() == 4); kvn@4001: match(Set dst (RShiftVS src shift)); kvn@4001: format %{ "vpsraw $dst,$src,$shift\t! arithmetic right shift packed4S" %} kvn@4001: ins_encode %{ kvn@4001: bool vector256 = false; kvn@4001: __ vpsraw($dst$$XMMRegister, $src$$XMMRegister, $shift$$XMMRegister, vector256); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4001: instruct vsra4S_reg_imm(vecD dst, vecD src, immI8 shift) %{ kvn@4001: predicate(UseAVX > 0 && n->as_Vector()->length() == 4); kvn@4001: match(Set dst (RShiftVS src shift)); kvn@4001: format %{ "vpsraw $dst,$src,$shift\t! arithmetic right shift packed4S" %} kvn@4001: ins_encode %{ kvn@4001: bool vector256 = false; kvn@4001: __ vpsraw($dst$$XMMRegister, $src$$XMMRegister, (int)$shift$$constant, vector256); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4134: instruct vsra8S(vecX dst, vecS shift) %{ kvn@4001: predicate(n->as_Vector()->length() == 8); kvn@4001: match(Set dst (RShiftVS dst shift)); kvn@4001: format %{ "psraw $dst,$shift\t! arithmetic right shift packed8S" %} kvn@4001: ins_encode %{ kvn@4001: __ psraw($dst$$XMMRegister, $shift$$XMMRegister); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4001: instruct vsra8S_imm(vecX dst, immI8 shift) %{ kvn@4001: predicate(n->as_Vector()->length() == 8); kvn@4001: match(Set dst (RShiftVS dst shift)); kvn@4001: format %{ "psraw $dst,$shift\t! arithmetic right shift packed8S" %} kvn@4001: ins_encode %{ kvn@4001: __ psraw($dst$$XMMRegister, (int)$shift$$constant); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4134: instruct vsra8S_reg(vecX dst, vecX src, vecS shift) %{ kvn@4001: predicate(UseAVX > 0 && n->as_Vector()->length() == 8); kvn@4001: match(Set dst (RShiftVS src shift)); kvn@4001: format %{ "vpsraw $dst,$src,$shift\t! arithmetic right shift packed8S" %} kvn@4001: ins_encode %{ kvn@4001: bool vector256 = false; kvn@4001: __ vpsraw($dst$$XMMRegister, $src$$XMMRegister, $shift$$XMMRegister, vector256); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4001: instruct vsra8S_reg_imm(vecX dst, vecX src, immI8 shift) %{ kvn@4001: predicate(UseAVX > 0 && n->as_Vector()->length() == 8); kvn@4001: match(Set dst (RShiftVS src shift)); kvn@4001: format %{ "vpsraw $dst,$src,$shift\t! 
arithmetic right shift packed8S" %} kvn@4001: ins_encode %{ kvn@4001: bool vector256 = false; kvn@4001: __ vpsraw($dst$$XMMRegister, $src$$XMMRegister, (int)$shift$$constant, vector256); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4134: instruct vsra16S_reg(vecY dst, vecY src, vecS shift) %{ kvn@4001: predicate(UseAVX > 1 && n->as_Vector()->length() == 16); kvn@4001: match(Set dst (RShiftVS src shift)); kvn@4001: format %{ "vpsraw $dst,$src,$shift\t! arithmetic right shift packed16S" %} kvn@4001: ins_encode %{ kvn@4001: bool vector256 = true; kvn@4001: __ vpsraw($dst$$XMMRegister, $src$$XMMRegister, $shift$$XMMRegister, vector256); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4001: instruct vsra16S_reg_imm(vecY dst, vecY src, immI8 shift) %{ kvn@4001: predicate(UseAVX > 1 && n->as_Vector()->length() == 16); kvn@4001: match(Set dst (RShiftVS src shift)); kvn@4001: format %{ "vpsraw $dst,$src,$shift\t! arithmetic right shift packed16S" %} kvn@4001: ins_encode %{ kvn@4001: bool vector256 = true; kvn@4001: __ vpsraw($dst$$XMMRegister, $src$$XMMRegister, (int)$shift$$constant, vector256); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4001: // Integers vector arithmetic right shift kvn@4134: instruct vsra2I(vecD dst, vecS shift) %{ kvn@4001: predicate(n->as_Vector()->length() == 2); kvn@4001: match(Set dst (RShiftVI dst shift)); kvn@4001: format %{ "psrad $dst,$shift\t! arithmetic right shift packed2I" %} kvn@4001: ins_encode %{ kvn@4001: __ psrad($dst$$XMMRegister, $shift$$XMMRegister); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4001: instruct vsra2I_imm(vecD dst, immI8 shift) %{ kvn@4001: predicate(n->as_Vector()->length() == 2); kvn@4001: match(Set dst (RShiftVI dst shift)); kvn@4001: format %{ "psrad $dst,$shift\t! arithmetic right shift packed2I" %} kvn@4001: ins_encode %{ kvn@4001: __ psrad($dst$$XMMRegister, (int)$shift$$constant); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4134: instruct vsra2I_reg(vecD dst, vecD src, vecS shift) %{ kvn@4001: predicate(UseAVX > 0 && n->as_Vector()->length() == 2); kvn@4001: match(Set dst (RShiftVI src shift)); kvn@4001: format %{ "vpsrad $dst,$src,$shift\t! arithmetic right shift packed2I" %} kvn@4001: ins_encode %{ kvn@4001: bool vector256 = false; kvn@4001: __ vpsrad($dst$$XMMRegister, $src$$XMMRegister, $shift$$XMMRegister, vector256); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4001: instruct vsra2I_reg_imm(vecD dst, vecD src, immI8 shift) %{ kvn@4001: predicate(UseAVX > 0 && n->as_Vector()->length() == 2); kvn@4001: match(Set dst (RShiftVI src shift)); kvn@4001: format %{ "vpsrad $dst,$src,$shift\t! arithmetic right shift packed2I" %} kvn@4001: ins_encode %{ kvn@4001: bool vector256 = false; kvn@4001: __ vpsrad($dst$$XMMRegister, $src$$XMMRegister, (int)$shift$$constant, vector256); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4134: instruct vsra4I(vecX dst, vecS shift) %{ kvn@4001: predicate(n->as_Vector()->length() == 4); kvn@4001: match(Set dst (RShiftVI dst shift)); kvn@4001: format %{ "psrad $dst,$shift\t! 
arithmetic right shift packed4I" %} kvn@4001: ins_encode %{ kvn@4001: __ psrad($dst$$XMMRegister, $shift$$XMMRegister); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4001: instruct vsra4I_imm(vecX dst, immI8 shift) %{ kvn@4001: predicate(n->as_Vector()->length() == 4); kvn@4001: match(Set dst (RShiftVI dst shift)); kvn@4001: format %{ "psrad $dst,$shift\t! arithmetic right shift packed4I" %} kvn@4001: ins_encode %{ kvn@4001: __ psrad($dst$$XMMRegister, (int)$shift$$constant); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4134: instruct vsra4I_reg(vecX dst, vecX src, vecS shift) %{ kvn@4001: predicate(UseAVX > 0 && n->as_Vector()->length() == 4); kvn@4001: match(Set dst (RShiftVI src shift)); kvn@4001: format %{ "vpsrad $dst,$src,$shift\t! arithmetic right shift packed4I" %} kvn@4001: ins_encode %{ kvn@4001: bool vector256 = false; kvn@4001: __ vpsrad($dst$$XMMRegister, $src$$XMMRegister, $shift$$XMMRegister, vector256); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4001: instruct vsra4I_reg_imm(vecX dst, vecX src, immI8 shift) %{ kvn@4001: predicate(UseAVX > 0 && n->as_Vector()->length() == 4); kvn@4001: match(Set dst (RShiftVI src shift)); kvn@4001: format %{ "vpsrad $dst,$src,$shift\t! arithmetic right shift packed4I" %} kvn@4001: ins_encode %{ kvn@4001: bool vector256 = false; kvn@4001: __ vpsrad($dst$$XMMRegister, $src$$XMMRegister, (int)$shift$$constant, vector256); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4134: instruct vsra8I_reg(vecY dst, vecY src, vecS shift) %{ kvn@4001: predicate(UseAVX > 1 && n->as_Vector()->length() == 8); kvn@4001: match(Set dst (RShiftVI src shift)); kvn@4001: format %{ "vpsrad $dst,$src,$shift\t! arithmetic right shift packed8I" %} kvn@4001: ins_encode %{ kvn@4001: bool vector256 = true; kvn@4001: __ vpsrad($dst$$XMMRegister, $src$$XMMRegister, $shift$$XMMRegister, vector256); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4001: instruct vsra8I_reg_imm(vecY dst, vecY src, immI8 shift) %{ kvn@4001: predicate(UseAVX > 1 && n->as_Vector()->length() == 8); kvn@4001: match(Set dst (RShiftVI src shift)); kvn@4001: format %{ "vpsrad $dst,$src,$shift\t! arithmetic right shift packed8I" %} kvn@4001: ins_encode %{ kvn@4001: bool vector256 = true; kvn@4001: __ vpsrad($dst$$XMMRegister, $src$$XMMRegister, (int)$shift$$constant, vector256); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4001: // There are no longs vector arithmetic right shift instructions. kvn@4001: kvn@4001: kvn@4001: // --------------------------------- AND -------------------------------------- kvn@4001: kvn@4001: instruct vand4B(vecS dst, vecS src) %{ kvn@4001: predicate(n->as_Vector()->length_in_bytes() == 4); kvn@4001: match(Set dst (AndV dst src)); kvn@4001: format %{ "pand $dst,$src\t! and vectors (4 bytes)" %} kvn@4001: ins_encode %{ kvn@4001: __ pand($dst$$XMMRegister, $src$$XMMRegister); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4001: instruct vand4B_reg(vecS dst, vecS src1, vecS src2) %{ kvn@4001: predicate(UseAVX > 0 && n->as_Vector()->length_in_bytes() == 4); kvn@4001: match(Set dst (AndV src1 src2)); kvn@4001: format %{ "vpand $dst,$src1,$src2\t! 
and vectors (4 bytes)" %} kvn@4001: ins_encode %{ kvn@4001: bool vector256 = false; kvn@4001: __ vpand($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4001: instruct vand8B(vecD dst, vecD src) %{ kvn@4001: predicate(n->as_Vector()->length_in_bytes() == 8); kvn@4001: match(Set dst (AndV dst src)); kvn@4001: format %{ "pand $dst,$src\t! and vectors (8 bytes)" %} kvn@4001: ins_encode %{ kvn@4001: __ pand($dst$$XMMRegister, $src$$XMMRegister); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4001: instruct vand8B_reg(vecD dst, vecD src1, vecD src2) %{ kvn@4001: predicate(UseAVX > 0 && n->as_Vector()->length_in_bytes() == 8); kvn@4001: match(Set dst (AndV src1 src2)); kvn@4001: format %{ "vpand $dst,$src1,$src2\t! and vectors (8 bytes)" %} kvn@4001: ins_encode %{ kvn@4001: bool vector256 = false; kvn@4001: __ vpand($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4001: instruct vand16B(vecX dst, vecX src) %{ kvn@4001: predicate(n->as_Vector()->length_in_bytes() == 16); kvn@4001: match(Set dst (AndV dst src)); kvn@4001: format %{ "pand $dst,$src\t! and vectors (16 bytes)" %} kvn@4001: ins_encode %{ kvn@4001: __ pand($dst$$XMMRegister, $src$$XMMRegister); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4001: instruct vand16B_reg(vecX dst, vecX src1, vecX src2) %{ kvn@4001: predicate(UseAVX > 0 && n->as_Vector()->length_in_bytes() == 16); kvn@4001: match(Set dst (AndV src1 src2)); kvn@4001: format %{ "vpand $dst,$src1,$src2\t! and vectors (16 bytes)" %} kvn@4001: ins_encode %{ kvn@4001: bool vector256 = false; kvn@4001: __ vpand($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4001: instruct vand16B_mem(vecX dst, vecX src, memory mem) %{ kvn@4001: predicate(UseAVX > 0 && n->as_Vector()->length_in_bytes() == 16); kvn@4001: match(Set dst (AndV src (LoadVector mem))); kvn@4001: format %{ "vpand $dst,$src,$mem\t! and vectors (16 bytes)" %} kvn@4001: ins_encode %{ kvn@4001: bool vector256 = false; kvn@4001: __ vpand($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector256); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4001: instruct vand32B_reg(vecY dst, vecY src1, vecY src2) %{ kvn@4001: predicate(UseAVX > 1 && n->as_Vector()->length_in_bytes() == 32); kvn@4001: match(Set dst (AndV src1 src2)); kvn@4001: format %{ "vpand $dst,$src1,$src2\t! and vectors (32 bytes)" %} kvn@4001: ins_encode %{ kvn@4001: bool vector256 = true; kvn@4001: __ vpand($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4001: instruct vand32B_mem(vecY dst, vecY src, memory mem) %{ kvn@4001: predicate(UseAVX > 1 && n->as_Vector()->length_in_bytes() == 32); kvn@4001: match(Set dst (AndV src (LoadVector mem))); kvn@4001: format %{ "vpand $dst,$src,$mem\t! 
and vectors (32 bytes)" %} kvn@4001: ins_encode %{ kvn@4001: bool vector256 = true; kvn@4001: __ vpand($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector256); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4001: // --------------------------------- OR --------------------------------------- kvn@4001: kvn@4001: instruct vor4B(vecS dst, vecS src) %{ kvn@4001: predicate(n->as_Vector()->length_in_bytes() == 4); kvn@4001: match(Set dst (OrV dst src)); kvn@4001: format %{ "por $dst,$src\t! or vectors (4 bytes)" %} kvn@4001: ins_encode %{ kvn@4001: __ por($dst$$XMMRegister, $src$$XMMRegister); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4001: instruct vor4B_reg(vecS dst, vecS src1, vecS src2) %{ kvn@4001: predicate(UseAVX > 0 && n->as_Vector()->length_in_bytes() == 4); kvn@4001: match(Set dst (OrV src1 src2)); kvn@4001: format %{ "vpor $dst,$src1,$src2\t! or vectors (4 bytes)" %} kvn@4001: ins_encode %{ kvn@4001: bool vector256 = false; kvn@4001: __ vpor($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4001: instruct vor8B(vecD dst, vecD src) %{ kvn@4001: predicate(n->as_Vector()->length_in_bytes() == 8); kvn@4001: match(Set dst (OrV dst src)); kvn@4001: format %{ "por $dst,$src\t! or vectors (8 bytes)" %} kvn@4001: ins_encode %{ kvn@4001: __ por($dst$$XMMRegister, $src$$XMMRegister); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4001: instruct vor8B_reg(vecD dst, vecD src1, vecD src2) %{ kvn@4001: predicate(UseAVX > 0 && n->as_Vector()->length_in_bytes() == 8); kvn@4001: match(Set dst (OrV src1 src2)); kvn@4001: format %{ "vpor $dst,$src1,$src2\t! or vectors (8 bytes)" %} kvn@4001: ins_encode %{ kvn@4001: bool vector256 = false; kvn@4001: __ vpor($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4001: instruct vor16B(vecX dst, vecX src) %{ kvn@4001: predicate(n->as_Vector()->length_in_bytes() == 16); kvn@4001: match(Set dst (OrV dst src)); kvn@4001: format %{ "por $dst,$src\t! or vectors (16 bytes)" %} kvn@4001: ins_encode %{ kvn@4001: __ por($dst$$XMMRegister, $src$$XMMRegister); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4001: instruct vor16B_reg(vecX dst, vecX src1, vecX src2) %{ kvn@4001: predicate(UseAVX > 0 && n->as_Vector()->length_in_bytes() == 16); kvn@4001: match(Set dst (OrV src1 src2)); kvn@4001: format %{ "vpor $dst,$src1,$src2\t! or vectors (16 bytes)" %} kvn@4001: ins_encode %{ kvn@4001: bool vector256 = false; kvn@4001: __ vpor($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4001: instruct vor16B_mem(vecX dst, vecX src, memory mem) %{ kvn@4001: predicate(UseAVX > 0 && n->as_Vector()->length_in_bytes() == 16); kvn@4001: match(Set dst (OrV src (LoadVector mem))); kvn@4001: format %{ "vpor $dst,$src,$mem\t! or vectors (16 bytes)" %} kvn@4001: ins_encode %{ kvn@4001: bool vector256 = false; kvn@4001: __ vpor($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector256); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4001: instruct vor32B_reg(vecY dst, vecY src1, vecY src2) %{ kvn@4001: predicate(UseAVX > 1 && n->as_Vector()->length_in_bytes() == 32); kvn@4001: match(Set dst (OrV src1 src2)); kvn@4001: format %{ "vpor $dst,$src1,$src2\t! 
or vectors (32 bytes)" %} kvn@4001: ins_encode %{ kvn@4001: bool vector256 = true; kvn@4001: __ vpor($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4001: instruct vor32B_mem(vecY dst, vecY src, memory mem) %{ kvn@4001: predicate(UseAVX > 1 && n->as_Vector()->length_in_bytes() == 32); kvn@4001: match(Set dst (OrV src (LoadVector mem))); kvn@4001: format %{ "vpor $dst,$src,$mem\t! or vectors (32 bytes)" %} kvn@4001: ins_encode %{ kvn@4001: bool vector256 = true; kvn@4001: __ vpor($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector256); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4001: // --------------------------------- XOR -------------------------------------- kvn@4001: kvn@4001: instruct vxor4B(vecS dst, vecS src) %{ kvn@4001: predicate(n->as_Vector()->length_in_bytes() == 4); kvn@4001: match(Set dst (XorV dst src)); kvn@4001: format %{ "pxor $dst,$src\t! xor vectors (4 bytes)" %} kvn@4001: ins_encode %{ kvn@4001: __ pxor($dst$$XMMRegister, $src$$XMMRegister); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4001: instruct vxor4B_reg(vecS dst, vecS src1, vecS src2) %{ kvn@4001: predicate(UseAVX > 0 && n->as_Vector()->length_in_bytes() == 4); kvn@4001: match(Set dst (XorV src1 src2)); kvn@4001: format %{ "vpxor $dst,$src1,$src2\t! xor vectors (4 bytes)" %} kvn@4001: ins_encode %{ kvn@4001: bool vector256 = false; kvn@4001: __ vpxor($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4001: instruct vxor8B(vecD dst, vecD src) %{ kvn@4001: predicate(n->as_Vector()->length_in_bytes() == 8); kvn@4001: match(Set dst (XorV dst src)); kvn@4001: format %{ "pxor $dst,$src\t! xor vectors (8 bytes)" %} kvn@4001: ins_encode %{ kvn@4001: __ pxor($dst$$XMMRegister, $src$$XMMRegister); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4001: instruct vxor8B_reg(vecD dst, vecD src1, vecD src2) %{ kvn@4001: predicate(UseAVX > 0 && n->as_Vector()->length_in_bytes() == 8); kvn@4001: match(Set dst (XorV src1 src2)); kvn@4001: format %{ "vpxor $dst,$src1,$src2\t! xor vectors (8 bytes)" %} kvn@4001: ins_encode %{ kvn@4001: bool vector256 = false; kvn@4001: __ vpxor($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4001: instruct vxor16B(vecX dst, vecX src) %{ kvn@4001: predicate(n->as_Vector()->length_in_bytes() == 16); kvn@4001: match(Set dst (XorV dst src)); kvn@4001: format %{ "pxor $dst,$src\t! xor vectors (16 bytes)" %} kvn@4001: ins_encode %{ kvn@4001: __ pxor($dst$$XMMRegister, $src$$XMMRegister); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4001: instruct vxor16B_reg(vecX dst, vecX src1, vecX src2) %{ kvn@4001: predicate(UseAVX > 0 && n->as_Vector()->length_in_bytes() == 16); kvn@4001: match(Set dst (XorV src1 src2)); kvn@4001: format %{ "vpxor $dst,$src1,$src2\t! 
xor vectors (16 bytes)" %} kvn@4001: ins_encode %{ kvn@4001: bool vector256 = false; kvn@4001: __ vpxor($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4001: instruct vxor16B_mem(vecX dst, vecX src, memory mem) %{ kvn@4001: predicate(UseAVX > 0 && n->as_Vector()->length_in_bytes() == 16); kvn@4001: match(Set dst (XorV src (LoadVector mem))); kvn@4001: format %{ "vpxor $dst,$src,$mem\t! xor vectors (16 bytes)" %} kvn@4001: ins_encode %{ kvn@4001: bool vector256 = false; kvn@4001: __ vpxor($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector256); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4001: instruct vxor32B_reg(vecY dst, vecY src1, vecY src2) %{ kvn@4001: predicate(UseAVX > 1 && n->as_Vector()->length_in_bytes() == 32); kvn@4001: match(Set dst (XorV src1 src2)); kvn@4001: format %{ "vpxor $dst,$src1,$src2\t! xor vectors (32 bytes)" %} kvn@4001: ins_encode %{ kvn@4001: bool vector256 = true; kvn@4001: __ vpxor($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001: kvn@4001: instruct vxor32B_mem(vecY dst, vecY src, memory mem) %{ kvn@4001: predicate(UseAVX > 1 && n->as_Vector()->length_in_bytes() == 32); kvn@4001: match(Set dst (XorV src (LoadVector mem))); kvn@4001: format %{ "vpxor $dst,$src,$mem\t! xor vectors (32 bytes)" %} kvn@4001: ins_encode %{ kvn@4001: bool vector256 = true; kvn@4001: __ vpxor($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector256); kvn@4001: %} kvn@4001: ins_pipe( pipe_slow ); kvn@4001: %} kvn@4001:
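// Notes on the rules above (a sketch, not tied to any particular changeset):
// The vector shift and bitwise AND/OR/XOR rules in this file are matched against
// ideal nodes (URShiftVI, RShiftVS, AndV, OrV, XorV, ...) that C2's superword pass
// creates when it auto-vectorizes simple counted loops over arrays. For example, a
// Java loop of roughly the following shape is the kind of code that can produce
// AndV and URShiftVI nodes (illustrative only; the method name and loop shape are
// assumptions, and whether vectorization happens also depends on UseSuperWord,
// UseAVX and the loop form):
//
//   static void maskAndShift(int[] src, int[] dst, int mask, int sh) {
//     for (int i = 0; i < src.length; i++) {
//       dst[i] = (src[i] & mask) >>> sh;  // '&' -> AndV, '>>>' -> URShiftVI when vectorized
//     }
//   }
//
// With UseAVX > 1 the 256-bit (vecY) forms above apply; otherwise the 128-bit SSE
// forms are used. As noted earlier, there is no rule for arithmetic right shift of
// packed longs, so loops using '>>' on long elements are not vectorized by these rules.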