src/cpu/x86/vm/assembler_x86.hpp

changeset 3882
8c92982cbbc4
parent 3844
e7715c222897
child 3929
2c368ea3e844
     1.1 --- a/src/cpu/x86/vm/assembler_x86.hpp	Thu Jun 14 14:59:52 2012 -0700
     1.2 +++ b/src/cpu/x86/vm/assembler_x86.hpp	Fri Jun 15 01:25:19 2012 -0700
     1.3 @@ -1,5 +1,5 @@
     1.4  /*
     1.5 - * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
     1.6 + * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
     1.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     1.8   *
     1.9   * This code is free software; you can redistribute it and/or modify it
    1.10 @@ -591,8 +591,9 @@
    1.11  
    1.12    void vex_prefix(XMMRegister dst, XMMRegister nds, Address src,
    1.13                    VexSimdPrefix pre, bool vector256 = false) {
    1.14 -     vex_prefix(src, nds->encoding(), dst->encoding(),
    1.15 -                pre, VEX_OPCODE_0F, false, vector256);
    1.16 +    int dst_enc = dst->encoding();
    1.17 +    int nds_enc = nds->is_valid() ? nds->encoding() : 0;
    1.18 +    vex_prefix(src, nds_enc, dst_enc, pre, VEX_OPCODE_0F, false, vector256);
    1.19    }
    1.20  
    1.21    int  vex_prefix_and_encode(int dst_enc, int nds_enc, int src_enc,
    1.22 @@ -600,9 +601,12 @@
    1.23                               bool vex_w, bool vector256);
    1.24  
    1.25    int  vex_prefix_and_encode(XMMRegister dst, XMMRegister nds, XMMRegister src,
    1.26 -                             VexSimdPrefix pre, bool vector256 = false) {
    1.27 -     return vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(),
    1.28 -                                  pre, VEX_OPCODE_0F, false, vector256);
    1.29 +                             VexSimdPrefix pre, bool vector256 = false,
    1.30 +                             VexOpcode opc = VEX_OPCODE_0F) {
    1.31 +    int src_enc = src->encoding();
    1.32 +    int dst_enc = dst->encoding();
    1.33 +    int nds_enc = nds->is_valid() ? nds->encoding() : 0;
    1.34 +    return vex_prefix_and_encode(dst_enc, nds_enc, src_enc, pre, opc, false, vector256);
    1.35    }
    1.36  
    1.37    void simd_prefix(XMMRegister xreg, XMMRegister nds, Address adr,
    1.38 @@ -1261,6 +1265,7 @@
    1.39    void movdl(XMMRegister dst, Register src);
    1.40    void movdl(Register dst, XMMRegister src);
    1.41    void movdl(XMMRegister dst, Address src);
    1.42 +  void movdl(Address dst, XMMRegister src);
    1.43  
    1.44    // Move Double Quadword
    1.45    void movdq(XMMRegister dst, Register src);
    1.46 @@ -1274,6 +1279,14 @@
    1.47    void movdqu(XMMRegister dst, Address src);
    1.48    void movdqu(XMMRegister dst, XMMRegister src);
    1.49  
    1.50 +  // Move Unaligned 256bit Vector
    1.51 +  void vmovdqu(Address dst, XMMRegister src);
    1.52 +  void vmovdqu(XMMRegister dst, Address src);
    1.53 +  void vmovdqu(XMMRegister dst, XMMRegister src);
    1.54 +
    1.55 +  // Move lower 64bit to high 64bit in 128bit register
    1.56 +  void movlhps(XMMRegister dst, XMMRegister src);
    1.57 +
    1.58    void movl(Register dst, int32_t imm32);
    1.59    void movl(Address dst, int32_t imm32);
    1.60    void movl(Register dst, Register src);
    1.61 @@ -1615,6 +1628,17 @@
    1.62    void vxorpd(XMMRegister dst, XMMRegister nds, Address src);
    1.63    void vxorps(XMMRegister dst, XMMRegister nds, Address src);
    1.64  
     1.65 +  // AVX Vector instructions.
    1.66 +  void vxorpd(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256);
    1.67 +  void vxorps(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256);
    1.68 +  void vinsertf128h(XMMRegister dst, XMMRegister nds, XMMRegister src);
    1.69 +
    1.70 +  // AVX instruction which is used to clear upper 128 bits of YMM registers and
     1.71 +  // to avoid transition penalty between AVX and SSE states. There is no
    1.72 +  // penalty if legacy SSE instructions are encoded using VEX prefix because
    1.73 +  // they always clear upper 128 bits. It should be used before calling
    1.74 +  // runtime code and native libraries.
    1.75 +  void vzeroupper();
    1.76  
    1.77   protected:
    1.78    // Next instructions require address alignment 16 bytes SSE mode.
    1.79 @@ -2529,9 +2553,13 @@
    1.80    void vsubss(XMMRegister dst, XMMRegister nds, Address src)     { Assembler::vsubss(dst, nds, src); }
    1.81    void vsubss(XMMRegister dst, XMMRegister nds, AddressLiteral src);
    1.82  
    1.83 +  // AVX Vector instructions
    1.84 +
    1.85 +  void vxorpd(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) { Assembler::vxorpd(dst, nds, src, vector256); }
    1.86    void vxorpd(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vxorpd(dst, nds, src); }
    1.87    void vxorpd(XMMRegister dst, XMMRegister nds, AddressLiteral src);
    1.88  
    1.89 +  void vxorps(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) { Assembler::vxorps(dst, nds, src, vector256); }
    1.90    void vxorps(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vxorps(dst, nds, src); }
    1.91    void vxorps(XMMRegister dst, XMMRegister nds, AddressLiteral src);
    1.92  

mercurial