src/cpu/x86/vm/nativeInst_x86.cpp

changeset 739
dc7f315e41f7
parent 631
d1605aabd0a1
child 1907
c18cbe5936b8
     1.1 --- a/src/cpu/x86/vm/nativeInst_x86.cpp	Tue Aug 26 15:49:40 2008 -0700
     1.2 +++ b/src/cpu/x86/vm/nativeInst_x86.cpp	Wed Aug 27 00:21:55 2008 -0700
     1.3 @@ -223,49 +223,150 @@
     1.4  
     1.5  //-------------------------------------------------------------------
     1.6  
     1.7 -#ifndef AMD64
     1.8 +int NativeMovRegMem::instruction_start() const {
     1.9 +  int off = 0;
    1.10 +  u_char instr_0 = ubyte_at(off);
    1.11  
    1.12 -void NativeMovRegMem::copy_instruction_to(address new_instruction_address) {
    1.13 -  int inst_size = instruction_size;
    1.14 +  // First check to see if we have a (prefixed or not) xor
    1.15 +  if ( instr_0 >= instruction_prefix_wide_lo &&      // 0x40
    1.16 +       instr_0 <= instruction_prefix_wide_hi) { // 0x4f
    1.17 +    off++;
    1.18 +    instr_0 = ubyte_at(off);
    1.19 +  }
    1.20  
    1.21 -  // See if there's an instruction size prefix override.
    1.22 -  if ( *(address(this))   == instruction_operandsize_prefix &&
    1.23 -       *(address(this)+1) != instruction_code_xmm_code ) { // Not SSE instr
    1.24 -    inst_size += 1;
    1.25 +  if (instr_0 == instruction_code_xor) {
    1.26 +    off += 2;
    1.27 +    instr_0 = ubyte_at(off);
    1.28    }
    1.29 -  if ( *(address(this)) == instruction_extended_prefix ) inst_size += 1;
    1.30  
    1.31 -  for (int i = 0; i < instruction_size; i++) {
    1.32 -    *(new_instruction_address + i) = *(address(this) + i);
    1.33 +  // Now look for the real instruction and the many prefix/size specifiers.
    1.34 +
    1.35 +  if (instr_0 == instruction_operandsize_prefix ) {  // 0x66
    1.36 +    off++; // Not SSE instructions
    1.37 +    instr_0 = ubyte_at(off);
    1.38    }
    1.39 +
    1.40 +  if ( instr_0 == instruction_code_xmm_ss_prefix ||      // 0xf3
    1.41 +       instr_0 == instruction_code_xmm_sd_prefix) { // 0xf2
    1.42 +    off++;
    1.43 +    instr_0 = ubyte_at(off);
    1.44 +  }
    1.45 +
    1.46 +  if ( instr_0 >= instruction_prefix_wide_lo &&      // 0x40
    1.47 +       instr_0 <= instruction_prefix_wide_hi) { // 0x4f
    1.48 +    off++;
    1.49 +    instr_0 = ubyte_at(off);
    1.50 +  }
    1.51 +
    1.52 +
    1.53 +  if (instr_0 == instruction_extended_prefix ) {  // 0x0f
    1.54 +    off++;
    1.55 +  }
    1.56 +
    1.57 +  return off;
    1.58 +}
    1.59 +
// Address of the first byte of the real instruction, i.e. past any
// xor lead-in and prefix bytes skipped by instruction_start().
address NativeMovRegMem::instruction_address() const {
  return addr_at(instruction_start());
}
    1.63 +
    1.64 +address NativeMovRegMem::next_instruction_address() const {
    1.65 +  address ret = instruction_address() + instruction_size;
    1.66 +  u_char instr_0 =  *(u_char*) instruction_address();
    1.67 +  switch (instr_0) {
    1.68 +  case instruction_operandsize_prefix:
    1.69 +
    1.70 +    fatal("should have skipped instruction_operandsize_prefix");
    1.71 +    break;
    1.72 +
    1.73 +  case instruction_extended_prefix:
    1.74 +    fatal("should have skipped instruction_extended_prefix");
    1.75 +    break;
    1.76 +
    1.77 +  case instruction_code_mem2reg_movslq: // 0x63
    1.78 +  case instruction_code_mem2reg_movzxb: // 0xB6
    1.79 +  case instruction_code_mem2reg_movsxb: // 0xBE
    1.80 +  case instruction_code_mem2reg_movzxw: // 0xB7
    1.81 +  case instruction_code_mem2reg_movsxw: // 0xBF
    1.82 +  case instruction_code_reg2mem:        // 0x89 (q/l)
    1.83 +  case instruction_code_mem2reg:        // 0x8B (q/l)
    1.84 +  case instruction_code_reg2memb:       // 0x88
    1.85 +  case instruction_code_mem2regb:       // 0x8a
    1.86 +
    1.87 +  case instruction_code_float_s:        // 0xd9 fld_s a
    1.88 +  case instruction_code_float_d:        // 0xdd fld_d a
    1.89 +
    1.90 +  case instruction_code_xmm_load:       // 0x10
    1.91 +  case instruction_code_xmm_store:      // 0x11
    1.92 +  case instruction_code_xmm_lpd:        // 0x12
    1.93 +    {
    1.94 +      // If there is an SIB then instruction is longer than expected
    1.95 +      u_char mod_rm = *(u_char*)(instruction_address() + 1);
    1.96 +      if ((mod_rm & 7) == 0x4) {
    1.97 +        ret++;
    1.98 +      }
    1.99 +    }
   1.100 +  case instruction_code_xor:
   1.101 +    fatal("should have skipped xor lead in");
   1.102 +    break;
   1.103 +
   1.104 +  default:
   1.105 +    fatal("not a NativeMovRegMem");
   1.106 +  }
   1.107 +  return ret;
   1.108 +
   1.109 +}
   1.110 +
   1.111 +int NativeMovRegMem::offset() const{
   1.112 +  int off = data_offset + instruction_start();
   1.113 +  u_char mod_rm = *(u_char*)(instruction_address() + 1);
   1.114 +  // nnnn(r12|rsp) isn't coded as simple mod/rm since that is
   1.115 +  // the encoding to use an SIB byte. Which will have the nnnn
   1.116 +  // field off by one byte
   1.117 +  if ((mod_rm & 7) == 0x4) {
   1.118 +    off++;
   1.119 +  }
   1.120 +  return int_at(off);
   1.121 +}
   1.122 +
   1.123 +void NativeMovRegMem::set_offset(int x) {
   1.124 +  int off = data_offset + instruction_start();
   1.125 +  u_char mod_rm = *(u_char*)(instruction_address() + 1);
   1.126 +  // nnnn(r12|rsp) isn't coded as simple mod/rm since that is
   1.127 +  // the encoding to use an SIB byte. Which will have the nnnn
   1.128 +  // field off by one byte
   1.129 +  if ((mod_rm & 7) == 0x4) {
   1.130 +    off++;
   1.131 +  }
   1.132 +  set_int_at(off, x);
   1.133  }
   1.134  
   1.135  void NativeMovRegMem::verify() {
   1.136    // make sure code pattern is actually a mov [reg+offset], reg instruction
   1.137    u_char test_byte = *(u_char*)instruction_address();
   1.138 -  if ( ! ( (test_byte == instruction_code_reg2memb)
   1.139 -      || (test_byte == instruction_code_mem2regb)
   1.140 -      || (test_byte == instruction_code_mem2regl)
   1.141 -      || (test_byte == instruction_code_reg2meml)
   1.142 -      || (test_byte == instruction_code_mem2reg_movzxb )
   1.143 -      || (test_byte == instruction_code_mem2reg_movzxw )
   1.144 -      || (test_byte == instruction_code_mem2reg_movsxb )
   1.145 -      || (test_byte == instruction_code_mem2reg_movsxw )
   1.146 -      || (test_byte == instruction_code_float_s)
   1.147 -      || (test_byte == instruction_code_float_d)
   1.148 -      || (test_byte == instruction_code_long_volatile) ) )
   1.149 -  {
   1.150 -    u_char byte1 = ((u_char*)instruction_address())[1];
   1.151 -    u_char byte2 = ((u_char*)instruction_address())[2];
   1.152 -    if ((test_byte != instruction_code_xmm_ss_prefix &&
   1.153 -         test_byte != instruction_code_xmm_sd_prefix &&
   1.154 -         test_byte != instruction_operandsize_prefix) ||
   1.155 -        byte1 != instruction_code_xmm_code ||
   1.156 -        (byte2 != instruction_code_xmm_load &&
   1.157 -         byte2 != instruction_code_xmm_lpd  &&
   1.158 -         byte2 != instruction_code_xmm_store)) {
   1.159 +  switch (test_byte) {
   1.160 +    case instruction_code_reg2memb:  // 0x88 movb a, r
   1.161 +    case instruction_code_reg2mem:   // 0x89 movl a, r (can be movq in 64bit)
   1.162 +    case instruction_code_mem2regb:  // 0x8a movb r, a
   1.163 +    case instruction_code_mem2reg:   // 0x8b movl r, a (can be movq in 64bit)
   1.164 +      break;
   1.165 +
   1.166 +    case instruction_code_mem2reg_movslq: // 0x63 movsql r, a
   1.167 +    case instruction_code_mem2reg_movzxb: // 0xb6 movzbl r, a (movzxb)
   1.168 +    case instruction_code_mem2reg_movzxw: // 0xb7 movzwl r, a (movzxw)
   1.169 +    case instruction_code_mem2reg_movsxb: // 0xbe movsbl r, a (movsxb)
   1.170 +    case instruction_code_mem2reg_movsxw: // 0xbf  movswl r, a (movsxw)
   1.171 +      break;
   1.172 +
   1.173 +    case instruction_code_float_s:   // 0xd9 fld_s a
   1.174 +    case instruction_code_float_d:   // 0xdd fld_d a
   1.175 +    case instruction_code_xmm_load:  // 0x10 movsd xmm, a
   1.176 +    case instruction_code_xmm_store: // 0x11 movsd a, xmm
   1.177 +    case instruction_code_xmm_lpd:   // 0x12 movlpd xmm, a
   1.178 +      break;
   1.179 +
   1.180 +    default:
   1.181            fatal ("not a mov [reg+offs], reg instruction");
   1.182 -    }
   1.183    }
   1.184  }
   1.185  
   1.186 @@ -279,7 +380,14 @@
   1.187  void NativeLoadAddress::verify() {
   1.188    // make sure code pattern is actually a mov [reg+offset], reg instruction
   1.189    u_char test_byte = *(u_char*)instruction_address();
   1.190 -  if ( ! (test_byte == instruction_code) ) {
   1.191 +#ifdef _LP64
   1.192 +  if ( (test_byte == instruction_prefix_wide ||
   1.193 +        test_byte == instruction_prefix_wide_extended) ) {
   1.194 +    test_byte = *(u_char*)(instruction_address() + 1);
   1.195 +  }
   1.196 +#endif // _LP64
   1.197 +  if ( ! ((test_byte == lea_instruction_code)
   1.198 +          LP64_ONLY(|| (test_byte == mov64_instruction_code) ))) {
   1.199      fatal ("not a lea reg, [reg+offs] instruction");
   1.200    }
   1.201  }
   1.202 @@ -289,8 +397,6 @@
   1.203    tty->print_cr("0x%x: lea [reg + %x], reg", instruction_address(), offset());
   1.204  }
   1.205  
   1.206 -#endif // !AMD64
   1.207 -
   1.208  //--------------------------------------------------------------------------------
   1.209  
   1.210  void NativeJump::verify() {

mercurial