1.1 --- a/src/cpu/x86/vm/macroAssembler_x86.hpp Tue Jul 02 07:51:31 2013 +0200 1.2 +++ b/src/cpu/x86/vm/macroAssembler_x86.hpp Tue Jul 02 20:42:12 2013 -0400 1.3 @@ -1,5 +1,5 @@ 1.4 /* 1.5 - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. 1.6 + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. 1.7 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 1.8 * 1.9 * This code is free software; you can redistribute it and/or modify it 1.10 @@ -899,6 +899,11 @@ 1.11 void movdqu(XMMRegister dst, XMMRegister src) { Assembler::movdqu(dst, src); } 1.12 void movdqu(XMMRegister dst, AddressLiteral src); 1.13 1.14 + // Move Aligned Double Quadword 1.15 + void movdqa(XMMRegister dst, Address src) { Assembler::movdqa(dst, src); } 1.16 + void movdqa(XMMRegister dst, XMMRegister src) { Assembler::movdqa(dst, src); } 1.17 + void movdqa(XMMRegister dst, AddressLiteral src); 1.18 + 1.19 void movsd(XMMRegister dst, XMMRegister src) { Assembler::movsd(dst, src); } 1.20 void movsd(Address dst, XMMRegister src) { Assembler::movsd(dst, src); } 1.21 void movsd(XMMRegister dst, Address src) { Assembler::movsd(dst, src); } 1.22 @@ -1027,6 +1032,16 @@ 1.23 Assembler::vinsertf128h(dst, nds, src); 1.24 } 1.25 1.26 + // Carry-Less Multiplication Quadword 1.27 + void vpclmulldq(XMMRegister dst, XMMRegister nds, XMMRegister src) { 1.28 + // 0x00 - multiply lower 64 bits [0:63] 1.29 + Assembler::vpclmulqdq(dst, nds, src, 0x00); 1.30 + } 1.31 + void vpclmulhdq(XMMRegister dst, XMMRegister nds, XMMRegister src) { 1.32 + // 0x11 - multiply upper 64 bits [64:127] 1.33 + Assembler::vpclmulqdq(dst, nds, src, 0x11); 1.34 + } 1.35 + 1.36 // Data 1.37 1.38 void cmov32( Condition cc, Register dst, Address src); 1.39 @@ -1143,6 +1158,16 @@ 1.40 XMMRegister tmp1, XMMRegister tmp2, XMMRegister tmp3, 1.41 XMMRegister tmp4, Register tmp5, Register result); 1.42 1.43 + // CRC32 code for java.util.zip.CRC32::updateBytes() intrinsic. 
1.44 + void update_byte_crc32(Register crc, Register val, Register table); 1.45 + void kernel_crc32(Register crc, Register buf, Register len, Register table, Register tmp); 1.46 + // Fold 128-bit data chunk 1.47 + void fold_128bit_crc32(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, Register buf, int offset); 1.48 + void fold_128bit_crc32(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, XMMRegister xbuf); 1.49 + // Fold 8-bit data 1.50 + void fold_8bit_crc32(Register crc, Register table, Register tmp); 1.51 + void fold_8bit_crc32(XMMRegister crc, Register table, XMMRegister xtmp, Register tmp); 1.52 + 1.53 #undef VIRTUAL 1.54 1.55 };