--- a/src/cpu/x86/vm/x86_32.ad	Tue Jun 12 09:47:23 2012 -0700
+++ b/src/cpu/x86/vm/x86_32.ad	Tue Jun 12 14:31:44 2012 -0700
@@ -7800,50 +7800,6 @@
   ins_pipe( ialu_reg_mem );
 %}
 
-// LoadLong-locked - same as a volatile long load when used with compare-swap
-instruct loadLLocked(stackSlotL dst, memory mem) %{
-  predicate(UseSSE<=1);
-  match(Set dst (LoadLLocked mem));
-
-  ins_cost(200);
-  format %{ "FILD   $mem\t# Atomic volatile long load\n\t"
-            "FISTp  $dst" %}
-  ins_encode(enc_loadL_volatile(mem,dst));
-  ins_pipe( fpu_reg_mem );
-%}
-
-instruct loadLX_Locked(stackSlotL dst, memory mem, regD tmp) %{
-  predicate(UseSSE>=2);
-  match(Set dst (LoadLLocked mem));
-  effect(TEMP tmp);
-  ins_cost(180);
-  format %{ "MOVSD  $tmp,$mem\t# Atomic volatile long load\n\t"
-            "MOVSD  $dst,$tmp" %}
-  ins_encode %{
-    __ movdbl($tmp$$XMMRegister, $mem$$Address);
-    __ movdbl(Address(rsp, $dst$$disp), $tmp$$XMMRegister);
-  %}
-  ins_pipe( pipe_slow );
-%}
-
-instruct loadLX_reg_Locked(eRegL dst, memory mem, regD tmp) %{
-  predicate(UseSSE>=2);
-  match(Set dst (LoadLLocked mem));
-  effect(TEMP tmp);
-  ins_cost(160);
-  format %{ "MOVSD  $tmp,$mem\t# Atomic volatile long load\n\t"
-            "MOVD   $dst.lo,$tmp\n\t"
-            "PSRLQ  $tmp,32\n\t"
-            "MOVD   $dst.hi,$tmp" %}
-  ins_encode %{
-    __ movdbl($tmp$$XMMRegister, $mem$$Address);
-    __ movdl($dst$$Register, $tmp$$XMMRegister);
-    __ psrlq($tmp$$XMMRegister, 32);
-    __ movdl(HIGH_FROM_LOW($dst$$Register), $tmp$$XMMRegister);
-  %}
-  ins_pipe( pipe_slow );
-%}
-
 // Conditional-store of the updated heap-top.
 // Used during allocation of the shared heap.
 // Sets flags (EQ) on success.  Implemented with a CMPXCHG on Intel.