Wed, 31 Oct 2018 14:29:13 +0800
#7520: added instructs matching StorePConditional and LoadPLocked
src/cpu/mips/vm/mips_64.ad | file | annotate | diff | comparison | revisions |
1.1 --- a/src/cpu/mips/vm/mips_64.ad Tue Oct 30 18:43:19 2018 +0800 1.2 +++ b/src/cpu/mips/vm/mips_64.ad Wed Oct 31 14:29:13 2018 +0800 1.3 @@ -2689,6 +2689,82 @@ 1.4 } 1.5 %} 1.6 1.7 + // Load acquire. 1.8 + // load_P_enc + sync 1.9 + enc_class load_P_enc_ac (mRegP dst, memory mem) %{ 1.10 + MacroAssembler _masm(&cbuf); 1.11 + int dst = $dst$$reg; 1.12 + int base = $mem$$base; 1.13 + int index = $mem$$index; 1.14 + int scale = $mem$$scale; 1.15 + int disp = $mem$$disp; 1.16 + relocInfo::relocType disp_reloc = $mem->disp_reloc(); 1.17 + assert(disp_reloc == relocInfo::none, "cannot have disp"); 1.18 + 1.19 + if( index != 0 ) { 1.20 + if ( UseLoongsonISA ) { 1.21 + if ( Assembler::is_simm(disp, 8) ) { 1.22 + if ( scale != 0 ) { 1.23 + __ dsll(AT, as_Register(index), scale); 1.24 + __ gsldx(as_Register(dst), as_Register(base), AT, disp); 1.25 + } else { 1.26 + __ gsldx(as_Register(dst), as_Register(base), as_Register(index), disp); 1.27 + } 1.28 + } else if ( Assembler::is_simm16(disp) ){ 1.29 + if ( scale != 0 ) { 1.30 + __ dsll(AT, as_Register(index), scale); 1.31 + __ daddu(AT, AT, as_Register(base)); 1.32 + } else { 1.33 + __ daddu(AT, as_Register(index), as_Register(base)); 1.34 + } 1.35 + __ ld(as_Register(dst), AT, disp); 1.36 + } else { 1.37 + if ( scale != 0 ) { 1.38 + __ dsll(AT, as_Register(index), scale); 1.39 + __ move(T9, disp); 1.40 + __ daddu(AT, AT, T9); 1.41 + } else { 1.42 + __ move(T9, disp); 1.43 + __ daddu(AT, as_Register(index), T9); 1.44 + } 1.45 + __ gsldx(as_Register(dst), as_Register(base), AT, 0); 1.46 + } 1.47 + } else { //not use loongson isa 1.48 + if (scale == 0) { 1.49 + __ daddu(AT, as_Register(base), as_Register(index)); 1.50 + } else { 1.51 + __ dsll(AT, as_Register(index), scale); 1.52 + __ daddu(AT, as_Register(base), AT); 1.53 + } 1.54 + if( Assembler::is_simm16(disp) ) { 1.55 + __ ld(as_Register(dst), AT, disp); 1.56 + } else { 1.57 + __ set64(T9, disp); 1.58 + __ daddu(AT, AT, T9); 1.59 + __ ld(as_Register(dst), AT, 0); 1.60 + 
} 1.61 + } 1.62 + } else { 1.63 + if ( UseLoongsonISA ) { 1.64 + if ( Assembler::is_simm16(disp) ){ 1.65 + __ ld(as_Register(dst), as_Register(base), disp); 1.66 + } else { 1.67 + __ set64(T9, disp); 1.68 + __ gsldx(as_Register(dst), as_Register(base), T9, 0); 1.69 + } 1.70 + } else { //not use loongson isa 1.71 + if( Assembler::is_simm16(disp) ) { 1.72 + __ ld(as_Register(dst), as_Register(base), disp); 1.73 + } else { 1.74 + __ set64(T9, disp); 1.75 + __ daddu(AT, as_Register(base), T9); 1.76 + __ ld(as_Register(dst), AT, 0); 1.77 + } 1.78 + } 1.79 + } 1.80 + __ sync(); 1.81 + %} 1.82 + 1.83 enc_class store_P_reg_enc (memory mem, mRegP src) %{ 1.84 MacroAssembler _masm(&cbuf); 1.85 int src = $src$$reg; 1.86 @@ -14008,6 +14084,33 @@ 1.87 ins_pipe( pipe_slow ); 1.88 %} 1.89 1.90 +// Conditional-store of the updated heap-top. 1.91 +// Used during allocation of the shared heap. 1.92 + 1.93 +instruct storePConditional( memory heap_top_ptr, mRegP oldval, mRegP newval, FlagsReg cr ) %{ 1.94 + match(Set cr (StorePConditional heap_top_ptr (Binary oldval newval))); 1.95 + 1.96 + format %{ "CMPXCHG $heap_top_ptr, $newval\t# (ptr) @storePConditional " 1.97 + "If $oldval == $heap_top_ptr then store $newval into $heap_top_ptr" %} 1.98 + ins_encode%{ 1.99 + Register oldval = $oldval$$Register; 1.100 + Register newval = $newval$$Register; 1.101 + Address addr(as_Register($heap_top_ptr$$base), $heap_top_ptr$$disp); 1.102 + 1.103 + int index = $heap_top_ptr$$index; 1.104 + int scale = $heap_top_ptr$$scale; 1.105 + int disp = $heap_top_ptr$$disp; 1.106 + 1.107 + guarantee(Assembler::is_simm16(disp), ""); 1.108 + 1.109 + if( index != 0 ) { 1.110 + __ stop("in storePConditional: index != 0"); 1.111 + } else { 1.112 + __ cmpxchg(newval, addr, oldval); 1.113 + } 1.114 + %} 1.115 + ins_pipe( long_memory_op ); 1.116 +%} 1.117 1.118 // Conditional-store of an int value. 1.119 // ZF flag is set on success, reset otherwise. Implemented with a CMPXCHG on Intel. 
1.120 @@ -14077,6 +14180,19 @@ 1.121 ins_pipe( long_memory_op ); 1.122 %} 1.123 1.124 +// Implement LoadPLocked. Must be ordered against changes of the memory location 1.125 +// by storePConditional. 1.126 +instruct loadPLocked(mRegP dst, memory mem) %{ 1.127 + match(Set dst (LoadPLocked mem)); 1.128 + ins_cost(MEMORY_REF_COST); 1.129 + 1.130 + format %{ "ld $dst, $mem #@loadPLocked\n\t" 1.131 + "sync" %} 1.132 + size(12); 1.133 + ins_encode (load_P_enc_ac(dst, mem)); 1.134 + ins_pipe( ialu_loadI ); 1.135 +%} 1.136 + 1.137 1.138 instruct compareAndSwapI( mRegI res, mRegP mem_ptr, mS2RegI oldval, mRegI newval) %{ 1.139 match(Set res (CompareAndSwapI mem_ptr (Binary oldval newval)));