src/cpu/mips/vm/mips_64.ad

changeset 385
ee9f465c10a9
parent 384
2d935408fc69
child 386
f50649f9eda6
     1.1 --- a/src/cpu/mips/vm/mips_64.ad	Wed Mar 22 09:33:20 2017 -0400
     1.2 +++ b/src/cpu/mips/vm/mips_64.ad	Wed Mar 22 11:43:05 2017 -0400
     1.3 @@ -1753,7 +1753,7 @@
     1.4    %}
     1.5  
     1.6    //Load byte unsigned
     1.7 -  enc_class load_UB_enc (mRegI dst, memory mem) %{
     1.8 +  enc_class load_UB_enc (mRegI dst, umemory mem) %{
     1.9       MacroAssembler _masm(&cbuf);
    1.10       int  dst = $dst$$reg;
    1.11       int  base = $mem$$base;
    1.12 @@ -1761,29 +1761,8 @@
    1.13       int  scale = $mem$$scale;
    1.14       int  disp = $mem$$disp;
    1.15  
    1.16 -     if( index != 0 ) {
    1.17 -        if (scale == 0) {
    1.18 -           __ daddu(AT, as_Register(base), as_Register(index));
    1.19 -        } else {
    1.20 -           __ dsll(AT, as_Register(index), scale);
    1.21 -           __ daddu(AT, as_Register(base), AT);
    1.22 -        }
    1.23 -        if( Assembler::is_simm16(disp) ) { 
    1.24 -           __ lbu(as_Register(dst), AT, disp);
    1.25 -        } else {
    1.26 -           __ move(T9, disp);
    1.27 -           __ daddu(AT, AT, T9); 
    1.28 -           __ lbu(as_Register(dst), AT, 0);
    1.29 -        }    
    1.30 -     } else {
    1.31 -        if( Assembler::is_simm16(disp) ) { 
    1.32 -           __ lbu(as_Register(dst), as_Register(base), disp);
    1.33 -        } else {
    1.34 -           __ move(T9, disp);   
    1.35 -           __ daddu(AT, as_Register(base), T9); 
    1.36 -           __ lbu(as_Register(dst), AT, 0);
    1.37 -        }    
    1.38 -     }
    1.39 +     assert(index == 0, "no index");
    1.40 +     __ lbu(as_Register(dst), as_Register(base), disp);
    1.41    %}
    1.42  
    1.43    enc_class store_B_reg_enc (memory mem, mRegI src) %{
    1.44 @@ -1795,334 +1774,58 @@
    1.45       int  disp = $mem$$disp;
    1.46  
    1.47       if( index != 0 ) {
    1.48 -        if (scale == 0) {
    1.49 -           if( Assembler::is_simm(disp, 8) ) { 
    1.50 -              if (UseLoongsonISA) {
    1.51 -                 __ gssbx(as_Register(src), as_Register(base), as_Register(index), disp);
    1.52 -              } else {
    1.53 -                 __ addu(AT, as_Register(base), as_Register(index));
    1.54 -                 __ sb(as_Register(src), AT, disp);
    1.55 -              }
    1.56 -           } else if( Assembler::is_simm16(disp) ) { 
    1.57 -              __ addu(AT, as_Register(base), as_Register(index));
    1.58 -              __ sb(as_Register(src), AT, disp);
    1.59 -           } else {
    1.60 -              __ addu(AT, as_Register(base), as_Register(index));
    1.61 -              __ move(T9, disp);
    1.62 -              if (UseLoongsonISA) {
    1.63 -                 __ gssbx(as_Register(src), AT, T9, 0);
    1.64 -              } else {
    1.65 -                 __ addu(AT, AT, T9); 
    1.66 -                 __ sb(as_Register(src), AT, 0);
    1.67 -              }
    1.68 -           } 
    1.69 -        } else {
    1.70 -           __ dsll(AT, as_Register(index), scale);
    1.71 -           if( Assembler::is_simm(disp, 8) ) { 
    1.72 -              if (UseLoongsonISA) {
    1.73 -                 __ gssbx(as_Register(src), AT, as_Register(base), disp);
    1.74 -              } else {
    1.75 -                 __ addu(AT, as_Register(base), AT);
    1.76 -                 __ sb(as_Register(src), AT, disp);
    1.77 -              }
    1.78 -           } else if( Assembler::is_simm16(disp) ) { 
    1.79 -              __ addu(AT, as_Register(base), AT);
    1.80 -              __ sb(as_Register(src), AT, disp);
    1.81 -           } else {
    1.82 -              __ addu(AT, as_Register(base), AT);
    1.83 -              __ move(T9, disp);
    1.84 -              if (UseLoongsonISA) {
    1.85 -                 __ gssbx(as_Register(src), AT, T9, 0);
    1.86 -              } else {
    1.87 -                 __ addu(AT, AT, T9); 
    1.88 -                 __ sb(as_Register(src), AT, 0);
    1.89 -              }
    1.90 -           }    
    1.91 -        }
    1.92 +        assert(UseLoongsonISA, "Only supported for Loongson CPUs");
    1.93 +        __ gssbx(as_Register(src), as_Register(base), as_Register(index), disp);
    1.94       } else {
    1.95 -        if( Assembler::is_simm16(disp) ) { 
    1.96 -           __ sb(as_Register(src), as_Register(base), disp);
    1.97 -        } else {
    1.98 -           __ move(T9, disp);   
    1.99 -           if (UseLoongsonISA) {
   1.100 -              __ gssbx(as_Register(src), as_Register(base), T9, 0);
   1.101 -           } else {
   1.102 -              __ addu(AT, as_Register(base), T9); 
   1.103 -              __ sb(as_Register(src), AT, 0);
   1.104 -           }
   1.105 -        }    
   1.106 +        __ sb(as_Register(src), as_Register(base), disp);
   1.107       }
   1.108    %}
   1.109  
   1.110 -  enc_class store_B_immI_enc (memory mem, immI8 src) %{
   1.111 +  enc_class store_B0_enc (memory mem) %{
   1.112       MacroAssembler _masm(&cbuf);
   1.113       int  base = $mem$$base;
   1.114       int  index = $mem$$index;
   1.115       int  scale = $mem$$scale;
   1.116       int  disp = $mem$$disp;
   1.117 -     int value = $src$$constant;
   1.118  
   1.119       if( index != 0 ) {
   1.120 -        if (!UseLoongsonISA) {
   1.121 -           if (scale == 0) {
   1.122 -              __ daddu(AT, as_Register(base), as_Register(index));
   1.123 -           } else {
   1.124 -              __ dsll(AT, as_Register(index), scale);
   1.125 -              __ daddu(AT, as_Register(base), AT);
   1.126 -           }
   1.127 -           if( Assembler::is_simm16(disp) ) { 
   1.128 -              if (value == 0) {
   1.129 -                 __ sb(R0, AT, disp);
   1.130 -              } else {
   1.131 -                 __ move(T9, value);
   1.132 -                 __ sb(T9, AT, disp);
   1.133 -              }
   1.134 -           } else {
   1.135 -              if (value == 0) {
   1.136 -                 __ move(T9, disp);
   1.137 -                 __ daddu(AT, AT, T9); 
   1.138 -                 __ sb(R0, AT, 0);
   1.139 -              } else {
   1.140 -                 __ move(T9, disp);
   1.141 -                 __ daddu(AT, AT, T9); 
   1.142 -                 __ move(T9, value);
   1.143 -                 __ sb(T9, AT, 0);
   1.144 -              }
   1.145 -           }    
   1.146 -        } else {
   1.147 -
   1.148 -           if (scale == 0) {
   1.149 -              if( Assembler::is_simm(disp, 8) ) { 
   1.150 -                 if (value == 0) {
   1.151 -                    __ gssbx(R0, as_Register(base), as_Register(index), disp);
   1.152 -                 } else {
   1.153 -                    __ move(T9, value);
   1.154 -                    __ gssbx(T9, as_Register(base), as_Register(index), disp);
   1.155 -                 }
   1.156 -              } else if( Assembler::is_simm16(disp) ) { 
   1.157 -                 __ daddu(AT, as_Register(base), as_Register(index));
   1.158 -                 if (value == 0) {
   1.159 -                    __ sb(R0, AT, disp);
   1.160 -                 } else {
   1.161 -                    __ move(T9, value);
   1.162 -                    __ sb(T9, AT, disp);
   1.163 -                 }
   1.164 -              } else {
   1.165 -                 if (value == 0) {
   1.166 -                    __ daddu(AT, as_Register(base), as_Register(index));
   1.167 -                    __ move(T9, disp);
   1.168 -                    __ gssbx(R0, AT, T9, 0);
   1.169 -                 } else {
   1.170 -                    __ move(AT, disp);
   1.171 -                    __ move(T9, value);
   1.172 -                    __ daddu(AT, as_Register(base), AT);
   1.173 -                    __ gssbx(T9, AT, as_Register(index), 0);
   1.174 -                 }
   1.175 -              }    
   1.176 -
   1.177 -           } else {
   1.178 -
   1.179 -              if( Assembler::is_simm(disp, 8) ) { 
   1.180 -                 __ dsll(AT, as_Register(index), scale);
   1.181 -                 if (value == 0) {
   1.182 -                    __ gssbx(R0, as_Register(base), AT, disp);
   1.183 -                 } else {
   1.184 -                    __ move(T9, value);
   1.185 -                    __ gssbx(T9, as_Register(base), AT, disp);
   1.186 -                 }
   1.187 -              } else if( Assembler::is_simm16(disp) ) { 
   1.188 -                 __ dsll(AT, as_Register(index), scale);
   1.189 -                 __ daddu(AT, as_Register(base), AT);
   1.190 -                 if (value == 0) {
   1.191 -                    __ sb(R0, AT, disp);
   1.192 -                 } else {
   1.193 -                    __ move(T9, value);
   1.194 -                    __ sb(T9, AT, disp);
   1.195 -                 }
   1.196 -              } else {
   1.197 -                 __ dsll(AT, as_Register(index), scale);
   1.198 -                 if (value == 0) {
   1.199 -                    __ daddu(AT, as_Register(base), AT);
   1.200 -                    __ move(T9, disp);
   1.201 -                    __ gssbx(R0, AT, T9, 0);
   1.202 -                 } else {
   1.203 -                    __ move(T9, disp);
   1.204 -                    __ daddu(AT, AT, T9); 
   1.205 -                    __ move(T9, value);
   1.206 -                    __ gssbx(T9, as_Register(base), AT, 0);
   1.207 -                 }
   1.208 -              }    
   1.209 -           }
   1.210 -        }
   1.211 +        assert(UseLoongsonISA, "Only supported for Loongson CPUs");
   1.212 +        __ gssbx(R0, as_Register(base), as_Register(index), disp);
   1.213       } else {
   1.214 -        if( Assembler::is_simm16(disp) ) { 
   1.215 -           if (value == 0) {
   1.216 -              __ sb(R0, as_Register(base), disp);
   1.217 -           } else {
   1.218 -              __ move(AT, value);
   1.219 -              __ sb(AT, as_Register(base), disp);
   1.220 -           }
   1.221 -        } else {
   1.222 -           if (value == 0) {
   1.223 -              __ move(T9, disp);   
   1.224 -              if (UseLoongsonISA) {
   1.225 -                __ gssbx(R0, as_Register(base), T9, 0);
   1.226 -              } else {
   1.227 -                __ daddu(AT, as_Register(base), T9); 
   1.228 -                __ sb(R0, AT, 0);
   1.229 -              }
   1.230 -           } else {
   1.231 -              __ move(T9, disp);   
   1.232 -              if (UseLoongsonISA) {
   1.233 -                __ move(AT, value);
   1.234 -                __ gssbx(AT, as_Register(base), T9, 0);
   1.235 -              } else {
   1.236 -                __ daddu(AT, as_Register(base), T9); 
   1.237 -                __ move(T9, value);
   1.238 -                __ sb(T9, AT, 0);
   1.239 -              }
   1.240 -           }
   1.241 -        }    
   1.242 +        __ sb(R0, as_Register(base), disp);
   1.243       }
   1.244    %}
   1.245  
   1.246 -
   1.247 -  enc_class store_B_immI_enc_sync (memory mem, immI8 src) %{
   1.248 +  enc_class store_B_reg_sync_enc (memory mem, mRegI src) %{
   1.249 +     MacroAssembler _masm(&cbuf);
   1.250 +     int  src = $src$$reg;
   1.251 +     int  base = $mem$$base;
   1.252 +     int  index = $mem$$index;
   1.253 +     int  scale = $mem$$scale;
   1.254 +     int  disp = $mem$$disp;
   1.255 +
   1.256 +     if( index != 0 ) {
   1.257 +        assert(UseLoongsonISA, "Only supported for Loongson CPUs");
   1.258 +        __ gssbx(as_Register(src), as_Register(base), as_Register(index), disp);
   1.259 +     } else {
   1.260 +        __ sb(as_Register(src), as_Register(base), disp);
   1.261 +     }
   1.262 +     __ sync();
   1.263 +  %}
   1.264 +
   1.265 +  enc_class store_B0_sync_enc (memory mem) %{
   1.266       MacroAssembler _masm(&cbuf);
   1.267       int  base = $mem$$base;
   1.268       int  index = $mem$$index;
   1.269       int  scale = $mem$$scale;
   1.270       int  disp = $mem$$disp;
   1.271 -     int value = $src$$constant;
   1.272  
   1.273       if( index != 0 ) {
   1.274 -		 if ( UseLoongsonISA ) {
   1.275 -			if ( Assembler::is_simm(disp,8) ) {
   1.276 -				if ( scale == 0 ) {
   1.277 -					if ( value == 0 ) {
   1.278 -						__ gssbx(R0, as_Register(base), as_Register(index), disp);
   1.279 -					} else {
   1.280 -						__ move(AT, value);
   1.281 -						__ gssbx(AT, as_Register(base), as_Register(index), disp);
   1.282 -					}
   1.283 -				} else {
   1.284 -					__ dsll(AT, as_Register(index), scale);
   1.285 -					if ( value == 0 ) {
   1.286 -						__ gssbx(R0, as_Register(base), AT, disp);
   1.287 -					} else {
   1.288 -						__ move(T9, value);
   1.289 -						__ gssbx(T9, as_Register(base), AT, disp);
   1.290 -					}
   1.291 -				}
   1.292 -			} else if ( Assembler::is_simm16(disp) ) {
   1.293 -				if ( scale == 0 ) {
   1.294 -					__ daddu(AT, as_Register(base), as_Register(index));
   1.295 -					if ( value == 0 ){
   1.296 -						__ sb(R0, AT, disp);
   1.297 -					} else {
   1.298 -						__ move(T9, value);
   1.299 -						__ sb(T9, AT, disp);
   1.300 -					}
   1.301 -				} else {
   1.302 -					__ dsll(AT, as_Register(index), scale);
   1.303 -					__ daddu(AT, as_Register(base), AT);
   1.304 -					if ( value == 0 ) {
   1.305 -						__ sb(R0, AT, disp);
   1.306 -					} else {
   1.307 -						__ move(T9, value);
   1.308 -						__ sb(T9, AT, disp);
   1.309 -					}
   1.310 -				}
   1.311 -			} else {
   1.312 -				if ( scale == 0 ) {
   1.313 -					__ move(AT, disp);
   1.314 -					__ daddu(AT, as_Register(index), AT);
   1.315 -					if ( value == 0 ) {
   1.316 -						__ gssbx(R0, as_Register(base), AT, 0);
   1.317 -					} else {
   1.318 -						__ move(T9, value);
   1.319 -						__ gssbx(T9, as_Register(base), AT, 0);
   1.320 -					}
   1.321 -				} else {
   1.322 -					__ dsll(AT, as_Register(index), scale);
   1.323 -					__ move(T9, disp);
   1.324 -					__ daddu(AT, AT, T9);
   1.325 -					if ( value == 0 ) {
   1.326 -						__ gssbx(R0, as_Register(base), AT, 0);
   1.327 -					} else {
   1.328 -						__ move(T9, value);
   1.329 -						__ gssbx(T9, as_Register(base), AT, 0);
   1.330 -					}
   1.331 -				}
   1.332 -			}
   1.333 -		 } else { //not use loongson isa
   1.334 -		    if (scale == 0) {
   1.335 -			   __ daddu(AT, as_Register(base), as_Register(index));
   1.336 -		    } else {
   1.337 -			   __ dsll(AT, as_Register(index), scale);
   1.338 -			   __ daddu(AT, as_Register(base), AT);
   1.339 -		    }
   1.340 -		    if( Assembler::is_simm16(disp) ) { 
   1.341 -			   if (value == 0) {
   1.342 -			      __ sb(R0, AT, disp);
   1.343 -			   } else {
   1.344 -		          __ move(T9, value);
   1.345 -				  __ sb(T9, AT, disp);
   1.346 -			  }
   1.347 -			} else {
   1.348 -		       if (value == 0) {
   1.349 -	              __ move(T9, disp);
   1.350 -				  __ daddu(AT, AT, T9); 
   1.351 -			      __ sb(R0, AT, 0);
   1.352 -		       } else {
   1.353 -	              __ move(T9, disp);
   1.354 -				  __ daddu(AT, AT, T9); 
   1.355 -			      __ move(T9, value);
   1.356 -		          __ sb(T9, AT, 0);
   1.357 -	           }
   1.358 -			}
   1.359 -		}    
   1.360 +        assert(UseLoongsonISA, "Only supported for Loongson CPUs");
   1.361 +        __ gssbx(R0, as_Register(base), as_Register(index), disp);
   1.362       } else {
   1.363 -		 if ( UseLoongsonISA ){
   1.364 -			if ( Assembler::is_simm16(disp) ){
   1.365 -				if ( value == 0 ) {
   1.366 -					__ sb(R0, as_Register(base), disp);
   1.367 -				} else {
   1.368 -					__ move(AT, value);
   1.369 -					__ sb(AT, as_Register(base), disp);
   1.370 -				}
   1.371 -			} else {
   1.372 -				__ move(AT, disp);
   1.373 -				if ( value == 0 ) {
   1.374 -					__ gssbx(R0, as_Register(base), AT, 0);
   1.375 -				} else {
   1.376 -					__ move(T9, value);
   1.377 -					__ gssbx(T9, as_Register(base), AT, 0);
   1.378 -				}
   1.379 -			}
   1.380 -		 } else {
   1.381 -		    if( Assembler::is_simm16(disp) ) { 
   1.382 -	           if (value == 0) {
   1.383 -			      __ sb(R0, as_Register(base), disp);
   1.384 -		       } else {
   1.385 -	              __ move(AT, value);
   1.386 -				  __ sb(AT, as_Register(base), disp);
   1.387 -			   }
   1.388 -		    } else {
   1.389 -	           if (value == 0) {
   1.390 -				  __ move(T9, disp);   
   1.391 -			      __ daddu(AT, as_Register(base), T9); 
   1.392 -		          __ sb(R0, AT, 0);
   1.393 -	           } else {
   1.394 -				  __ move(T9, disp);   
   1.395 -			      __ daddu(AT, as_Register(base), T9); 
   1.396 -		          __ move(T9, value);
   1.397 -	              __ sb(T9, AT, 0);
   1.398 -			   }
   1.399 -		    }    
   1.400 -		}
   1.401 +        __ sb(R0, as_Register(base), disp);
   1.402       }
   1.403 -
   1.404       __ sync();
   1.405    %}
   1.406  
   1.407 @@ -2136,72 +1839,15 @@
   1.408       int  disp = $mem$$disp;
   1.409  
   1.410       if( index != 0 ) {
   1.411 -		 if ( UseLoongsonISA ) {
   1.412 -			if ( Assembler::is_simm(disp, 8) ) {
   1.413 -				if (scale == 0) {
   1.414 -					__ gslhx(as_Register(dst), as_Register(base), as_Register(index), disp);
   1.415 -				} else {
   1.416 -					__ dsll(AT, as_Register(index), scale);
   1.417 -					__ gslhx(as_Register(dst), as_Register(base), AT, disp);
   1.418 -				}
   1.419 -			} else if ( Assembler::is_simm16(disp) ) {
   1.420 -				if (scale == 0) {
   1.421 -					__ daddu(AT, as_Register(base), as_Register(index));
   1.422 -					__ lh(as_Register(dst), AT, disp);
   1.423 -				} else {
   1.424 -					__ dsll(AT, as_Register(index), scale);
   1.425 -					__ daddu(AT, as_Register(base), AT);
   1.426 -					__ lh(as_Register(dst), AT, disp);
   1.427 -				}
   1.428 -			} else {
   1.429 -				if (scale == 0) {
   1.430 -					__ move(AT, disp);
   1.431 -					__ daddu(AT, as_Register(index), AT);
   1.432 -					__ gslhx(as_Register(dst), as_Register(base), AT, 0);
   1.433 -				} else {
   1.434 -					__ dsll(AT, as_Register(index), scale);
   1.435 -					__ move(T9, disp);
   1.436 -					__ daddu(AT, AT, T9);
   1.437 -					__ gslhx(as_Register(dst), as_Register(base), AT, 0);
   1.438 -				}
   1.439 -			}
   1.440 -		 } else { // not use loongson isa
   1.441 -		    if (scale == 0) {
   1.442 -			   __ daddu(AT, as_Register(base), as_Register(index));
   1.443 -		    } else {
   1.444 -			   __ dsll(AT, as_Register(index), scale);
   1.445 -		       __ daddu(AT, as_Register(base), AT);
   1.446 -			}
   1.447 -		    if( Assembler::is_simm16(disp) ) { 
   1.448 -		       __ lh(as_Register(dst), AT, disp);
   1.449 -	        } else {
   1.450 -	           __ move(T9, disp);
   1.451 -			   __ daddu(AT, AT, T9); 
   1.452 -		       __ lh(as_Register(dst), AT, 0);
   1.453 -	        }    
   1.454 -		}
   1.455 -     } else { // index is 0
   1.456 -		 if ( UseLoongsonISA ) {
   1.457 -			if ( Assembler::is_simm16(disp) ) {
   1.458 -				__ lh(as_Register(dst), as_Register(base), disp);
   1.459 -			} else {
   1.460 -				__ move(T9, disp);
   1.461 -				__ gslhx(as_Register(dst), as_Register(base), T9, 0);
   1.462 -			}
   1.463 -		 } else { //not use loongson isa
   1.464 -		    if( Assembler::is_simm16(disp) ) { 
   1.465 -			   __ lh(as_Register(dst), as_Register(base), disp);
   1.466 -		    } else {
   1.467 -	           __ move(T9, disp);   
   1.468 -			   __ daddu(AT, as_Register(base), T9); 
   1.469 -		       __ lh(as_Register(dst), AT, 0);
   1.470 -	        }    
   1.471 -		 }
   1.472 +        assert(UseLoongsonISA, "Only supported for Loongson CPUs");
    1.473 +        __ gslhx(as_Register(dst), as_Register(base), as_Register(index), disp);
   1.474 +     } else {
   1.475 +        __ lh(as_Register(dst), as_Register(base), disp);
   1.476       }
   1.477    %}
   1.478  
   1.479    // Load Char (16bit unsigned)
   1.480 -  enc_class load_C_enc (mRegI dst, memory mem) %{
   1.481 +  enc_class load_C_enc (mRegI dst, umemory mem) %{
   1.482       MacroAssembler _masm(&cbuf);
   1.483       int  dst = $dst$$reg;
   1.484       int  base = $mem$$base;
   1.485 @@ -2209,29 +1855,8 @@
   1.486       int  scale = $mem$$scale;
   1.487       int  disp = $mem$$disp;
   1.488  
   1.489 -     if( index != 0 ) {
   1.490 -        if (scale == 0) {
   1.491 -           __ daddu(AT, as_Register(base), as_Register(index));
   1.492 -        } else {
   1.493 -           __ dsll(AT, as_Register(index), scale);
   1.494 -           __ daddu(AT, as_Register(base), AT);
   1.495 -        }
   1.496 -        if( Assembler::is_simm16(disp) ) { 
   1.497 -           __ lhu(as_Register(dst), AT, disp);
   1.498 -        } else {
   1.499 -           __ move(T9, disp);
   1.500 -           __ addu(AT, AT, T9); 
   1.501 -           __ lhu(as_Register(dst), AT, 0);
   1.502 -        }    
   1.503 -     } else {
   1.504 -        if( Assembler::is_simm16(disp) ) { 
   1.505 -           __ lhu(as_Register(dst), as_Register(base), disp);
   1.506 -        } else {
   1.507 -           __ move(T9, disp);   
   1.508 -           __ daddu(AT, as_Register(base), T9); 
   1.509 -           __ lhu(as_Register(dst), AT, 0);
   1.510 -        }    
   1.511 -     }
   1.512 +     assert(index == 0, "no index");
   1.513 +     __ lhu(as_Register(dst), as_Register(base), disp);
   1.514    %}
   1.515  
   1.516    // Store Char (16bit unsigned)
   1.517 @@ -2244,50 +1869,10 @@
   1.518       int  disp = $mem$$disp;
   1.519  
   1.520       if( index != 0 ) {
   1.521 -        if( Assembler::is_simm16(disp) ) { 
   1.522 -           if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
   1.523 -              if (scale == 0) {
   1.524 -                 __ gsshx(as_Register(src), as_Register(base), as_Register(index), disp);
   1.525 -              } else {
   1.526 -                 __ dsll(AT, as_Register(index), scale);
   1.527 -                 __ gsshx(as_Register(src), as_Register(base), AT, disp);
   1.528 -              }
   1.529 -           } else {
   1.530 -              if (scale == 0) {
   1.531 -                 __ addu(AT, as_Register(base), as_Register(index));
   1.532 -              } else {
   1.533 -                 __ dsll(AT, as_Register(index), scale);
   1.534 -                 __ addu(AT, as_Register(base), AT);
   1.535 -              }
   1.536 -              __ sh(as_Register(src), AT, disp);
   1.537 -           }
   1.538 -        } else {
   1.539 -           if (scale == 0) {
   1.540 -              __ addu(AT, as_Register(base), as_Register(index));
   1.541 -           } else {
   1.542 -              __ dsll(AT, as_Register(index), scale);
   1.543 -              __ addu(AT, as_Register(base), AT);
   1.544 -           }
   1.545 -           __ move(T9, disp);
   1.546 -           if( UseLoongsonISA ) {
   1.547 -              __ gsshx(as_Register(src), AT, T9, 0);
   1.548 -           } else {
   1.549 -              __ addu(AT, AT, T9); 
   1.550 -              __ sh(as_Register(src), AT, 0);
   1.551 -           }
   1.552 -        }    
   1.553 +        assert(UseLoongsonISA, "Only supported for Loongson CPUs");
   1.554 +        __ gsshx(as_Register(src), as_Register(base), as_Register(index), disp);
   1.555       } else {
   1.556 -        if( Assembler::is_simm16(disp) ) { 
   1.557 -           __ sh(as_Register(src), as_Register(base), disp);
   1.558 -        } else {
   1.559 -           __ move(T9, disp);   
   1.560 -           if( UseLoongsonISA ) {
   1.561 -              __ gsshx(as_Register(src), as_Register(base), T9, 0);
   1.562 -           } else {
   1.563 -              __ addu(AT, as_Register(base), T9); 
   1.564 -              __ sh(as_Register(src), AT, 0);
   1.565 -           }
   1.566 -        }    
   1.567 +        __ sh(as_Register(src), as_Register(base), disp);
   1.568       }
   1.569    %}
   1.570  
   1.571 @@ -2299,50 +1884,10 @@
   1.572       int  disp = $mem$$disp;
   1.573  
   1.574       if( index != 0 ) {
   1.575 -        if( Assembler::is_simm16(disp) ) { 
   1.576 -           if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
   1.577 -              if (scale == 0) {
   1.578 -                 __ gsshx(R0, as_Register(base), as_Register(index), disp);
   1.579 -              } else {
   1.580 -                 __ dsll(AT, as_Register(index), scale);
   1.581 -                 __ gsshx(R0, as_Register(base), AT, disp);
   1.582 -              }
   1.583 -           } else {
   1.584 -              if (scale == 0) {
   1.585 -                 __ addu(AT, as_Register(base), as_Register(index));
   1.586 -              } else {
   1.587 -                 __ dsll(AT, as_Register(index), scale);
   1.588 -                 __ addu(AT, as_Register(base), AT);
   1.589 -              }
   1.590 -              __ sh(R0, AT, disp);
   1.591 -           }
   1.592 -        } else {
   1.593 -           if (scale == 0) {
   1.594 -              __ addu(AT, as_Register(base), as_Register(index));
   1.595 -           } else {
   1.596 -              __ dsll(AT, as_Register(index), scale);
   1.597 -              __ addu(AT, as_Register(base), AT);
   1.598 -           }
   1.599 -           __ move(T9, disp);
   1.600 -           if( UseLoongsonISA ) {
   1.601 -              __ gsshx(R0, AT, T9, 0);
   1.602 -           } else {
   1.603 -              __ addu(AT, AT, T9); 
   1.604 -              __ sh(R0, AT, 0);
   1.605 -           }
   1.606 -        }    
   1.607 +        assert(UseLoongsonISA, "Only supported for Loongson CPUs");
   1.608 +        __ gsshx(R0, as_Register(base), as_Register(index), disp);
   1.609       } else {
   1.610 -        if( Assembler::is_simm16(disp) ) { 
   1.611 -           __ sh(R0, as_Register(base), disp);
   1.612 -        } else {
   1.613 -           __ move(T9, disp);   
   1.614 -           if( UseLoongsonISA ) {
   1.615 -              __ gsshx(R0, as_Register(base), T9, 0);
   1.616 -           } else {
   1.617 -              __ addu(AT, as_Register(base), T9); 
   1.618 -              __ sh(R0, AT, 0);
   1.619 -           }
   1.620 -        }    
   1.621 +        __ sh(R0, as_Register(base), disp);
   1.622       }
   1.623    %}
   1.624  
   1.625 @@ -2355,50 +1900,10 @@
   1.626       int  disp = $mem$$disp;
   1.627  
   1.628       if( index != 0 ) {
   1.629 -        if( Assembler::is_simm16(disp) ) { 
   1.630 -           if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
   1.631 -              if (scale == 0) {
   1.632 -                 __ gslwx(as_Register(dst), as_Register(base), as_Register(index), disp);
   1.633 -              } else {
   1.634 -                 __ dsll(AT, as_Register(index), scale);
   1.635 -                 __ gslwx(as_Register(dst), as_Register(base), AT, disp);
   1.636 -              }
   1.637 -           } else {
   1.638 -              if (scale == 0) {
   1.639 -                 __ addu(AT, as_Register(base), as_Register(index));
   1.640 -              } else {
   1.641 -                 __ dsll(AT, as_Register(index), scale);
   1.642 -                 __ addu(AT, as_Register(base), AT);
   1.643 -              }
   1.644 -              __ lw(as_Register(dst), AT, disp);
   1.645 -           }
   1.646 -        } else {
   1.647 -           if (scale == 0) {
   1.648 -              __ addu(AT, as_Register(base), as_Register(index));
   1.649 -           } else {
   1.650 -              __ dsll(AT, as_Register(index), scale);
   1.651 -              __ addu(AT, as_Register(base), AT);
   1.652 -           }
   1.653 -           __ move(T9, disp);
   1.654 -           if( UseLoongsonISA ) {
   1.655 -              __ gslwx(as_Register(dst), AT, T9, 0);
   1.656 -           } else {
   1.657 -              __ addu(AT, AT, T9); 
   1.658 -              __ lw(as_Register(dst), AT, 0);
   1.659 -           }
   1.660 -        }    
   1.661 +        assert(UseLoongsonISA, "Only supported for Loongson CPUs");
   1.662 +        __ gslwx(as_Register(dst), as_Register(base), as_Register(index), disp);
   1.663       } else {
   1.664 -        if( Assembler::is_simm16(disp) ) { 
   1.665 -           __ lw(as_Register(dst), as_Register(base), disp);
   1.666 -        } else {
   1.667 -           __ move(T9, disp);   
   1.668 -           if( UseLoongsonISA ) {
   1.669 -              __ gslwx(as_Register(dst), as_Register(base), T9, 0);
   1.670 -           } else {
   1.671 -              __ addu(AT, as_Register(base), T9); 
   1.672 -              __ lw(as_Register(dst), AT, 0);
   1.673 -           }
   1.674 -        }    
   1.675 +        __ lw(as_Register(dst), as_Register(base), disp);
   1.676       }
   1.677    %}
   1.678  
   1.679 @@ -2411,224 +1916,41 @@
   1.680       int  disp = $mem$$disp;
   1.681  
   1.682       if( index != 0 ) {
   1.683 -        if( Assembler::is_simm16(disp) ) { 
   1.684 -           if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
   1.685 -              if (scale == 0) {
   1.686 -                 __ gsswx(as_Register(src), as_Register(base), as_Register(index), disp);
   1.687 -              } else {
   1.688 -                 __ dsll(AT, as_Register(index), scale);
   1.689 -                 __ gsswx(as_Register(src), as_Register(base), AT, disp);
   1.690 -              }
   1.691 -           } else {
   1.692 -              if (scale == 0) {
   1.693 -                 __ addu(AT, as_Register(base), as_Register(index));
   1.694 -              } else {
   1.695 -                 __ dsll(AT, as_Register(index), scale);
   1.696 -                 __ addu(AT, as_Register(base), AT);
   1.697 -              }
   1.698 -              __ sw(as_Register(src), AT, disp);
   1.699 -           }
   1.700 -        } else {
   1.701 -           if (scale == 0) {
   1.702 -              __ addu(AT, as_Register(base), as_Register(index));
   1.703 -           } else {
   1.704 -              __ dsll(AT, as_Register(index), scale);
   1.705 -              __ addu(AT, as_Register(base), AT);
   1.706 -           }
   1.707 -           __ move(T9, disp);
   1.708 -           if( UseLoongsonISA ) {
   1.709 -              __ gsswx(as_Register(src), AT, T9, 0);
   1.710 -           } else {
   1.711 -              __ addu(AT, AT, T9); 
   1.712 -              __ sw(as_Register(src), AT, 0);
   1.713 -           }
   1.714 -        }    
   1.715 +        assert(UseLoongsonISA, "Only supported for Loongson CPUs");
   1.716 +        __ gsswx(as_Register(src), as_Register(base), as_Register(index), disp);
   1.717       } else {
   1.718 -        if( Assembler::is_simm16(disp) ) { 
   1.719 -           __ sw(as_Register(src), as_Register(base), disp);
   1.720 -        } else {
   1.721 -           __ move(T9, disp);   
   1.722 -           if( UseLoongsonISA ) {
   1.723 -              __ gsswx(as_Register(src), as_Register(base), T9, 0);
   1.724 -           } else {
   1.725 -              __ addu(AT, as_Register(base), T9); 
   1.726 -              __ sw(as_Register(src), AT, 0);
   1.727 -           }
   1.728 -        }    
   1.729 +        __ sw(as_Register(src), as_Register(base), disp);
   1.730       }
   1.731    %}
   1.732  
   1.733 -  enc_class store_I_immI_enc (memory mem, immI src) %{
   1.734 +  enc_class store_I_immI0_enc (memory mem) %{
   1.735       MacroAssembler _masm(&cbuf);
   1.736 -     int  base = $mem$$base;
   1.737 +     int  base  = $mem$$base;
   1.738       int  index = $mem$$index;
   1.739       int  scale = $mem$$scale;
   1.740 -     int  disp = $mem$$disp;
   1.741 -     int value = $src$$constant;
   1.742 +     int  disp  = $mem$$disp;
   1.743  
   1.744       if( index != 0 ) {
   1.745 -        if ( UseLoongsonISA ) {
   1.746 -           if ( Assembler::is_simm(disp, 8) ) {
   1.747 -              if ( scale == 0 ) {
   1.748 -                 if ( value == 0 ) {
   1.749 -                    __ gsswx(R0, as_Register(base), as_Register(index), disp);
   1.750 -                 } else {
   1.751 -                    __ move(T9, value);
   1.752 -                    __ gsswx(T9, as_Register(base), as_Register(index), disp);
   1.753 -                 }
   1.754 -              } else {
   1.755 -                 __ dsll(AT, as_Register(index), scale);
   1.756 -                 if ( value == 0 ) {
   1.757 -                    __ gsswx(R0, as_Register(base), AT, disp);
   1.758 -                 } else {
   1.759 -                    __ move(T9, value);
   1.760 -                    __ gsswx(T9, as_Register(base), AT, disp);
   1.761 -                 }
   1.762 -              }
   1.763 -           } else if ( Assembler::is_simm16(disp) ) {
   1.764 -                if ( scale == 0 ) {
   1.765 -                   __ daddu(AT, as_Register(base), as_Register(index));
   1.766 -                   if ( value == 0 ) {
   1.767 -                      __ sw(R0, AT, disp);
   1.768 -                   } else {
   1.769 -                      __ move(T9, value);
   1.770 -					  __ sw(T9, AT, disp);
   1.771 -                   }
   1.772 -                } else {
   1.773 -				   __ dsll(AT, as_Register(index), scale);
   1.774 -                   __ daddu(AT, as_Register(base), AT);
   1.775 -                   if ( value == 0 ) {
   1.776 -                      __ sw(R0, AT, disp);
   1.777 -                   } else {
   1.778 -                      __ move(T9, value);
   1.779 -                      __ sw(T9, AT, disp);
   1.780 -				   }
   1.781 -				}
   1.782 -			} else {
   1.783 -                 if ( scale == 0 ) {
   1.784 -                    __ move(T9, disp);
   1.785 -                    __ daddu(AT, as_Register(index), T9);
   1.786 -                    if ( value ==0 ) {
   1.787 -                       __ gsswx(R0, as_Register(base), AT, 0);
   1.788 -                    } else {
   1.789 -                       __ move(T9, value);
   1.790 -                       __ gsswx(T9, as_Register(base), AT, 0);
   1.791 -					}
   1.792 -                 } else {
   1.793 -                      __ dsll(AT, as_Register(index), scale);
   1.794 -					  __ move(T9, disp);
   1.795 -					  __ daddu(AT, AT, T9);
   1.796 -                      if ( value == 0 ) {
   1.797 -                         __ gsswx(R0, as_Register(base), AT, 0);
   1.798 -					  } else {
   1.799 -						 __ move(T9, value);
   1.800 -						 __ gsswx(T9, as_Register(base), AT, 0);
   1.801 -					  }
   1.802 -				 }
   1.803 -			}
   1.804 -		} else { //not use loongson isa
   1.805 -             if (scale == 0) {
   1.806 -                __ daddu(AT, as_Register(base), as_Register(index));
   1.807 -             } else {
   1.808 -                __ dsll(AT, as_Register(index), scale);
   1.809 -			    __ daddu(AT, as_Register(base), AT);
   1.810 -		     }
   1.811 -	         if( Assembler::is_simm16(disp) ) { 
   1.812 -                if (value == 0) {
   1.813 -                   __ sw(R0, AT, disp);
   1.814 -                } else {
   1.815 -		           __ move(T9, value);
   1.816 -			       __ sw(T9, AT, disp);
   1.817 -			    }
   1.818 -		     } else {
   1.819 -                if (value == 0) {
   1.820 -				   __ move(T9, disp);
   1.821 -			       __ daddu(AT, AT, T9); 
   1.822 -			       __ sw(R0, AT, 0);
   1.823 -			    } else {
   1.824 -			       __ move(T9, disp);
   1.825 -			       __ daddu(AT, AT, T9); 
   1.826 -			       __ move(T9, value);
   1.827 -			       __ sw(T9, AT, 0);
   1.828 -			    }
   1.829 -			 }    
   1.830 -		}
   1.831 +        assert(UseLoongsonISA, "Only supported for Loongson CPUs");
   1.832 +        __ gsswx(R0, as_Register(base), as_Register(index), disp);
   1.833       } else {
   1.834 -		 if ( UseLoongsonISA ) {
   1.835 -			if ( Assembler::is_simm16(disp) ) {
   1.836 -				if ( value == 0 ) {
   1.837 -					__ sw(R0, as_Register(base), disp);
   1.838 -				} else {
   1.839 -					__ move(AT, value);
   1.840 -					__ sw(AT, as_Register(base), disp);
   1.841 -				}
   1.842 -			} else {
   1.843 -				__ move(T9, disp);
   1.844 -				if ( value == 0 ) {
   1.845 -					__ gsswx(R0, as_Register(base), T9, 0);
   1.846 -				} else {
   1.847 -					__ move(AT, value);
   1.848 -					__ gsswx(AT, as_Register(base), T9, 0);
   1.849 -				}
   1.850 -			}
   1.851 -		 } else {
   1.852 -		    if( Assembler::is_simm16(disp) ) { 
   1.853 -			   if (value == 0) {
   1.854 -		          __ sw(R0, as_Register(base), disp);
   1.855 -	           } else {
   1.856 -	              __ move(AT, value);
   1.857 -				  __ sw(AT, as_Register(base), disp);
   1.858 -			   }
   1.859 -		    } else {
   1.860 -	           if (value == 0) {
   1.861 -	              __ move(T9, disp);   
   1.862 -				  __ daddu(AT, as_Register(base), T9); 
   1.863 -			      __ sw(R0, AT, 0);
   1.864 -		      } else {
   1.865 -			      __ move(T9, disp);   
   1.866 -		          __ daddu(AT, as_Register(base), T9); 
   1.867 -				  __ move(T9, value);
   1.868 -			      __ sw(T9, AT, 0);
   1.869 -		       }
   1.870 -	        }
   1.871 -		}
   1.872 +        __ sw(R0, as_Register(base), disp);
   1.873       }
   1.874    %}
   1.875  
   1.876 -  enc_class load_N_enc (mRegN dst, memory mem) %{
   1.877 +  enc_class load_N_enc (mRegN dst, umemory mem) %{
   1.878       MacroAssembler _masm(&cbuf);
   1.879       int  dst = $dst$$reg;
   1.880       int  base = $mem$$base;
   1.881       int  index = $mem$$index;
   1.882       int  scale = $mem$$scale;
   1.883       int  disp = $mem$$disp;
   1.884 -	 relocInfo::relocType disp_reloc = $mem->disp_reloc();
   1.885 -	 assert(disp_reloc == relocInfo::none, "cannot have disp");
   1.886 -
   1.887 -     if( index != 0 ) {
   1.888 -        if (scale == 0) {
   1.889 -           __ daddu(AT, as_Register(base), as_Register(index));
   1.890 -        } else {
   1.891 -           __ dsll(AT, as_Register(index), scale);
   1.892 -           __ daddu(AT, as_Register(base), AT);
   1.893 -        }
   1.894 -        if( Assembler::is_simm16(disp) ) { 
   1.895 -           __ lwu(as_Register(dst), AT, disp);
   1.896 -        } else {
   1.897 -           __ set64(T9, disp);
   1.898 -           __ daddu(AT, AT, T9);
   1.899 -           __ lwu(as_Register(dst), AT, 0);
   1.900 -        }    
   1.901 -     } else {
   1.902 -        if( Assembler::is_simm16(disp) ) { 
   1.903 -           __ lwu(as_Register(dst), as_Register(base), disp);
   1.904 -        } else {
   1.905 -           __ set64(T9, disp);   
   1.906 -           __ daddu(AT, as_Register(base), T9);
   1.907 -           __ lwu(as_Register(dst), AT, 0);
   1.908 -        }    
   1.909 -     }
   1.910 -
   1.911 +
   1.912 +     relocInfo::relocType disp_reloc = $mem->disp_reloc();
   1.913 +     assert(disp_reloc == relocInfo::none, "cannot have disp");
   1.914 +
   1.915 +     assert(index == 0, "no index");
   1.916 +     __ lwu(as_Register(dst), as_Register(base), disp);
   1.917    %}
   1.918  
   1.919  
   1.920 @@ -2639,71 +1961,16 @@
   1.921       int  index = $mem$$index;
   1.922       int  scale = $mem$$scale;
   1.923       int  disp = $mem$$disp;
   1.924 -	 relocInfo::relocType disp_reloc = $mem->disp_reloc();
   1.925 -	 assert(disp_reloc == relocInfo::none, "cannot have disp");
   1.926 +
   1.927 +     relocInfo::relocType disp_reloc = $mem->disp_reloc();
   1.928 +     assert(disp_reloc == relocInfo::none, "cannot have disp");
   1.929  
   1.930       if( index != 0 ) {
   1.931 -        if ( UseLoongsonISA ) {
   1.932 -           if ( Assembler::is_simm(disp, 8) ) {
   1.933 -              if ( scale != 0 ) {
   1.934 -                 __ dsll(AT, as_Register(index), scale);
   1.935 -                 __ gsldx(as_Register(dst), as_Register(base), AT, disp);
   1.936 -              } else {
   1.937 -                 __ gsldx(as_Register(dst), as_Register(base), as_Register(index), disp);
   1.938 -              }
   1.939 -           } else if ( Assembler::is_simm16(disp) ){
   1.940 -              if ( scale != 0 ) {
   1.941 -                 __ dsll(AT, as_Register(index), scale);
   1.942 -                 __ daddu(AT, AT, as_Register(base));
   1.943 -              } else {
   1.944 -                 __ daddu(AT, as_Register(index), as_Register(base));
   1.945 -              }
   1.946 -              __ ld(as_Register(dst), AT, disp);
   1.947 -           } else {
   1.948 -                if ( scale != 0 ) {
   1.949 -                   __ dsll(AT, as_Register(index), scale);
   1.950 -                   __ move(T9, disp);
   1.951 -                   __ daddu(AT, AT, T9);
   1.952 -                } else {
   1.953 -                   __ move(T9, disp);
   1.954 -                   __ daddu(AT, as_Register(index), T9);
   1.955 -                }
   1.956 -                __ gsldx(as_Register(dst), as_Register(base), AT, 0);
   1.957 -           }
   1.958 -	    } else { //not use loongson isa
   1.959 -             if (scale == 0) {
   1.960 -                __ daddu(AT, as_Register(base), as_Register(index));
   1.961 -             } else {
   1.962 -                __ dsll(AT, as_Register(index), scale);
   1.963 -                __ daddu(AT, as_Register(base), AT);
   1.964 -             }     
   1.965 -             if( Assembler::is_simm16(disp) ) { 
   1.966 -                __ ld(as_Register(dst), AT, disp);
   1.967 -             } else {
   1.968 -                __ set64(T9, disp);
   1.969 -                __ daddu(AT, AT, T9);
   1.970 -                __ ld(as_Register(dst), AT, 0);
   1.971 -             }  
   1.972 -	    }    
   1.973 +        assert(UseLoongsonISA, "Only supported for Loongson CPUs");
   1.974 +        __ gsldx(as_Register(dst), as_Register(base), as_Register(index), disp);
   1.975       } else {
   1.976 -	      if ( UseLoongsonISA ) {
   1.977 -	         if ( Assembler::is_simm16(disp) ){
   1.978 -	            __ ld(as_Register(dst), as_Register(base), disp);
   1.979 -	         } else {
   1.980 -	            __ set64(T9, disp);
   1.981 - 	            __ gsldx(as_Register(dst), as_Register(base), T9, 0);
   1.982 -	         }
   1.983 -	      } else { //not use loongson isa
   1.984 -	         if( Assembler::is_simm16(disp) ) { 
   1.985 -                __ ld(as_Register(dst), as_Register(base), disp);
   1.986 -             } else {
   1.987 -                __ set64(T9, disp);   
   1.988 -                __ daddu(AT, as_Register(base), T9);
   1.989 -                __ ld(as_Register(dst), AT, 0);
   1.990 -             }
   1.991 -	      }
   1.992 +        __ ld(as_Register(dst), as_Register(base), disp);
   1.993       }
   1.994 -//     if( disp_reloc != relocInfo::none) __ ld(as_Register(dst), as_Register(dst), 0);
   1.995    %}
   1.996  
   1.997    enc_class store_P_reg_enc (memory mem, mRegP src) %{
   1.998 @@ -2715,65 +1982,10 @@
   1.999       int  disp = $mem$$disp;
  1.1000  
  1.1001       if( index != 0 ) {
  1.1002 -	    if ( UseLoongsonISA ){
  1.1003 -	       if ( Assembler::is_simm(disp, 8) ) {
  1.1004 -	          if ( scale == 0 ) {
  1.1005 -		         __ gssdx(as_Register(src), as_Register(base), as_Register(index), disp);
  1.1006 -	          } else {
  1.1007 -		         __ dsll(AT, as_Register(index), scale);
  1.1008 -        	     __ gssdx(as_Register(src), as_Register(base), AT, disp);
  1.1009 -	          }
  1.1010 -	       } else if ( Assembler::is_simm16(disp) ) {
  1.1011 -	          if ( scale == 0 ) {
  1.1012 -		         __ daddu(AT, as_Register(base), as_Register(index));
  1.1013 -	          } else {
  1.1014 -                 __ dsll(AT, as_Register(index), scale);
  1.1015 -                 __ daddu(AT, as_Register(base), AT);
  1.1016 -	          }
  1.1017 -              __ sd(as_Register(src), AT, disp);
  1.1018 -	       } else {
  1.1019 -	          if ( scale == 0 ) { 
  1.1020 -                 __ move(T9, disp);
  1.1021 -                 __ daddu(AT, as_Register(index), T9);
  1.1022 -              } else {
  1.1023 -                 __ dsll(AT, as_Register(index), scale);
  1.1024 -                 __ move(T9, disp);
  1.1025 -                 __ daddu(AT, AT, T9);
  1.1026 -              }     
  1.1027 -              __ gssdx(as_Register(src), as_Register(base), AT, 0); 
  1.1028 -           }
  1.1029 -	    } else { //not use loongson isa
  1.1030 -           if (scale == 0) {
  1.1031 -              __ daddu(AT, as_Register(base), as_Register(index));
  1.1032 -           } else {
  1.1033 -              __ dsll(AT, as_Register(index), scale);
  1.1034 -              __ daddu(AT, as_Register(base), AT);
  1.1035 -           }
  1.1036 -           if( Assembler::is_simm16(disp) ) { 
  1.1037 -              __ sd(as_Register(src), AT, disp);
  1.1038 -           } else {
  1.1039 -              __ move(T9, disp);
  1.1040 -              __ daddu(AT, AT, T9); 
  1.1041 -              __ sd(as_Register(src), AT, 0);
  1.1042 -           }    
  1.1043 -	    }
  1.1044 +        assert(UseLoongsonISA, "Only supported for Loongson CPUs");
  1.1045 +        __ gssdx(as_Register(src), as_Register(base), as_Register(index), disp);
  1.1046       } else {
  1.1047 -          if ( UseLoongsonISA ) {
  1.1048 -             if ( Assembler::is_simm16(disp) ) {
  1.1049 -	            __ sd(as_Register(src), as_Register(base), disp);
  1.1050 -	         } else {
  1.1051 -	            __ move(T9, disp);
  1.1052 -                __ gssdx(as_Register(src), as_Register(base), T9, 0);
  1.1053 -	         }
  1.1054 -	      } else {
  1.1055 -             if( Assembler::is_simm16(disp) ) { 
  1.1056 -                 __ sd(as_Register(src), as_Register(base), disp);
  1.1057 -             } else {
  1.1058 -                 __ move(T9, disp);   
  1.1059 -                 __ daddu(AT, as_Register(base), T9); 
  1.1060 -                 __ sd(as_Register(src), AT, 0);
  1.1061 -             }    
  1.1062 -	      }
  1.1063 +        __ sd(as_Register(src), as_Register(base), disp);
  1.1064       }
  1.1065    %}
  1.1066  
  1.1067 @@ -2786,65 +1998,10 @@
  1.1068       int  disp = $mem$$disp;
  1.1069  
  1.1070       if( index != 0 ) {
  1.1071 -        if ( UseLoongsonISA ){
  1.1072 -	       if ( Assembler::is_simm(disp, 8) ) {
  1.1073 -              if ( scale == 0 ) {
  1.1074 -	             __ gsswx(as_Register(src), as_Register(base), as_Register(index), disp);
  1.1075 -	          } else {
  1.1076 -                 __ dsll(AT, as_Register(index), scale);
  1.1077 -                 __ gsswx(as_Register(src), as_Register(base), AT, disp);
  1.1078 -	          }
  1.1079 -	       } else if ( Assembler::is_simm16(disp) ) {
  1.1080 -              if ( scale == 0 ) {
  1.1081 -                 __ daddu(AT, as_Register(base), as_Register(index));
  1.1082 -	          } else {
  1.1083 -		         __ dsll(AT, as_Register(index), scale);
  1.1084 -		         __ daddu(AT, as_Register(base), AT);
  1.1085 -	          }
  1.1086 - 	          __ sw(as_Register(src), AT, disp);
  1.1087 -	       } else {
  1.1088 -	          if ( scale == 0 ) {
  1.1089 -	             __ move(T9, disp);
  1.1090 -                 __ daddu(AT, as_Register(index), T9);
  1.1091 -	          } else {
  1.1092 -                 __ dsll(AT, as_Register(index), scale);
  1.1093 -	 	         __ move(T9, disp);
  1.1094 -                 __ daddu(AT, AT, T9);
  1.1095 -	          }
  1.1096 -	          __ gsswx(as_Register(src), as_Register(base), AT, 0);
  1.1097 -	       }
  1.1098 -	    } else { //not use loongson isa
  1.1099 -	       if (scale == 0) {
  1.1100 -              __ daddu(AT, as_Register(base), as_Register(index));
  1.1101 -           } else {
  1.1102 -              __ dsll(AT, as_Register(index), scale);
  1.1103 -              __ daddu(AT, as_Register(base), AT);
  1.1104 -           }
  1.1105 -           if( Assembler::is_simm16(disp) ) { 
  1.1106 -              __ sw(as_Register(src), AT, disp);
  1.1107 -           } else {
  1.1108 -              __ move(T9, disp);
  1.1109 -              __ daddu(AT, AT, T9);
  1.1110 -              __ sw(as_Register(src), AT, 0);
  1.1111 -           }
  1.1112 -	    }
  1.1113 +        assert(UseLoongsonISA, "Only supported for Loongson CPUs");
  1.1114 +        __ gsswx(as_Register(src), as_Register(base), as_Register(index), disp);
  1.1115       } else {
  1.1116 -        if ( UseLoongsonISA ) {
  1.1117 -           if ( Assembler::is_simm16(disp) ) {
  1.1118 -	          __ sw(as_Register(src), as_Register(base), disp);
  1.1119 -	       } else {
  1.1120 -	          __ move(T9, disp);
  1.1121 -	          __ gsswx(as_Register(src), as_Register(base), T9, 0);
  1.1122 -	       }
  1.1123 - 	    } else {
  1.1124 -           if( Assembler::is_simm16(disp) ) { 
  1.1125 -              __ sw(as_Register(src), as_Register(base), disp);
  1.1126 -           } else {
  1.1127 -              __ move(T9, disp);   
  1.1128 -              __ daddu(AT, as_Register(base), T9); 
  1.1129 -              __ sw(as_Register(src), AT, 0);
  1.1130 -           }
  1.1131 -	    }    
  1.1132 +	__ sw(as_Register(src), as_Register(base), disp);
  1.1133       }
  1.1134    %}
  1.1135  
  1.1136 @@ -2856,92 +2013,27 @@
  1.1137       int  disp = $mem$$disp;
  1.1138  
  1.1139       if( index != 0 ) {
  1.1140 -        if (scale == 0) {
  1.1141 -           if( Assembler::is_simm16(disp) ) { 
  1.1142 -              if (UseLoongsonISA && Assembler::is_simm(disp, 8)) {
  1.1143 -                __ gssdx(R0, as_Register(base), as_Register(index), disp);
  1.1144 -              } else {
  1.1145 -                __ daddu(AT, as_Register(base), as_Register(index));
  1.1146 -                __ sd(R0, AT, disp);
  1.1147 -              }
  1.1148 -           } else {
  1.1149 -              __ daddu(AT, as_Register(base), as_Register(index));
  1.1150 -              __ move(T9, disp);
  1.1151 -              if(UseLoongsonISA) {
  1.1152 -                __ gssdx(R0, AT, T9, 0);
  1.1153 -              } else {
  1.1154 -                __ daddu(AT, AT, T9); 
  1.1155 -                __ sd(R0, AT, 0);
  1.1156 -              }
  1.1157 -           }    
  1.1158 -        } else {
  1.1159 -           __ dsll(AT, as_Register(index), scale);
  1.1160 -           if( Assembler::is_simm16(disp) ) { 
  1.1161 -              if (UseLoongsonISA && Assembler::is_simm(disp, 8)) {
  1.1162 -                __ gssdx(R0, as_Register(base), AT, disp);
  1.1163 -              } else {
  1.1164 -                __ daddu(AT, as_Register(base), AT);
  1.1165 -                __ sd(R0, AT, disp);
  1.1166 -              }
  1.1167 -           } else {
  1.1168 -              __ daddu(AT, as_Register(base), AT);
  1.1169 -              __ move(T9, disp);
  1.1170 -              if (UseLoongsonISA) {
  1.1171 -                __ gssdx(R0, AT, T9, 0);
  1.1172 -              } else {
  1.1173 -                __ daddu(AT, AT, T9); 
  1.1174 -                __ sd(R0, AT, 0);
  1.1175 -              }
  1.1176 -           }    
  1.1177 -        }
  1.1178 +        assert(UseLoongsonISA, "Only supported for Loongson CPUs");
  1.1179 +        __ gssdx(R0, as_Register(base), as_Register(index), disp);
  1.1180       } else {
  1.1181 -        if( Assembler::is_simm16(disp) ) { 
  1.1182 -           __ sd(R0, as_Register(base), disp);
  1.1183 -        } else {
  1.1184 -           __ move(T9, disp);   
  1.1185 -           if (UseLoongsonISA) {
  1.1186 -             __ gssdx(R0, as_Register(base), T9, 0);
  1.1187 -           } else {
  1.1188 -             __ daddu(AT, as_Register(base), T9); 
  1.1189 -             __ sd(R0, AT, 0);
  1.1190 -           }
  1.1191 -        }    
  1.1192 +        __ sd(R0, as_Register(base), disp);
  1.1193       }
  1.1194    %}
  1.1195  
  1.1196  
  1.1197 -  enc_class storeImmN0_enc(memory mem, ImmN0 src) %{
  1.1198 +  enc_class storeImmN0_enc(memory mem) %{
  1.1199       MacroAssembler _masm(&cbuf);
  1.1200       int  base = $mem$$base;
  1.1201       int  index = $mem$$index;
  1.1202       int  scale = $mem$$scale;
  1.1203       int  disp = $mem$$disp;
  1.1204  
  1.1205 -	 if(index!=0){
  1.1206 -                 if (scale == 0) {
  1.1207 -                    __ daddu(AT, as_Register(base), as_Register(index));
  1.1208 -                 } else {
  1.1209 -                    __ dsll(AT, as_Register(index), scale);
  1.1210 -                    __ daddu(AT, as_Register(base), AT);
  1.1211 -                 }
  1.1212 -
  1.1213 -		 if( Assembler::is_simm16(disp) ) { 
  1.1214 -			 __ sw(R0, AT, disp);
  1.1215 -		 } else {
  1.1216 -			 __ move(T9, disp);
  1.1217 -			 __ daddu(AT, AT, T9); 
  1.1218 -			 __ sw(R0, AT, 0);
  1.1219 -		 }   
  1.1220 -	 } 
  1.1221 -     else {
  1.1222 -		 if( Assembler::is_simm16(disp) ) { 
  1.1223 -			 __ sw(R0, as_Register(base), disp);
  1.1224 -		 } else {
  1.1225 -			 __ move(T9, disp);
  1.1226 -			 __ daddu(AT, as_Register(base), T9); 
  1.1227 -			 __ sw(R0, AT, 0);
  1.1228 -		 }   
  1.1229 -	 }
  1.1230 +     if(index != 0){
  1.1231 +       assert(UseLoongsonISA, "Only supported for Loongson CPUs");
  1.1232 +       __ gsswx(R0, as_Register(base), as_Register(index), disp);
  1.1233 +     } else {
  1.1234 +       __ sw(R0, as_Register(base), disp);
  1.1235 +     }
  1.1236    %} 
  1.1237  
  1.1238    enc_class load_L_enc (mRegL dst, memory mem) %{
  1.1239 @@ -2976,7 +2068,7 @@
  1.1240       }
  1.1241    %}
  1.1242  
  1.1243 -  enc_class store_L_immL0_enc (memory mem, immL0 src) %{
  1.1244 +  enc_class store_L_immL0_enc (memory mem) %{
  1.1245       MacroAssembler _masm(&cbuf);
  1.1246       int  base = $mem$$base;
  1.1247       int  index = $mem$$index;
  1.1248 @@ -3000,50 +2092,10 @@
  1.1249       FloatRegister dst = $dst$$FloatRegister;
  1.1250  
  1.1251       if( index != 0 ) {
  1.1252 -        if( Assembler::is_simm16(disp) ) { 
  1.1253 -           if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
  1.1254 -              if (scale == 0) {
  1.1255 -                 __ gslwxc1(dst, as_Register(base), as_Register(index), disp);
  1.1256 -              } else {
  1.1257 -                 __ dsll(AT, as_Register(index), scale);
  1.1258 -                 __ gslwxc1(dst, as_Register(base), AT, disp);
  1.1259 -              }
  1.1260 -           } else {
  1.1261 -              if (scale == 0) {
  1.1262 -                 __ daddu(AT, as_Register(base), as_Register(index));
  1.1263 -              } else {
  1.1264 -                 __ dsll(AT, as_Register(index), scale);
  1.1265 -                 __ daddu(AT, as_Register(base), AT);
  1.1266 -              }
  1.1267 -              __ lwc1(dst, AT, disp);
  1.1268 -           }
  1.1269 -        } else {
  1.1270 -           if (scale == 0) {
  1.1271 -              __ daddu(AT, as_Register(base), as_Register(index));
  1.1272 -           } else {
  1.1273 -              __ dsll(AT, as_Register(index), scale);
  1.1274 -              __ daddu(AT, as_Register(base), AT);
  1.1275 -           }
  1.1276 -           __ move(T9, disp);
  1.1277 -           if( UseLoongsonISA ) {
  1.1278 -              __ gslwxc1(dst, AT, T9, 0);
  1.1279 -           } else {
  1.1280 -              __ daddu(AT, AT, T9); 
  1.1281 -              __ lwc1(dst, AT, 0);
  1.1282 -           }
  1.1283 -        }    
  1.1284 +        assert(UseLoongsonISA, "Only supported for Loongson CPUs");
  1.1285 +        __ gslwxc1(dst, as_Register(base), as_Register(index), disp);
  1.1286       } else {
  1.1287 -        if( Assembler::is_simm16(disp) ) { 
  1.1288 -           __ lwc1(dst, as_Register(base), disp);
  1.1289 -        } else {
  1.1290 -           __ move(T9, disp);   
  1.1291 -           if( UseLoongsonISA ) {
  1.1292 -              __ gslwxc1(dst, as_Register(base), T9, 0);
  1.1293 -           } else {
  1.1294 -              __ daddu(AT, as_Register(base), T9); 
  1.1295 -              __ lwc1(dst, AT, 0);
  1.1296 -           }
  1.1297 -        }    
  1.1298 +        __ lwc1(dst, as_Register(base), disp);
  1.1299       }
  1.1300    %}
  1.1301  
  1.1302 @@ -3056,50 +2108,10 @@
  1.1303       FloatRegister src = $src$$FloatRegister;
  1.1304  
  1.1305       if( index != 0 ) {
  1.1306 -        if( Assembler::is_simm16(disp) ) { 
  1.1307 -           if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
  1.1308 -              if (scale == 0) {
  1.1309 -                 __ gsswxc1(src, as_Register(base), as_Register(index), disp);
  1.1310 -              } else {
  1.1311 -                 __ dsll(AT, as_Register(index), scale);
  1.1312 -                 __ gsswxc1(src, as_Register(base), AT, disp);
  1.1313 -              }
  1.1314 -           } else {
  1.1315 -              if (scale == 0) {
  1.1316 -                 __ daddu(AT, as_Register(base), as_Register(index));
  1.1317 -              } else {
  1.1318 -                 __ dsll(AT, as_Register(index), scale);
  1.1319 -                 __ daddu(AT, as_Register(base), AT);
  1.1320 -              }
  1.1321 -              __ swc1(src, AT, disp);
  1.1322 -           }
  1.1323 -        } else {
  1.1324 -           if (scale == 0) {
  1.1325 -              __ daddu(AT, as_Register(base), as_Register(index));
  1.1326 -           } else {
  1.1327 -              __ dsll(AT, as_Register(index), scale);
  1.1328 -              __ daddu(AT, as_Register(base), AT);
  1.1329 -           }
  1.1330 -           __ move(T9, disp);
  1.1331 -           if( UseLoongsonISA ) {
  1.1332 -              __ gsswxc1(src, AT, T9, 0);
  1.1333 -           } else {
  1.1334 -              __ daddu(AT, AT, T9); 
  1.1335 -              __ swc1(src, AT, 0);
  1.1336 -           }
  1.1337 -        }    
  1.1338 +        assert(UseLoongsonISA, "Only supported for Loongson CPUs");
  1.1339 +        __ gsswxc1(src, as_Register(base), as_Register(index), disp);
  1.1340       } else {
  1.1341 -        if( Assembler::is_simm16(disp) ) { 
  1.1342 -           __ swc1(src, as_Register(base), disp);
  1.1343 -        } else {
  1.1344 -           __ move(T9, disp);   
  1.1345 -           if( UseLoongsonISA ) {
  1.1346 -              __ gslwxc1(src, as_Register(base), T9, 0);
  1.1347 -           } else {
  1.1348 -              __ daddu(AT, as_Register(base), T9); 
  1.1349 -              __ swc1(src, AT, 0);
  1.1350 -           }
  1.1351 -        }    
  1.1352 +        __ swc1(src, as_Register(base), disp);
  1.1353       }
  1.1354    %}
  1.1355  
  1.1356 @@ -3112,106 +2124,26 @@
  1.1357       FloatRegister dst_reg = as_FloatRegister($dst$$reg);
  1.1358  
  1.1359       if( index != 0 ) {
  1.1360 -        if( Assembler::is_simm16(disp) ) { 
  1.1361 -           if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
  1.1362 -              if (scale == 0) {
  1.1363 -                 __ gsldxc1(dst_reg, as_Register(base), as_Register(index), disp);
  1.1364 -              } else {
  1.1365 -                 __ dsll(AT, as_Register(index), scale);
  1.1366 -                 __ gsldxc1(dst_reg, as_Register(base), AT, disp);
  1.1367 -              }
  1.1368 -           } else {
  1.1369 -              if (scale == 0) {
  1.1370 -                 __ daddu(AT, as_Register(base), as_Register(index));
  1.1371 -              } else {
  1.1372 -                 __ dsll(AT, as_Register(index), scale);
  1.1373 -                 __ daddu(AT, as_Register(base), AT);
  1.1374 -              }
  1.1375 -              __ ldc1(dst_reg, AT, disp);
  1.1376 -           }
  1.1377 -        } else {
  1.1378 -           if (scale == 0) {
  1.1379 -              __ daddu(AT, as_Register(base), as_Register(index));
  1.1380 -           } else {
  1.1381 -              __ dsll(AT, as_Register(index), scale);
  1.1382 -              __ daddu(AT, as_Register(base), AT);
  1.1383 -           }
  1.1384 -           __ move(T9, disp);
  1.1385 -           if( UseLoongsonISA ) {
  1.1386 -              __ gsldxc1(dst_reg, AT, T9, 0);
  1.1387 -           } else {
  1.1388 -              __ addu(AT, AT, T9); 
  1.1389 -              __ ldc1(dst_reg, AT, 0);
  1.1390 -           }
  1.1391 -        }    
  1.1392 +        assert(UseLoongsonISA, "Only supported for Loongson CPUs");
  1.1393 +        __ gsldxc1(dst_reg, as_Register(base), as_Register(index), disp);
  1.1394       } else {
  1.1395 -        if( Assembler::is_simm16(disp) ) { 
  1.1396 -           __ ldc1(dst_reg, as_Register(base), disp);
  1.1397 -        } else {
  1.1398 -           __ move(T9, disp);   
  1.1399 -           if( UseLoongsonISA ) {
  1.1400 -              __ gsldxc1(dst_reg, as_Register(base), T9, 0);
  1.1401 -           } else {
  1.1402 -              __ addu(AT, as_Register(base), T9); 
  1.1403 -              __ ldc1(dst_reg, AT, 0);
  1.1404 -           }
  1.1405 -        }    
  1.1406 +        __ ldc1(dst_reg, as_Register(base), disp);
  1.1407       }
  1.1408    %}
  1.1409  
  1.1410    enc_class store_D_reg_enc (memory mem, regD src) %{
  1.1411       MacroAssembler _masm(&cbuf);
  1.1412 -     int  base = $mem$$base;
  1.1413 +     int  base  = $mem$$base;
  1.1414       int  index = $mem$$index;
  1.1415       int  scale = $mem$$scale;
  1.1416 -     int  disp = $mem$$disp;
  1.1417 +     int  disp  = $mem$$disp;
  1.1418       FloatRegister src_reg = as_FloatRegister($src$$reg);
  1.1419  
  1.1420       if( index != 0 ) {
  1.1421 -        if( Assembler::is_simm16(disp) ) { 
  1.1422 -           if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
  1.1423 -              if (scale == 0) {
  1.1424 -                 __ gssdxc1(src_reg, as_Register(base), as_Register(index), disp);
  1.1425 -              } else {
  1.1426 -                 __ dsll(AT, as_Register(index), scale);
  1.1427 -                 __ gssdxc1(src_reg, as_Register(base), AT, disp);
  1.1428 -              }
  1.1429 -           } else {
  1.1430 -              if (scale == 0) {
  1.1431 -                 __ daddu(AT, as_Register(base), as_Register(index));
  1.1432 -              } else {
  1.1433 -                 __ dsll(AT, as_Register(index), scale);
  1.1434 -                 __ daddu(AT, as_Register(base), AT);
  1.1435 -              }
  1.1436 -              __ sdc1(src_reg, AT, disp);
  1.1437 -           }
  1.1438 -        } else {
  1.1439 -           if (scale == 0) {
  1.1440 -              __ daddu(AT, as_Register(base), as_Register(index));
  1.1441 -           } else {
  1.1442 -              __ dsll(AT, as_Register(index), scale);
  1.1443 -              __ daddu(AT, as_Register(base), AT);
  1.1444 -           }
  1.1445 -           __ move(T9, disp);
  1.1446 -           if( UseLoongsonISA ) {
  1.1447 -              __ gssdxc1(src_reg, AT, T9, 0);
  1.1448 -           } else {
  1.1449 -              __ addu(AT, AT, T9); 
  1.1450 -              __ sdc1(src_reg, AT, 0);
  1.1451 -           }
  1.1452 -        }    
  1.1453 +        assert(UseLoongsonISA, "Only supported for Loongson CPUs");
  1.1454 +        __ gssdxc1(src_reg, as_Register(base), as_Register(index), disp);
  1.1455       } else {
  1.1456 -        if( Assembler::is_simm16(disp) ) { 
  1.1457 -           __ sdc1(src_reg, as_Register(base), disp);
  1.1458 -        } else {
  1.1459 -           __ move(T9, disp);   
  1.1460 -           if( UseLoongsonISA ) {
  1.1461 -              __ gssdxc1(src_reg, as_Register(base), T9, 0);
  1.1462 -           } else {
  1.1463 -              __ addu(AT, as_Register(base), T9); 
  1.1464 -              __ sdc1(src_reg, AT, 0);
  1.1465 -           }
  1.1466 -        }    
  1.1467 +        __ sdc1(src_reg, as_Register(base), disp);
  1.1468       }
  1.1469    %}
  1.1470  
  1.1471 @@ -5249,6 +4181,18 @@
  1.1472    gsBaseIndexOffset0NarrowKlass
  1.1473  );
  1.1474  
  1.1475 +// For loading unsigned values
  1.1476 +// umemory --> unsigned memory
  1.1477 +opclass umemory(
  1.1478 +  baseOffset16,
  1.1479 +  baseOffset0,
  1.1480 +
  1.1481 +  baseOffset16Narrow,
  1.1482 +  baseOffset0Narrow,
  1.1483 +
  1.1484 +  baseOffset16NarrowKlass,
  1.1485 +  baseOffset0NarrowKlass
  1.1486 +);
  1.1487  
  1.1488  
  1.1489  //----------Conditional Branch Operands----------------------------------------
  1.1490 @@ -5725,7 +4669,7 @@
  1.1491  %}
  1.1492  
  1.1493  // Load Integer (32 bit signed) to Unsigned Byte (8 bit UNsigned)
  1.1494 -instruct loadI2UB(mRegI dst, memory mem, immI_255 mask) %{
  1.1495 +instruct loadI2UB(mRegI dst, umemory mem, immI_255 mask) %{
  1.1496    match(Set dst (AndI (LoadI mem) mask));
  1.1497  
  1.1498    ins_cost(125);
  1.1499 @@ -5745,7 +4689,7 @@
  1.1500  %}
  1.1501  
  1.1502  // Load Integer (32 bit signed) to Unsigned Short/Char (16 bit UNsigned)
  1.1503 -instruct loadI2US(mRegI dst, memory mem, immI_65535 mask) %{
  1.1504 +instruct loadI2US(mRegI dst, umemory mem, immI_65535 mask) %{
  1.1505    match(Set dst (AndI (LoadI mem) mask));
  1.1506  
  1.1507    ins_cost(125);
  1.1508 @@ -5792,12 +4736,12 @@
  1.1509  
  1.1510    ins_cost(180);
  1.1511    format %{ "sd    $mem,   zero #@storeL_immL0" %}
  1.1512 -  ins_encode(store_L_immL0_enc(mem, zero));
  1.1513 +  ins_encode(store_L_immL0_enc(mem));
  1.1514    ins_pipe( ialu_storeL );
  1.1515  %}
  1.1516  
  1.1517  // Load Compressed Pointer
  1.1518 -instruct loadN(mRegN dst, memory mem)
  1.1519 +instruct loadN(mRegN dst, umemory mem)
  1.1520  %{
  1.1521     match(Set dst (LoadN mem));
  1.1522  
  1.1523 @@ -5807,7 +4751,7 @@
  1.1524     ins_pipe( ialu_loadI ); // XXX
  1.1525  %}
  1.1526  
  1.1527 -instruct loadN2P(mRegP dst, memory mem)
  1.1528 +instruct loadN2P(mRegP dst, umemory mem)
  1.1529  %{
  1.1530     match(Set dst (DecodeN (LoadN mem)));
  1.1531     predicate(Universe::narrow_oop_base() == NULL && Universe::narrow_oop_shift() == 0);
  1.1532 @@ -5839,7 +4783,7 @@
  1.1533  %}
  1.1534  
  1.1535  // Load narrow Klass Pointer
  1.1536 -instruct loadNKlass(mRegN dst, memory mem)
  1.1537 +instruct loadNKlass(mRegN dst, umemory mem)
  1.1538  %{
  1.1539    match(Set dst (LoadNKlass mem));
  1.1540  
  1.1541 @@ -5849,7 +4793,7 @@
  1.1542    ins_pipe( ialu_loadI ); // XXX
  1.1543  %}
  1.1544  
  1.1545 -instruct loadN2PKlass(mRegP dst, memory mem)
  1.1546 +instruct loadN2PKlass(mRegP dst, umemory mem)
  1.1547  %{
  1.1548    match(Set dst (DecodeNKlass (LoadNKlass mem)));
  1.1549    predicate(Universe::narrow_klass_base() == NULL && Universe::narrow_klass_shift() == 0);
  1.1550 @@ -5964,16 +4908,6 @@
  1.1551    ins_pipe( ialu_storeI );
  1.1552  %}
  1.1553  
  1.1554 -// Store Byte Immediate
  1.1555 -instruct storeImmB(memory mem, immI8 src) %{
  1.1556 -  match(Set mem (StoreB mem src));
  1.1557 -
  1.1558 -  ins_cost(150);
  1.1559 -  format %{ "movb   $mem, $src #@storeImmB" %}
  1.1560 -  ins_encode(store_B_immI_enc(mem, src));
  1.1561 -  ins_pipe( ialu_storeI );
  1.1562 -%}
  1.1563 -
  1.1564  // Store Compressed Pointer
  1.1565  instruct storeN(memory mem, mRegN src)
  1.1566  %{
  1.1567 @@ -6023,7 +4957,7 @@
  1.1568  
  1.1569    ins_cost(125); // XXX
  1.1570    format %{ "storeN0    $mem, R12\t# compressed ptr" %}
  1.1571 -  ins_encode(storeImmN0_enc(mem, zero));
  1.1572 +  ins_encode(storeImmN0_enc(mem));
  1.1573    ins_pipe( ialu_storeI );
  1.1574  %}
  1.1575  
  1.1576 @@ -6037,6 +4971,15 @@
  1.1577    ins_pipe( ialu_storeI );
  1.1578  %}
  1.1579  
  1.1580 +instruct storeB0(memory mem, immI0 zero) %{
  1.1581 +  match(Set mem (StoreB mem zero));
  1.1582 +
  1.1583 +  ins_cost(100);
  1.1584 +  format %{ "sb    $zero, $mem #@storeB0" %}
  1.1585 +  ins_encode(store_B0_enc(mem));
  1.1586 +  ins_pipe( ialu_storeI );
  1.1587 +%}
  1.1588 +
  1.1589  instruct storeB_convL2I(memory mem, mRegL src) %{
  1.1590    match(Set mem (StoreB mem (ConvL2I src)));
  1.1591  
  1.1592 @@ -6066,7 +5009,7 @@
  1.1593  %}
  1.1594  
  1.1595  // Load Byte (8bit UNsigned)
  1.1596 -instruct loadUB(mRegI dst, memory mem) %{
  1.1597 +instruct loadUB(mRegI dst, umemory mem) %{
  1.1598    match(Set dst (LoadUB mem));
  1.1599  
  1.1600    ins_cost(125);
  1.1601 @@ -6075,7 +5018,7 @@
  1.1602    ins_pipe( ialu_loadI );
  1.1603  %}
  1.1604  
  1.1605 -instruct loadUB_convI2L(mRegL dst, memory mem) %{
  1.1606 +instruct loadUB_convI2L(mRegL dst, umemory mem) %{
  1.1607    match(Set dst (ConvI2L (LoadUB mem)));
  1.1608  
  1.1609    ins_cost(125);
  1.1610 @@ -6114,12 +5057,12 @@
  1.1611  %}
  1.1612  
  1.1613  // Store Integer Immediate
  1.1614 -instruct storeImmI(memory mem, immI src) %{
  1.1615 -  match(Set mem (StoreI mem src));
  1.1616 -
  1.1617 -  ins_cost(150);
  1.1618 -  format %{ "mov    $mem, $src #@storeImmI" %}
  1.1619 -  ins_encode(store_I_immI_enc(mem, src));
  1.1620 +instruct storeI0(memory mem, immI0 zero) %{
  1.1621 +  match(Set mem (StoreI mem zero));
  1.1622 +
  1.1623 +  ins_cost(100);
  1.1624 +  format %{ "sw    $mem, $zero #@storeI0" %}
  1.1625 +  ins_encode(store_I_immI0_enc(mem));
  1.1626    ins_pipe( ialu_storeI );
  1.1627  %}
  1.1628  
  1.1629 @@ -10148,20 +9091,20 @@
  1.1630  %}
  1.1631  */
  1.1632  
  1.1633 -instruct lbu_and_lmask(mRegI dst, memory mem,  immI_255 mask) %{
  1.1634 +instruct lbu_and_lmask(mRegI dst, umemory mem,  immI_255 mask) %{
  1.1635    match(Set dst (AndI mask (LoadB mem)));
  1.1636    ins_cost(60);
  1.1637  
  1.1638 -  format %{ "lhu  $dst, $mem #@lbu_and_lmask" %}
  1.1639 +  format %{ "lbu  $dst, $mem #@lbu_and_lmask" %}
  1.1640    ins_encode(load_UB_enc(dst, mem));
  1.1641    ins_pipe( ialu_loadI );
  1.1642  %}
  1.1643  
  1.1644 -instruct lbu_and_rmask(mRegI dst, memory mem,  immI_255 mask) %{
  1.1645 +instruct lbu_and_rmask(mRegI dst, umemory mem,  immI_255 mask) %{
  1.1646    match(Set dst (AndI (LoadB mem) mask));
  1.1647    ins_cost(60);
  1.1648  
  1.1649 -  format %{ "lhu  $dst, $mem #@lbu_and_rmask" %}
  1.1650 +  format %{ "lbu  $dst, $mem #@lbu_and_rmask" %}
  1.1651    ins_encode(load_UB_enc(dst, mem));
  1.1652    ins_pipe( ialu_loadI );
  1.1653  %}
  1.1654 @@ -11805,7 +10748,7 @@
  1.1655  
  1.1656  // Prefetch instructions.
  1.1657  
  1.1658 -instruct prefetchrNTA( memory mem ) %{
  1.1659 +instruct prefetchrNTA( umemory mem ) %{
  1.1660    match(PrefetchRead mem);
  1.1661    ins_cost(125);
  1.1662  
  1.1663 @@ -11816,29 +10759,14 @@
  1.1664      int  scale = $mem$$scale;
  1.1665      int  disp = $mem$$disp;
  1.1666  
  1.1667 -    if( index != 0 ) {
  1.1668 -      if (scale == 0) {
  1.1669 -        __ daddu(AT, as_Register(base), as_Register(index));
  1.1670 -      } else {
  1.1671 -        __ dsll(AT, as_Register(index), scale);
  1.1672 -        __ daddu(AT, as_Register(base), AT);
  1.1673 -      }
  1.1674 -    } else {
  1.1675 -      __ move(AT, as_Register(base));
  1.1676 -    }
  1.1677 -    if( Assembler::is_simm16(disp) ) {
  1.1678 -      __ daddiu(AT, as_Register(base), disp);
  1.1679 -      __ daddiu(AT, AT, disp);
  1.1680 -    } else {
  1.1681 -      __ move(T9, disp);
  1.1682 -      __ daddu(AT, as_Register(base), T9);
  1.1683 -    }
  1.1684 +    assert(index == 0, "no index");
  1.1685 +    __ daddiu(AT, as_Register(base), disp);
  1.1686      __ pref(0, AT, 0); //hint: 0:load
  1.1687    %}
  1.1688    ins_pipe(pipe_slow);
  1.1689  %}
  1.1690  
  1.1691 -instruct prefetchwNTA( memory mem ) %{
  1.1692 +instruct prefetchwNTA( umemory mem ) %{
  1.1693    match(PrefetchWrite mem);
  1.1694    ins_cost(125);
  1.1695    format %{ "pref $mem\t# Prefetch to non-temporal cache for write @ prefetchwNTA" %}
  1.1696 @@ -11848,24 +10776,9 @@
  1.1697      int  scale = $mem$$scale;
  1.1698      int  disp = $mem$$disp;
  1.1699  
  1.1700 -    if( index != 0 ) {
  1.1701 -      if (scale == 0) {
  1.1702 -        __ daddu(AT, as_Register(base), as_Register(index));
  1.1703 -      } else {
  1.1704 -        __ dsll(AT, as_Register(index), scale);
  1.1705 -        __ daddu(AT, as_Register(base), AT);
  1.1706 -      }
  1.1707 -    } else {
  1.1708 -      __ move(AT, as_Register(base));
  1.1709 -    }
  1.1710 -    if( Assembler::is_simm16(disp) ) {
  1.1711 -      __ daddiu(AT, as_Register(base), disp);
  1.1712 -      __ daddiu(AT, AT, disp);
  1.1713 -    } else {
  1.1714 -      __ move(T9, disp);
  1.1715 -      __ daddu(AT, as_Register(base), T9);
  1.1716 -    }
  1.1717 -     __ pref(1, AT, 0); //hint: 1:store
  1.1718 +    assert(index == 0, "no index");
  1.1719 +    __ daddiu(AT, as_Register(base), disp);
  1.1720 +    __ pref(1, AT, 0); //hint: 1:store
  1.1721    %}
  1.1722    ins_pipe(pipe_slow);
  1.1723  %}
  1.1724 @@ -11885,50 +10798,10 @@
  1.1725       Register dst = R0;
  1.1726  
  1.1727       if( index != 0 ) {
  1.1728 -        if( Assembler::is_simm16(disp) ) { 
  1.1729 -           if( UseLoongsonISA ) {
  1.1730 -              if (scale == 0) {
  1.1731 -                 __ gslbx(dst, as_Register(base), as_Register(index), disp);
  1.1732 -              } else {
  1.1733 -                 __ dsll(AT, as_Register(index), scale);
  1.1734 -                 __ gslbx(dst, as_Register(base), AT, disp);
  1.1735 -              }
  1.1736 -           } else {
  1.1737 -              if (scale == 0) {
  1.1738 -                 __ addu(AT, as_Register(base), as_Register(index));
  1.1739 -              } else {
  1.1740 -                 __ dsll(AT, as_Register(index), scale);
  1.1741 -                 __ addu(AT, as_Register(base), AT);
  1.1742 -              }
  1.1743 -              __ lb(dst, AT, disp);
  1.1744 -           }
  1.1745 -        } else {
  1.1746 -           if (scale == 0) {
  1.1747 -              __ addu(AT, as_Register(base), as_Register(index));
  1.1748 -           } else {
  1.1749 -              __ dsll(AT, as_Register(index), scale);
  1.1750 -              __ addu(AT, as_Register(base), AT);
  1.1751 -           }
  1.1752 -           __ move(T9, disp);
  1.1753 -           if( UseLoongsonISA ) {
  1.1754 -              __ gslbx(dst, AT, T9, 0);
  1.1755 -           } else {
  1.1756 -              __ addu(AT, AT, T9); 
  1.1757 -              __ lb(dst, AT, 0);
  1.1758 -           }
  1.1759 -        }    
  1.1760 +        assert(UseLoongsonISA, "Only supported for Loongson CPUs");
  1.1761 +        __ gslbx(dst, as_Register(base), as_Register(index), disp);
  1.1762       } else {
  1.1763 -        if( Assembler::is_simm16(disp) ) { 
  1.1764 -           __ lb(dst, as_Register(base), disp);
  1.1765 -        } else {
  1.1766 -           __ move(T9, disp);   
  1.1767 -           if( UseLoongsonISA ) {
  1.1768 -              __ gslbx(dst, as_Register(base), T9, 0);
  1.1769 -           } else {
  1.1770 -              __ addu(AT, as_Register(base), T9); 
  1.1771 -              __ lb(dst, AT, 0);
  1.1772 -           }
  1.1773 -        }    
  1.1774 +        __ lb(dst, as_Register(base), disp);
  1.1775       }
  1.1776    %}
  1.1777    ins_pipe(pipe_slow);
  1.1778 @@ -11949,7 +10822,7 @@
  1.1779  %}
  1.1780  
  1.1781  // Load Char (16bit unsigned)
  1.1782 -instruct loadUS(mRegI dst, memory mem) %{
  1.1783 +instruct loadUS(mRegI dst, umemory mem) %{
  1.1784    match(Set dst (LoadUS mem));
  1.1785  
  1.1786    ins_cost(125);
  1.1787 @@ -11958,7 +10831,7 @@
  1.1788    ins_pipe( ialu_loadI );
  1.1789  %}
  1.1790  
  1.1791 -instruct loadUS_convI2L(mRegL dst, memory mem) %{
  1.1792 +instruct loadUS_convI2L(mRegL dst, umemory mem) %{
  1.1793    match(Set dst (ConvI2L (LoadUS mem)));
  1.1794  
  1.1795    ins_cost(125);
  1.1796 @@ -12033,7 +10906,7 @@
  1.1797    ins_encode %{
  1.1798      FloatRegister dst = as_FloatRegister($dst$$reg);
  1.1799  
  1.1800 -      __ dmtc1(R0, dst);
  1.1801 +    __ dmtc1(R0, dst);
  1.1802    %}
  1.1803    ins_pipe( fpu_loadF );
  1.1804  %}
  1.1805 @@ -12083,66 +10956,10 @@
  1.1806      int      disp = $mem$$disp;
  1.1807  
  1.1808      if( index != 0 ) {
  1.1809 -		if ( UseLoongsonISA ) {
  1.1810 -			if ( Assembler::is_simm(disp, 8) ) {
  1.1811 -				if ( scale == 0 ) {
  1.1812 -					__ gsswx(R0, as_Register(base), as_Register(index), disp);
  1.1813 -				} else {
  1.1814 -					__ dsll(T9, as_Register(index), scale);
  1.1815 -					__ gsswx(R0, as_Register(base), T9, disp);
  1.1816 -				}
  1.1817 -			} else if ( Assembler::is_simm16(disp) ) {
  1.1818 -				if ( scale == 0 ) {
  1.1819 -					__ daddu(AT, as_Register(base), as_Register(index));
  1.1820 -				} else {
  1.1821 -					__ dsll(T9, as_Register(index), scale);
  1.1822 -					__ daddu(AT, as_Register(base), T9);
  1.1823 -				}
  1.1824 -				__ sw(R0, AT, disp);
  1.1825 -			} else {
  1.1826 -				if ( scale == 0 ) {
  1.1827 -					__ move(T9, disp);
  1.1828 -					__ daddu(AT, as_Register(index), T9);
  1.1829 -					__ gsswx(R0, as_Register(base), AT, 0);
  1.1830 -				} else {
  1.1831 -					__ dsll(T9, as_Register(index), scale);
  1.1832 -					__ move(AT, disp);
  1.1833 -					__ daddu(AT, AT, T9);
  1.1834 -					__ gsswx(R0, as_Register(base), AT, 0);
  1.1835 -				}
  1.1836 -			}
  1.1837 -		} else { //not use loongson isa
  1.1838 -		    if(scale != 0) {
  1.1839 -		       __ dsll(T9, as_Register(index), scale);
  1.1840 -		       __ daddu(AT, as_Register(base), T9);
  1.1841 -		    } else {
  1.1842 -		       __ daddu(AT, as_Register(base), as_Register(index));
  1.1843 -		    }
  1.1844 -		   if( Assembler::is_simm16(disp) ) { 
  1.1845 -		      __ sw(R0, AT, disp);
  1.1846 -		   } else {
  1.1847 -		      __ move(T9, disp);
  1.1848 -		      __ daddu(AT, AT, T9);
  1.1849 -			  __ sw(R0, AT, 0);
  1.1850 -	       }
  1.1851 -	   }
  1.1852 -    } else { //index is 0
  1.1853 -		if ( UseLoongsonISA ) {
  1.1854 -			if ( Assembler::is_simm16(disp) ) {
  1.1855 -				__ sw(R0, as_Register(base), disp);
  1.1856 -			} else {
  1.1857 -				__ move(T9, disp);
  1.1858 -				__ gsswx(R0, as_Register(base), T9, 0);
  1.1859 -			}
  1.1860 -		} else {
  1.1861 -		   if( Assembler::is_simm16(disp) ) { 
  1.1862 -		      __ sw(R0, as_Register(base), disp);
  1.1863 -		   } else {
  1.1864 -		      __ move(T9, disp);
  1.1865 -		      __ daddu(AT, as_Register(base), T9);
  1.1866 -			  __ sw(R0, AT, 0);
  1.1867 -		   }
  1.1868 -		}
  1.1869 +       assert(UseLoongsonISA, "Only supported for Loongson CPUs");
  1.1870 +       __ gsswx(R0, as_Register(base), as_Register(index), disp);
  1.1871 +    } else {
  1.1872 +       __ sw(R0, as_Register(base), disp);
  1.1873      }
  1.1874    %}
  1.1875    ins_pipe( ialu_storeI );
  1.1876 @@ -12177,87 +10994,6 @@
  1.1877    ins_pipe( fpu_storeF );
  1.1878  %}
  1.1879  
  1.1880 -instruct storeD_imm0( memory mem, immD0 zero) %{
  1.1881 -  match(Set mem (StoreD mem zero));
  1.1882 -
  1.1883 -  ins_cost(40);
  1.1884 -  format %{ "store   $mem, zero\t# store float @ storeD_imm0" %}
  1.1885 -  ins_encode %{
  1.1886 -    int      base = $mem$$base;
  1.1887 -    int     index = $mem$$index;
  1.1888 -    int     scale = $mem$$scale;
  1.1889 -    int      disp = $mem$$disp;
  1.1890 -
  1.1891 -    __ mtc1(R0, F30);
  1.1892 -    __ cvt_d_w(F30, F30);
  1.1893 -
  1.1894 -    if( index != 0 ) {
  1.1895 -		if ( UseLoongsonISA ) {
  1.1896 -			if ( Assembler::is_simm(disp, 8) ) {
  1.1897 -				if (scale == 0) {
  1.1898 -					__ gssdxc1(F30, as_Register(base), as_Register(index), disp);
  1.1899 -				} else {
  1.1900 -					__ dsll(T9, as_Register(index), scale);
  1.1901 -					__ gssdxc1(F30, as_Register(base), T9, disp);
  1.1902 -				}
  1.1903 -			} else if ( Assembler::is_simm16(disp) ) {
  1.1904 -				if (scale == 0) {
  1.1905 -					__ daddu(AT, as_Register(base), as_Register(index));
  1.1906 -					__ sdc1(F30, AT, disp);
  1.1907 -				} else {
  1.1908 -					__ dsll(T9, as_Register(index), scale);
  1.1909 -					__ daddu(AT, as_Register(base), T9);
  1.1910 -					__ sdc1(F30, AT, disp);
  1.1911 -				}
  1.1912 -			} else {
  1.1913 -				if (scale == 0) {
  1.1914 -					__ move(T9, disp);
  1.1915 -					__ daddu(AT, as_Register(index), T9);
  1.1916 -					__ gssdxc1(F30, as_Register(base), AT, 0);
  1.1917 -				} else {
  1.1918 -					__ move(T9, disp);
  1.1919 -					__ dsll(AT, as_Register(index), scale);
  1.1920 -					__ daddu(AT, AT, T9);
  1.1921 -					__ gssdxc1(F30, as_Register(base), AT, 0);
  1.1922 -				}
  1.1923 -			}
  1.1924 -		} else { // not use loongson isa
  1.1925 -		    if(scale != 0) {
  1.1926 -		       __ dsll(T9, as_Register(index), scale);
  1.1927 -		       __ daddu(AT, as_Register(base), T9);
  1.1928 -		    } else {
  1.1929 -		       __ daddu(AT, as_Register(base), as_Register(index));
  1.1930 -		    }
  1.1931 -		   if( Assembler::is_simm16(disp) ) { 
  1.1932 -		      __ sdc1(F30, AT, disp);
  1.1933 -		   } else {
  1.1934 -		      __ move(T9, disp);
  1.1935 -		      __ daddu(AT, AT, T9);
  1.1936 -		      __ sdc1(F30, AT, 0);
  1.1937 -		   }
  1.1938 -		}
  1.1939 -    } else {// index is 0
  1.1940 -		if ( UseLoongsonISA ) {
  1.1941 -			if ( Assembler::is_simm16(disp) ) {
  1.1942 -				__ sdc1(F30, as_Register(base), disp);
  1.1943 -			} else {
  1.1944 -				__ move(T9, disp);
  1.1945 -				__ gssdxc1(F30, as_Register(base), T9, 0);
  1.1946 -			}
  1.1947 -		} else {
  1.1948 -		   if( Assembler::is_simm16(disp) ) { 
  1.1949 -		      __ sdc1(F30, as_Register(base), disp);
  1.1950 -		   } else {
  1.1951 -		      __ move(T9, disp);
  1.1952 -		      __ daddu(AT, as_Register(base), T9);
  1.1953 -		      __ sdc1(F30, AT, 0);
  1.1954 -		   }
  1.1955 -		}
  1.1956 -    }
  1.1957 -  %}
  1.1958 -  ins_pipe( ialu_storeI );
  1.1959 -%}
  1.1960 -
  1.1961  instruct loadSSI(mRegI dst, stackSlotI src)
  1.1962  %{
  1.1963    match(Set dst src);
  1.1964 @@ -12416,13 +11152,21 @@
  1.1965  %}
  1.1966  
  1.1967  // Store CMS card-mark Immediate
  1.1968 -instruct storeImmCM(memory mem, immI8 src) %{
  1.1969 +instruct storeImmCM(memory mem, mRegI src) %{
  1.1970    match(Set mem (StoreCM mem src));
  1.1971  
  1.1972 -  ins_cost(150);
  1.1973 -  format %{ "MOV8   $mem,$src\t! CMS card-mark imm0" %}
  1.1974 -//  opcode(0xC6);
  1.1975 -  ins_encode(store_B_immI_enc_sync(mem, src));
  1.1976 +  ins_cost(500);
  1.1977 +  format %{ "sb   $src, $mem  (CMS card-mark) @ storeImmCM" %}
  1.1978 +  ins_encode(store_B_reg_sync_enc(mem, src));
  1.1979 +  ins_pipe( ialu_storeI );
  1.1980 +%}
  1.1981 +
  1.1982 +instruct storeI0CM(memory mem, immI0 zero) %{
  1.1983 +  match(Set mem (StoreCM mem zero));
  1.1984 +
  1.1985 +  ins_cost(450);
  1.1986 +  format %{ "sb   $zero, $mem  (CMS card-mark) @ storeI0CM" %}
  1.1987 +  ins_encode(store_B0_sync_enc(mem));
  1.1988    ins_pipe( ialu_storeI );
  1.1989  %}
  1.1990  
  1.1991 @@ -12958,7 +11702,7 @@
  1.1992  
  1.1993  // Match loading integer and casting it to unsigned int in long register.
  1.1994  // LoadI + ConvI2L + AndL 0xffffffff.
  1.1995 -instruct loadUI2L_rmask(mRegL dst, memory mem, immL_32bits mask) %{
  1.1996 +instruct loadUI2L_rmask(mRegL dst, umemory mem, immL_32bits mask) %{
  1.1997    match(Set dst (AndL (ConvI2L (LoadI mem)) mask));
  1.1998  
  1.1999    format %{ "lwu     $dst, $mem \t// zero-extend to long @ loadUI2L_rmask" %}
  1.2000 @@ -12966,7 +11710,7 @@
  1.2001    ins_pipe(ialu_loadI);
  1.2002  %}
  1.2003  
  1.2004 -instruct loadUI2L_lmask(mRegL dst, memory mem, immL_32bits mask) %{
  1.2005 +instruct loadUI2L_lmask(mRegL dst, umemory mem, immL_32bits mask) %{
  1.2006    match(Set dst (AndL mask (ConvI2L (LoadI mem))));
  1.2007  
  1.2008    format %{ "lwu     $dst, $mem \t// zero-extend to long @ loadUI2L_lmask" %}

mercurial