# HG changeset patch # User fujie # Date 1490877959 14400 # Node ID d3aefa77da6c8b4528341a79e7cc1f5e044b76de # Parent 76857a2c353455175534207cc594f194b76224eb [C2] Performance of scimark.monte_carlo is about 5% up. diff -r 76857a2c3534 -r d3aefa77da6c src/cpu/mips/vm/icache_mips.cpp --- a/src/cpu/mips/vm/icache_mips.cpp Tue Mar 28 16:09:10 2017 -0400 +++ b/src/cpu/mips/vm/icache_mips.cpp Thu Mar 30 08:45:59 2017 -0400 @@ -40,18 +40,34 @@ void ICacheStubGenerator::generate_icache_flush(ICache::flush_icache_stub_t* flush_icache_stub) {}; void ICache::call_flush_stub(address start, int lines) { - cacheflush(start, lines * line_size , ICACHE); + //in fact, the current os implementation simply flush all ICACHE&DCACHE +#ifndef CACHE_OPT + /* Loongson3A supports automatic synchronization between Icache and Dcache. + * No manual synchronization is needed. */ + cacheflush(start, lines * line_size , ICACHE); +#endif +// sysmips(3, 0, 0, 0); } void ICache::invalidate_word(address addr) { - cacheflush(addr,4, ICACHE); + //cacheflush(addr, 4, ICACHE); + +#ifndef CACHE_OPT + cacheflush(addr,4, ICACHE); +#endif +// sysmips(3, 0, 0, 0); } void ICache::invalidate_range(address start, int nbytes) { - cacheflush(start, nbytes, ICACHE); +#ifndef CACHE_OPT + cacheflush(start, nbytes, ICACHE); +#endif +// sysmips(3, 0, 0, 0); } void ICache::invalidate_all() { - sysmips(3, 0, 0, 0); +#ifndef CACHE_OPT + sysmips(3, 0, 0, 0); +#endif } diff -r 76857a2c3534 -r d3aefa77da6c src/cpu/mips/vm/mips_64.ad --- a/src/cpu/mips/vm/mips_64.ad Tue Mar 28 16:09:10 2017 -0400 +++ b/src/cpu/mips/vm/mips_64.ad Thu Mar 30 08:45:59 2017 -0400 @@ -1704,15 +1704,55 @@ int disp = $mem$$disp; if( index != 0 ) { - assert(UseLoongsonISA, "Only supported for Loongson CPUs"); - __ gslbx(as_Register(dst), as_Register(base), as_Register(index), disp); + if( Assembler::is_simm16(disp) ) { + if( UseLoongsonISA ) { + if (scale == 0) { + __ gslbx(as_Register(dst), as_Register(base), as_Register(index), disp); + } else { + __ dsll(AT, as_Register(index), scale); + __ gslbx(as_Register(dst), as_Register(base), AT, disp); + } + } else { + if (scale == 0) { + __ addu(AT, as_Register(base), as_Register(index)); + } else { + __ dsll(AT, as_Register(index), scale); + __ addu(AT, as_Register(base), AT); + } + __ lb(as_Register(dst), AT, disp); + } + } else { + if (scale == 0) { + __ addu(AT, as_Register(base), as_Register(index)); + } else { + __ dsll(AT, as_Register(index), scale); + __ addu(AT, as_Register(base), AT); + } + __ move(T9, disp); + if( UseLoongsonISA ) { + __ gslbx(as_Register(dst), AT, T9, 0); + } else { + __ addu(AT, AT, T9); + __ lb(as_Register(dst), AT, 0); + } + } } else { - __ lb(as_Register(dst), as_Register(base), disp); + if( Assembler::is_simm16(disp) ) { + __ lb(as_Register(dst), as_Register(base), disp); + } else { + __ move(T9, disp); + if( UseLoongsonISA ) { + __ gslbx(as_Register(dst), as_Register(base), T9, 0); + } else { + __ addu(AT, as_Register(base), T9); + __ lb(as_Register(dst), AT, 0); + } + } } %} //Load byte unsigned - enc_class load_UB_enc (mRegI dst, umemory mem) %{ + enc_class load_UB_enc (mRegI dst, memory mem) %{ MacroAssembler _masm(&cbuf); int dst = $dst$$reg; int base = $mem$$base; @@ -1720,8 +1760,29 @@ int scale = $mem$$scale; int disp = $mem$$disp; - assert(index == 0, "no index"); - __ lbu(as_Register(dst), as_Register(base), disp); + if( index != 0 ) { + if (scale == 0) { + __ daddu(AT, as_Register(base), as_Register(index)); + } else { + __ dsll(AT, as_Register(index), scale); + __ daddu(AT, 
as_Register(base), AT); + } + if( Assembler::is_simm16(disp) ) { + __ lbu(as_Register(dst), AT, disp); + } else { + __ move(T9, disp); + __ daddu(AT, AT, T9); + __ lbu(as_Register(dst), AT, 0); + } + } else { + if( Assembler::is_simm16(disp) ) { + __ lbu(as_Register(dst), as_Register(base), disp); + } else { + __ move(T9, disp); + __ daddu(AT, as_Register(base), T9); + __ lbu(as_Register(dst), AT, 0); + } + } %} enc_class store_B_reg_enc (memory mem, mRegI src) %{ @@ -1733,58 +1794,334 @@ int disp = $mem$$disp; if( index != 0 ) { - assert(UseLoongsonISA, "Only supported for Loongson CPUs"); - __ gssbx(as_Register(src), as_Register(base), as_Register(index), disp); + if (scale == 0) { + if( Assembler::is_simm(disp, 8) ) { + if (UseLoongsonISA) { + __ gssbx(as_Register(src), as_Register(base), as_Register(index), disp); + } else { + __ addu(AT, as_Register(base), as_Register(index)); + __ sb(as_Register(src), AT, disp); + } + } else if( Assembler::is_simm16(disp) ) { + __ addu(AT, as_Register(base), as_Register(index)); + __ sb(as_Register(src), AT, disp); + } else { + __ addu(AT, as_Register(base), as_Register(index)); + __ move(T9, disp); + if (UseLoongsonISA) { + __ gssbx(as_Register(src), AT, T9, 0); + } else { + __ addu(AT, AT, T9); + __ sb(as_Register(src), AT, 0); + } + } + } else { + __ dsll(AT, as_Register(index), scale); + if( Assembler::is_simm(disp, 8) ) { + if (UseLoongsonISA) { + __ gssbx(as_Register(src), AT, as_Register(base), disp); + } else { + __ addu(AT, as_Register(base), AT); + __ sb(as_Register(src), AT, disp); + } + } else if( Assembler::is_simm16(disp) ) { + __ addu(AT, as_Register(base), AT); + __ sb(as_Register(src), AT, disp); + } else { + __ addu(AT, as_Register(base), AT); + __ move(T9, disp); + if (UseLoongsonISA) { + __ gssbx(as_Register(src), AT, T9, 0); + } else { + __ addu(AT, AT, T9); + __ sb(as_Register(src), AT, 0); + } + } + } } else { - __ sb(as_Register(src), as_Register(base), disp); + if( Assembler::is_simm16(disp) ) { + __ sb(as_Register(src), as_Register(base), disp); + } else { + __ move(T9, disp); + if (UseLoongsonISA) { + __ gssbx(as_Register(src), as_Register(base), T9, 0); + } else { + __ addu(AT, as_Register(base), T9); + __ sb(as_Register(src), AT, 0); + } + } } %} - enc_class store_B0_enc (memory mem) %{ + enc_class store_B_immI_enc (memory mem, immI8 src) %{ MacroAssembler _masm(&cbuf); int base = $mem$$base; int index = $mem$$index; int scale = $mem$$scale; int disp = $mem$$disp; + int value = $src$$constant; if( index != 0 ) { - assert(UseLoongsonISA, "Only supported for Loongson CPUs"); - __ gssbx(R0, as_Register(base), as_Register(index), disp); + if (!UseLoongsonISA) { + if (scale == 0) { + __ daddu(AT, as_Register(base), as_Register(index)); + } else { + __ dsll(AT, as_Register(index), scale); + __ daddu(AT, as_Register(base), AT); + } + if( Assembler::is_simm16(disp) ) { + if (value == 0) { + __ sb(R0, AT, disp); + } else { + __ move(T9, value); + __ sb(T9, AT, disp); + } + } else { + if (value == 0) { + __ move(T9, disp); + __ daddu(AT, AT, T9); + __ sb(R0, AT, 0); + } else { + __ move(T9, disp); + __ daddu(AT, AT, T9); + __ move(T9, value); + __ sb(T9, AT, 0); + } + } + } else { + + if (scale == 0) { + if( Assembler::is_simm(disp, 8) ) { + if (value == 0) { + __ gssbx(R0, as_Register(base), as_Register(index), disp); + } else { + __ move(T9, value); + __ gssbx(T9, as_Register(base), as_Register(index), disp); + } + } else if( Assembler::is_simm16(disp) ) { + __ daddu(AT, as_Register(base), as_Register(index)); + if (value == 0) { 
+ __ sb(R0, AT, disp); + } else { + __ move(T9, value); + __ sb(T9, AT, disp); + } + } else { + if (value == 0) { + __ daddu(AT, as_Register(base), as_Register(index)); + __ move(T9, disp); + __ gssbx(R0, AT, T9, 0); + } else { + __ move(AT, disp); + __ move(T9, value); + __ daddu(AT, as_Register(base), AT); + __ gssbx(T9, AT, as_Register(index), 0); + } + } + + } else { + + if( Assembler::is_simm(disp, 8) ) { + __ dsll(AT, as_Register(index), scale); + if (value == 0) { + __ gssbx(R0, as_Register(base), AT, disp); + } else { + __ move(T9, value); + __ gssbx(T9, as_Register(base), AT, disp); + } + } else if( Assembler::is_simm16(disp) ) { + __ dsll(AT, as_Register(index), scale); + __ daddu(AT, as_Register(base), AT); + if (value == 0) { + __ sb(R0, AT, disp); + } else { + __ move(T9, value); + __ sb(T9, AT, disp); + } + } else { + __ dsll(AT, as_Register(index), scale); + if (value == 0) { + __ daddu(AT, as_Register(base), AT); + __ move(T9, disp); + __ gssbx(R0, AT, T9, 0); + } else { + __ move(T9, disp); + __ daddu(AT, AT, T9); + __ move(T9, value); + __ gssbx(T9, as_Register(base), AT, 0); + } + } + } + } } else { - __ sb(R0, as_Register(base), disp); + if( Assembler::is_simm16(disp) ) { + if (value == 0) { + __ sb(R0, as_Register(base), disp); + } else { + __ move(AT, value); + __ sb(AT, as_Register(base), disp); + } + } else { + if (value == 0) { + __ move(T9, disp); + if (UseLoongsonISA) { + __ gssbx(R0, as_Register(base), T9, 0); + } else { + __ daddu(AT, as_Register(base), T9); + __ sb(R0, AT, 0); + } + } else { + __ move(T9, disp); + if (UseLoongsonISA) { + __ move(AT, value); + __ gssbx(AT, as_Register(base), T9, 0); + } else { + __ daddu(AT, as_Register(base), T9); + __ move(T9, value); + __ sb(T9, AT, 0); + } + } + } } %} - enc_class store_B_reg_sync_enc (memory mem, mRegI src) %{ - MacroAssembler _masm(&cbuf); - int src = $src$$reg; - int base = $mem$$base; - int index = $mem$$index; - int scale = $mem$$scale; - int disp = $mem$$disp; - - if( index != 0 ) { - assert(UseLoongsonISA, "Only supported for Loongson CPUs"); - __ gssbx(as_Register(src), as_Register(base), as_Register(index), disp); - } else { - __ sb(as_Register(src), as_Register(base), disp); - } - __ sync(); - %} - - enc_class store_B0_sync_enc (memory mem) %{ + + enc_class store_B_immI_enc_sync (memory mem, immI8 src) %{ MacroAssembler _masm(&cbuf); int base = $mem$$base; int index = $mem$$index; int scale = $mem$$scale; int disp = $mem$$disp; + int value = $src$$constant; if( index != 0 ) { - assert(UseLoongsonISA, "Only supported for Loongson CPUs"); - __ gssbx(R0, as_Register(base), as_Register(index), disp); + if ( UseLoongsonISA ) { + if ( Assembler::is_simm(disp,8) ) { + if ( scale == 0 ) { + if ( value == 0 ) { + __ gssbx(R0, as_Register(base), as_Register(index), disp); + } else { + __ move(AT, value); + __ gssbx(AT, as_Register(base), as_Register(index), disp); + } + } else { + __ dsll(AT, as_Register(index), scale); + if ( value == 0 ) { + __ gssbx(R0, as_Register(base), AT, disp); + } else { + __ move(T9, value); + __ gssbx(T9, as_Register(base), AT, disp); + } + } + } else if ( Assembler::is_simm16(disp) ) { + if ( scale == 0 ) { + __ daddu(AT, as_Register(base), as_Register(index)); + if ( value == 0 ){ + __ sb(R0, AT, disp); + } else { + __ move(T9, value); + __ sb(T9, AT, disp); + } + } else { + __ dsll(AT, as_Register(index), scale); + __ daddu(AT, as_Register(base), AT); + if ( value == 0 ) { + __ sb(R0, AT, disp); + } else { + __ move(T9, value); + __ sb(T9, AT, disp); + } + } + } else { + if ( 
scale == 0 ) { + __ move(AT, disp); + __ daddu(AT, as_Register(index), AT); + if ( value == 0 ) { + __ gssbx(R0, as_Register(base), AT, 0); + } else { + __ move(T9, value); + __ gssbx(T9, as_Register(base), AT, 0); + } + } else { + __ dsll(AT, as_Register(index), scale); + __ move(T9, disp); + __ daddu(AT, AT, T9); + if ( value == 0 ) { + __ gssbx(R0, as_Register(base), AT, 0); + } else { + __ move(T9, value); + __ gssbx(T9, as_Register(base), AT, 0); + } + } + } + } else { //not use loongson isa + if (scale == 0) { + __ daddu(AT, as_Register(base), as_Register(index)); + } else { + __ dsll(AT, as_Register(index), scale); + __ daddu(AT, as_Register(base), AT); + } + if( Assembler::is_simm16(disp) ) { + if (value == 0) { + __ sb(R0, AT, disp); + } else { + __ move(T9, value); + __ sb(T9, AT, disp); + } + } else { + if (value == 0) { + __ move(T9, disp); + __ daddu(AT, AT, T9); + __ sb(R0, AT, 0); + } else { + __ move(T9, disp); + __ daddu(AT, AT, T9); + __ move(T9, value); + __ sb(T9, AT, 0); + } + } + } } else { - __ sb(R0, as_Register(base), disp); + if ( UseLoongsonISA ){ + if ( Assembler::is_simm16(disp) ){ + if ( value == 0 ) { + __ sb(R0, as_Register(base), disp); + } else { + __ move(AT, value); + __ sb(AT, as_Register(base), disp); + } + } else { + __ move(AT, disp); + if ( value == 0 ) { + __ gssbx(R0, as_Register(base), AT, 0); + } else { + __ move(T9, value); + __ gssbx(T9, as_Register(base), AT, 0); + } + } + } else { + if( Assembler::is_simm16(disp) ) { + if (value == 0) { + __ sb(R0, as_Register(base), disp); + } else { + __ move(AT, value); + __ sb(AT, as_Register(base), disp); + } + } else { + if (value == 0) { + __ move(T9, disp); + __ daddu(AT, as_Register(base), T9); + __ sb(R0, AT, 0); + } else { + __ move(T9, disp); + __ daddu(AT, as_Register(base), T9); + __ move(T9, value); + __ sb(T9, AT, 0); + } + } + } } + __ sync(); %} @@ -1798,15 +2135,72 @@ int disp = $mem$$disp; if( index != 0 ) { - assert(UseLoongsonISA, "Only supported for Loongson CPUs"); - __ gslhx(as_Register(dst), as_Register(base), as_Register(index), disp); - } else { - __ lh(as_Register(dst), as_Register(base), disp); + if ( UseLoongsonISA ) { + if ( Assembler::is_simm(disp, 8) ) { + if (scale == 0) { + __ gslhx(as_Register(dst), as_Register(base), as_Register(index), disp); + } else { + __ dsll(AT, as_Register(index), scale); + __ gslhx(as_Register(dst), as_Register(base), AT, disp); + } + } else if ( Assembler::is_simm16(disp) ) { + if (scale == 0) { + __ daddu(AT, as_Register(base), as_Register(index)); + __ lh(as_Register(dst), AT, disp); + } else { + __ dsll(AT, as_Register(index), scale); + __ daddu(AT, as_Register(base), AT); + __ lh(as_Register(dst), AT, disp); + } + } else { + if (scale == 0) { + __ move(AT, disp); + __ daddu(AT, as_Register(index), AT); + __ gslhx(as_Register(dst), as_Register(base), AT, 0); + } else { + __ dsll(AT, as_Register(index), scale); + __ move(T9, disp); + __ daddu(AT, AT, T9); + __ gslhx(as_Register(dst), as_Register(base), AT, 0); + } + } + } else { // not use loongson isa + if (scale == 0) { + __ daddu(AT, as_Register(base), as_Register(index)); + } else { + __ dsll(AT, as_Register(index), scale); + __ daddu(AT, as_Register(base), AT); + } + if( Assembler::is_simm16(disp) ) { + __ lh(as_Register(dst), AT, disp); + } else { + __ move(T9, disp); + __ daddu(AT, AT, T9); + __ lh(as_Register(dst), AT, 0); + } + } + } else { // index is 0 + if ( UseLoongsonISA ) { + if ( Assembler::is_simm16(disp) ) { + __ lh(as_Register(dst), as_Register(base), disp); + } else { + __ 
move(T9, disp); + __ gslhx(as_Register(dst), as_Register(base), T9, 0); + } + } else { //not use loongson isa + if( Assembler::is_simm16(disp) ) { + __ lh(as_Register(dst), as_Register(base), disp); + } else { + __ move(T9, disp); + __ daddu(AT, as_Register(base), T9); + __ lh(as_Register(dst), AT, 0); + } + } } %} // Load Char (16bit unsigned) - enc_class load_C_enc (mRegI dst, umemory mem) %{ + enc_class load_C_enc (mRegI dst, memory mem) %{ MacroAssembler _masm(&cbuf); int dst = $dst$$reg; int base = $mem$$base; @@ -1814,8 +2208,29 @@ int scale = $mem$$scale; int disp = $mem$$disp; - assert(index == 0, "no index"); - __ lhu(as_Register(dst), as_Register(base), disp); + if( index != 0 ) { + if (scale == 0) { + __ daddu(AT, as_Register(base), as_Register(index)); + } else { + __ dsll(AT, as_Register(index), scale); + __ daddu(AT, as_Register(base), AT); + } + if( Assembler::is_simm16(disp) ) { + __ lhu(as_Register(dst), AT, disp); + } else { + __ move(T9, disp); + __ addu(AT, AT, T9); + __ lhu(as_Register(dst), AT, 0); + } + } else { + if( Assembler::is_simm16(disp) ) { + __ lhu(as_Register(dst), as_Register(base), disp); + } else { + __ move(T9, disp); + __ daddu(AT, as_Register(base), T9); + __ lhu(as_Register(dst), AT, 0); + } + } %} // Store Char (16bit unsigned) @@ -1828,10 +2243,50 @@ int disp = $mem$$disp; if( index != 0 ) { - assert(UseLoongsonISA, "Only supported for Loongson CPUs"); - __ gsshx(as_Register(src), as_Register(base), as_Register(index), disp); + if( Assembler::is_simm16(disp) ) { + if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) { + if (scale == 0) { + __ gsshx(as_Register(src), as_Register(base), as_Register(index), disp); + } else { + __ dsll(AT, as_Register(index), scale); + __ gsshx(as_Register(src), as_Register(base), AT, disp); + } + } else { + if (scale == 0) { + __ addu(AT, as_Register(base), as_Register(index)); + } else { + __ dsll(AT, as_Register(index), scale); + __ addu(AT, as_Register(base), AT); + } + __ sh(as_Register(src), AT, disp); + } + } else { + if (scale == 0) { + __ addu(AT, as_Register(base), as_Register(index)); + } else { + __ dsll(AT, as_Register(index), scale); + __ addu(AT, as_Register(base), AT); + } + __ move(T9, disp); + if( UseLoongsonISA ) { + __ gsshx(as_Register(src), AT, T9, 0); + } else { + __ addu(AT, AT, T9); + __ sh(as_Register(src), AT, 0); + } + } } else { - __ sh(as_Register(src), as_Register(base), disp); + if( Assembler::is_simm16(disp) ) { + __ sh(as_Register(src), as_Register(base), disp); + } else { + __ move(T9, disp); + if( UseLoongsonISA ) { + __ gsshx(as_Register(src), as_Register(base), T9, 0); + } else { + __ addu(AT, as_Register(base), T9); + __ sh(as_Register(src), AT, 0); + } + } } %} @@ -1843,10 +2298,50 @@ int disp = $mem$$disp; if( index != 0 ) { - assert(UseLoongsonISA, "Only supported for Loongson CPUs"); - __ gsshx(R0, as_Register(base), as_Register(index), disp); + if( Assembler::is_simm16(disp) ) { + if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) { + if (scale == 0) { + __ gsshx(R0, as_Register(base), as_Register(index), disp); + } else { + __ dsll(AT, as_Register(index), scale); + __ gsshx(R0, as_Register(base), AT, disp); + } + } else { + if (scale == 0) { + __ addu(AT, as_Register(base), as_Register(index)); + } else { + __ dsll(AT, as_Register(index), scale); + __ addu(AT, as_Register(base), AT); + } + __ sh(R0, AT, disp); + } + } else { + if (scale == 0) { + __ addu(AT, as_Register(base), as_Register(index)); + } else { + __ dsll(AT, as_Register(index), scale); + __ addu(AT, 
as_Register(base), AT); + } + __ move(T9, disp); + if( UseLoongsonISA ) { + __ gsshx(R0, AT, T9, 0); + } else { + __ addu(AT, AT, T9); + __ sh(R0, AT, 0); + } + } } else { - __ sh(R0, as_Register(base), disp); + if( Assembler::is_simm16(disp) ) { + __ sh(R0, as_Register(base), disp); + } else { + __ move(T9, disp); + if( UseLoongsonISA ) { + __ gsshx(R0, as_Register(base), T9, 0); + } else { + __ addu(AT, as_Register(base), T9); + __ sh(R0, AT, 0); + } + } } %} @@ -1859,10 +2354,50 @@ int disp = $mem$$disp; if( index != 0 ) { - assert(UseLoongsonISA, "Only supported for Loongson CPUs"); - __ gslwx(as_Register(dst), as_Register(base), as_Register(index), disp); + if( Assembler::is_simm16(disp) ) { + if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) { + if (scale == 0) { + __ gslwx(as_Register(dst), as_Register(base), as_Register(index), disp); + } else { + __ dsll(AT, as_Register(index), scale); + __ gslwx(as_Register(dst), as_Register(base), AT, disp); + } + } else { + if (scale == 0) { + __ addu(AT, as_Register(base), as_Register(index)); + } else { + __ dsll(AT, as_Register(index), scale); + __ addu(AT, as_Register(base), AT); + } + __ lw(as_Register(dst), AT, disp); + } + } else { + if (scale == 0) { + __ addu(AT, as_Register(base), as_Register(index)); + } else { + __ dsll(AT, as_Register(index), scale); + __ addu(AT, as_Register(base), AT); + } + __ move(T9, disp); + if( UseLoongsonISA ) { + __ gslwx(as_Register(dst), AT, T9, 0); + } else { + __ addu(AT, AT, T9); + __ lw(as_Register(dst), AT, 0); + } + } } else { - __ lw(as_Register(dst), as_Register(base), disp); + if( Assembler::is_simm16(disp) ) { + __ lw(as_Register(dst), as_Register(base), disp); + } else { + __ move(T9, disp); + if( UseLoongsonISA ) { + __ gslwx(as_Register(dst), as_Register(base), T9, 0); + } else { + __ addu(AT, as_Register(base), T9); + __ lw(as_Register(dst), AT, 0); + } + } } %} @@ -1875,41 +2410,224 @@ int disp = $mem$$disp; if( index != 0 ) { - assert(UseLoongsonISA, "Only supported for Loongson CPUs"); - __ gsswx(as_Register(src), as_Register(base), as_Register(index), disp); + if( Assembler::is_simm16(disp) ) { + if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) { + if (scale == 0) { + __ gsswx(as_Register(src), as_Register(base), as_Register(index), disp); + } else { + __ dsll(AT, as_Register(index), scale); + __ gsswx(as_Register(src), as_Register(base), AT, disp); + } + } else { + if (scale == 0) { + __ addu(AT, as_Register(base), as_Register(index)); + } else { + __ dsll(AT, as_Register(index), scale); + __ addu(AT, as_Register(base), AT); + } + __ sw(as_Register(src), AT, disp); + } + } else { + if (scale == 0) { + __ addu(AT, as_Register(base), as_Register(index)); + } else { + __ dsll(AT, as_Register(index), scale); + __ addu(AT, as_Register(base), AT); + } + __ move(T9, disp); + if( UseLoongsonISA ) { + __ gsswx(as_Register(src), AT, T9, 0); + } else { + __ addu(AT, AT, T9); + __ sw(as_Register(src), AT, 0); + } + } } else { - __ sw(as_Register(src), as_Register(base), disp); + if( Assembler::is_simm16(disp) ) { + __ sw(as_Register(src), as_Register(base), disp); + } else { + __ move(T9, disp); + if( UseLoongsonISA ) { + __ gsswx(as_Register(src), as_Register(base), T9, 0); + } else { + __ addu(AT, as_Register(base), T9); + __ sw(as_Register(src), AT, 0); + } + } } %} - enc_class store_I_immI0_enc (memory mem) %{ + enc_class store_I_immI_enc (memory mem, immI src) %{ MacroAssembler _masm(&cbuf); - int base = $mem$$base; + int base = $mem$$base; int index = $mem$$index; int scale = 
$mem$$scale; - int disp = $mem$$disp; + int disp = $mem$$disp; + int value = $src$$constant; if( index != 0 ) { - assert(UseLoongsonISA, "Only supported for Loongson CPUs"); - __ gsswx(R0, as_Register(base), as_Register(index), disp); + if ( UseLoongsonISA ) { + if ( Assembler::is_simm(disp, 8) ) { + if ( scale == 0 ) { + if ( value == 0 ) { + __ gsswx(R0, as_Register(base), as_Register(index), disp); + } else { + __ move(T9, value); + __ gsswx(T9, as_Register(base), as_Register(index), disp); + } + } else { + __ dsll(AT, as_Register(index), scale); + if ( value == 0 ) { + __ gsswx(R0, as_Register(base), AT, disp); + } else { + __ move(T9, value); + __ gsswx(T9, as_Register(base), AT, disp); + } + } + } else if ( Assembler::is_simm16(disp) ) { + if ( scale == 0 ) { + __ daddu(AT, as_Register(base), as_Register(index)); + if ( value == 0 ) { + __ sw(R0, AT, disp); + } else { + __ move(T9, value); + __ sw(T9, AT, disp); + } + } else { + __ dsll(AT, as_Register(index), scale); + __ daddu(AT, as_Register(base), AT); + if ( value == 0 ) { + __ sw(R0, AT, disp); + } else { + __ move(T9, value); + __ sw(T9, AT, disp); + } + } + } else { + if ( scale == 0 ) { + __ move(T9, disp); + __ daddu(AT, as_Register(index), T9); + if ( value ==0 ) { + __ gsswx(R0, as_Register(base), AT, 0); + } else { + __ move(T9, value); + __ gsswx(T9, as_Register(base), AT, 0); + } + } else { + __ dsll(AT, as_Register(index), scale); + __ move(T9, disp); + __ daddu(AT, AT, T9); + if ( value == 0 ) { + __ gsswx(R0, as_Register(base), AT, 0); + } else { + __ move(T9, value); + __ gsswx(T9, as_Register(base), AT, 0); + } + } + } + } else { //not use loongson isa + if (scale == 0) { + __ daddu(AT, as_Register(base), as_Register(index)); + } else { + __ dsll(AT, as_Register(index), scale); + __ daddu(AT, as_Register(base), AT); + } + if( Assembler::is_simm16(disp) ) { + if (value == 0) { + __ sw(R0, AT, disp); + } else { + __ move(T9, value); + __ sw(T9, AT, disp); + } + } else { + if (value == 0) { + __ move(T9, disp); + __ daddu(AT, AT, T9); + __ sw(R0, AT, 0); + } else { + __ move(T9, disp); + __ daddu(AT, AT, T9); + __ move(T9, value); + __ sw(T9, AT, 0); + } + } + } } else { - __ sw(R0, as_Register(base), disp); + if ( UseLoongsonISA ) { + if ( Assembler::is_simm16(disp) ) { + if ( value == 0 ) { + __ sw(R0, as_Register(base), disp); + } else { + __ move(AT, value); + __ sw(AT, as_Register(base), disp); + } + } else { + __ move(T9, disp); + if ( value == 0 ) { + __ gsswx(R0, as_Register(base), T9, 0); + } else { + __ move(AT, value); + __ gsswx(AT, as_Register(base), T9, 0); + } + } + } else { + if( Assembler::is_simm16(disp) ) { + if (value == 0) { + __ sw(R0, as_Register(base), disp); + } else { + __ move(AT, value); + __ sw(AT, as_Register(base), disp); + } + } else { + if (value == 0) { + __ move(T9, disp); + __ daddu(AT, as_Register(base), T9); + __ sw(R0, AT, 0); + } else { + __ move(T9, disp); + __ daddu(AT, as_Register(base), T9); + __ move(T9, value); + __ sw(T9, AT, 0); + } + } + } } %} - enc_class load_N_enc (mRegN dst, umemory mem) %{ + enc_class load_N_enc (mRegN dst, memory mem) %{ MacroAssembler _masm(&cbuf); int dst = $dst$$reg; int base = $mem$$base; int index = $mem$$index; int scale = $mem$$scale; int disp = $mem$$disp; - - relocInfo::relocType disp_reloc = $mem->disp_reloc(); - assert(disp_reloc == relocInfo::none, "cannot have disp"); - - assert(index == 0, "no index"); - __ lwu(as_Register(dst), as_Register(base), disp); + relocInfo::relocType disp_reloc = $mem->disp_reloc(); + assert(disp_reloc == 
relocInfo::none, "cannot have disp"); + + if( index != 0 ) { + if (scale == 0) { + __ daddu(AT, as_Register(base), as_Register(index)); + } else { + __ dsll(AT, as_Register(index), scale); + __ daddu(AT, as_Register(base), AT); + } + if( Assembler::is_simm16(disp) ) { + __ lwu(as_Register(dst), AT, disp); + } else { + __ set64(T9, disp); + __ daddu(AT, AT, T9); + __ lwu(as_Register(dst), AT, 0); + } + } else { + if( Assembler::is_simm16(disp) ) { + __ lwu(as_Register(dst), as_Register(base), disp); + } else { + __ set64(T9, disp); + __ daddu(AT, as_Register(base), T9); + __ lwu(as_Register(dst), AT, 0); + } + } + %} @@ -1920,16 +2638,71 @@ int index = $mem$$index; int scale = $mem$$scale; int disp = $mem$$disp; - - relocInfo::relocType disp_reloc = $mem->disp_reloc(); - assert(disp_reloc == relocInfo::none, "cannot have disp"); + relocInfo::relocType disp_reloc = $mem->disp_reloc(); + assert(disp_reloc == relocInfo::none, "cannot have disp"); if( index != 0 ) { - assert(UseLoongsonISA, "Only supported for Loongson CPUs"); - __ gsldx(as_Register(dst), as_Register(base), as_Register(index), disp); + if ( UseLoongsonISA ) { + if ( Assembler::is_simm(disp, 8) ) { + if ( scale != 0 ) { + __ dsll(AT, as_Register(index), scale); + __ gsldx(as_Register(dst), as_Register(base), AT, disp); + } else { + __ gsldx(as_Register(dst), as_Register(base), as_Register(index), disp); + } + } else if ( Assembler::is_simm16(disp) ){ + if ( scale != 0 ) { + __ dsll(AT, as_Register(index), scale); + __ daddu(AT, AT, as_Register(base)); + } else { + __ daddu(AT, as_Register(index), as_Register(base)); + } + __ ld(as_Register(dst), AT, disp); + } else { + if ( scale != 0 ) { + __ dsll(AT, as_Register(index), scale); + __ move(T9, disp); + __ daddu(AT, AT, T9); + } else { + __ move(T9, disp); + __ daddu(AT, as_Register(index), T9); + } + __ gsldx(as_Register(dst), as_Register(base), AT, 0); + } + } else { //not use loongson isa + if (scale == 0) { + __ daddu(AT, as_Register(base), as_Register(index)); + } else { + __ dsll(AT, as_Register(index), scale); + __ daddu(AT, as_Register(base), AT); + } + if( Assembler::is_simm16(disp) ) { + __ ld(as_Register(dst), AT, disp); + } else { + __ set64(T9, disp); + __ daddu(AT, AT, T9); + __ ld(as_Register(dst), AT, 0); + } + } } else { - __ ld(as_Register(dst), as_Register(base), disp); + if ( UseLoongsonISA ) { + if ( Assembler::is_simm16(disp) ){ + __ ld(as_Register(dst), as_Register(base), disp); + } else { + __ set64(T9, disp); + __ gsldx(as_Register(dst), as_Register(base), T9, 0); + } + } else { //not use loongson isa + if( Assembler::is_simm16(disp) ) { + __ ld(as_Register(dst), as_Register(base), disp); + } else { + __ set64(T9, disp); + __ daddu(AT, as_Register(base), T9); + __ ld(as_Register(dst), AT, 0); + } + } } +// if( disp_reloc != relocInfo::none) __ ld(as_Register(dst), as_Register(dst), 0); %} enc_class store_P_reg_enc (memory mem, mRegP src) %{ @@ -1941,10 +2714,65 @@ int disp = $mem$$disp; if( index != 0 ) { - assert(UseLoongsonISA, "Only supported for Loongson CPUs"); - __ gssdx(as_Register(src), as_Register(base), as_Register(index), disp); + if ( UseLoongsonISA ){ + if ( Assembler::is_simm(disp, 8) ) { + if ( scale == 0 ) { + __ gssdx(as_Register(src), as_Register(base), as_Register(index), disp); + } else { + __ dsll(AT, as_Register(index), scale); + __ gssdx(as_Register(src), as_Register(base), AT, disp); + } + } else if ( Assembler::is_simm16(disp) ) { + if ( scale == 0 ) { + __ daddu(AT, as_Register(base), as_Register(index)); + } else { + __ dsll(AT, 
as_Register(index), scale); + __ daddu(AT, as_Register(base), AT); + } + __ sd(as_Register(src), AT, disp); + } else { + if ( scale == 0 ) { + __ move(T9, disp); + __ daddu(AT, as_Register(index), T9); + } else { + __ dsll(AT, as_Register(index), scale); + __ move(T9, disp); + __ daddu(AT, AT, T9); + } + __ gssdx(as_Register(src), as_Register(base), AT, 0); + } + } else { //not use loongson isa + if (scale == 0) { + __ daddu(AT, as_Register(base), as_Register(index)); + } else { + __ dsll(AT, as_Register(index), scale); + __ daddu(AT, as_Register(base), AT); + } + if( Assembler::is_simm16(disp) ) { + __ sd(as_Register(src), AT, disp); + } else { + __ move(T9, disp); + __ daddu(AT, AT, T9); + __ sd(as_Register(src), AT, 0); + } + } } else { - __ sd(as_Register(src), as_Register(base), disp); + if ( UseLoongsonISA ) { + if ( Assembler::is_simm16(disp) ) { + __ sd(as_Register(src), as_Register(base), disp); + } else { + __ move(T9, disp); + __ gssdx(as_Register(src), as_Register(base), T9, 0); + } + } else { + if( Assembler::is_simm16(disp) ) { + __ sd(as_Register(src), as_Register(base), disp); + } else { + __ move(T9, disp); + __ daddu(AT, as_Register(base), T9); + __ sd(as_Register(src), AT, 0); + } + } } %} @@ -1957,10 +2785,65 @@ int disp = $mem$$disp; if( index != 0 ) { - assert(UseLoongsonISA, "Only supported for Loongson CPUs"); - __ gsswx(as_Register(src), as_Register(base), as_Register(index), disp); + if ( UseLoongsonISA ){ + if ( Assembler::is_simm(disp, 8) ) { + if ( scale == 0 ) { + __ gsswx(as_Register(src), as_Register(base), as_Register(index), disp); + } else { + __ dsll(AT, as_Register(index), scale); + __ gsswx(as_Register(src), as_Register(base), AT, disp); + } + } else if ( Assembler::is_simm16(disp) ) { + if ( scale == 0 ) { + __ daddu(AT, as_Register(base), as_Register(index)); + } else { + __ dsll(AT, as_Register(index), scale); + __ daddu(AT, as_Register(base), AT); + } + __ sw(as_Register(src), AT, disp); + } else { + if ( scale == 0 ) { + __ move(T9, disp); + __ daddu(AT, as_Register(index), T9); + } else { + __ dsll(AT, as_Register(index), scale); + __ move(T9, disp); + __ daddu(AT, AT, T9); + } + __ gsswx(as_Register(src), as_Register(base), AT, 0); + } + } else { //not use loongson isa + if (scale == 0) { + __ daddu(AT, as_Register(base), as_Register(index)); + } else { + __ dsll(AT, as_Register(index), scale); + __ daddu(AT, as_Register(base), AT); + } + if( Assembler::is_simm16(disp) ) { + __ sw(as_Register(src), AT, disp); + } else { + __ move(T9, disp); + __ daddu(AT, AT, T9); + __ sw(as_Register(src), AT, 0); + } + } } else { - __ sw(as_Register(src), as_Register(base), disp); + if ( UseLoongsonISA ) { + if ( Assembler::is_simm16(disp) ) { + __ sw(as_Register(src), as_Register(base), disp); + } else { + __ move(T9, disp); + __ gsswx(as_Register(src), as_Register(base), T9, 0); + } + } else { + if( Assembler::is_simm16(disp) ) { + __ sw(as_Register(src), as_Register(base), disp); + } else { + __ move(T9, disp); + __ daddu(AT, as_Register(base), T9); + __ sw(as_Register(src), AT, 0); + } + } } %} @@ -1972,29 +2855,303 @@ int disp = $mem$$disp; if( index != 0 ) { - assert(UseLoongsonISA, "Only supported for Loongson CPUs"); - __ gssdx(R0, as_Register(base), as_Register(index), disp); + if (scale == 0) { + if( Assembler::is_simm16(disp) ) { + if (UseLoongsonISA && Assembler::is_simm(disp, 8)) { + __ gssdx(R0, as_Register(base), as_Register(index), disp); + } else { + __ daddu(AT, as_Register(base), as_Register(index)); + __ sd(R0, AT, disp); + } + } else { + __ 
daddu(AT, as_Register(base), as_Register(index)); + __ move(T9, disp); + if(UseLoongsonISA) { + __ gssdx(R0, AT, T9, 0); + } else { + __ daddu(AT, AT, T9); + __ sd(R0, AT, 0); + } + } + } else { + __ dsll(AT, as_Register(index), scale); + if( Assembler::is_simm16(disp) ) { + if (UseLoongsonISA && Assembler::is_simm(disp, 8)) { + __ gssdx(R0, as_Register(base), AT, disp); + } else { + __ daddu(AT, as_Register(base), AT); + __ sd(R0, AT, disp); + } + } else { + __ daddu(AT, as_Register(base), AT); + __ move(T9, disp); + if (UseLoongsonISA) { + __ gssdx(R0, AT, T9, 0); + } else { + __ daddu(AT, AT, T9); + __ sd(R0, AT, 0); + } + } + } } else { - __ sd(R0, as_Register(base), disp); + if( Assembler::is_simm16(disp) ) { + __ sd(R0, as_Register(base), disp); + } else { + __ move(T9, disp); + if (UseLoongsonISA) { + __ gssdx(R0, as_Register(base), T9, 0); + } else { + __ daddu(AT, as_Register(base), T9); + __ sd(R0, AT, 0); + } + } } %} - - enc_class storeImmN0_enc(memory mem) %{ + enc_class store_P_immP_enc (memory mem, immP31 src) %{ MacroAssembler _masm(&cbuf); int base = $mem$$base; int index = $mem$$index; int scale = $mem$$scale; int disp = $mem$$disp; - - if(index != 0){ - assert(UseLoongsonISA, "Only supported for Loongson CPUs"); - __ gsswx(R0, as_Register(base), as_Register(index), disp); + long value = $src$$constant; + + if( index != 0 ) { + if (scale == 0) { + __ daddu(AT, as_Register(base), as_Register(index)); + } else { + __ dsll(AT, as_Register(index), scale); + __ daddu(AT, as_Register(base), AT); + } + if( Assembler::is_simm16(disp) ) { + if (value == 0) { + __ sd(R0, AT, disp); + } else { + __ move(T9, value); + __ sd(T9, AT, disp); + } + } else { + if (value == 0) { + __ move(T9, disp); + __ daddu(AT, AT, T9); + __ sd(R0, AT, 0); + } else { + __ move(T9, disp); + __ daddu(AT, AT, T9); + __ move(T9, value); + __ sd(T9, AT, 0); + } + } } else { - __ sw(R0, as_Register(base), disp); + if( Assembler::is_simm16(disp) ) { + if (value == 0) { + __ sd(R0, as_Register(base), disp); + } else { + __ move(AT, value); + __ sd(AT, as_Register(base), disp); + } + } else { + if (value == 0) { + __ move(T9, disp); + __ daddu(AT, as_Register(base), T9); + __ sd(R0, AT, 0); + } else { + __ move(T9, disp); + __ daddu(AT, as_Register(base), T9); + __ move(T9, value); + __ sd(T9, AT, 0); + } + } } + %} + + enc_class storeImmN0_enc(memory mem, ImmN0 src) %{ + MacroAssembler _masm(&cbuf); + int base = $mem$$base; + int index = $mem$$index; + int scale = $mem$$scale; + int disp = $mem$$disp; + + if(index!=0){ + if (scale == 0) { + __ daddu(AT, as_Register(base), as_Register(index)); + } else { + __ dsll(AT, as_Register(index), scale); + __ daddu(AT, as_Register(base), AT); + } + + if( Assembler::is_simm16(disp) ) { + __ sw(R0, AT, disp); + } else { + __ move(T9, disp); + __ daddu(AT, AT, T9); + __ sw(R0, AT, 0); + } + } + else { + if( Assembler::is_simm16(disp) ) { + __ sw(R0, as_Register(base), disp); + } else { + __ move(T9, disp); + __ daddu(AT, as_Register(base), T9); + __ sw(R0, AT, 0); + } + } %} + enc_class storeImmN_enc (memory mem, immN src) %{ + MacroAssembler _masm(&cbuf); + int base = $mem$$base; + int index = $mem$$index; + int scale = $mem$$scale; + int disp = $mem$$disp; + long * value = (long *)$src$$constant; + + if (value == NULL) { + guarantee(Assembler::is_simm16(disp), "FIXME: disp is not simm16!"); + if (index == 0) { + __ sw(R0, as_Register(base), disp); + } else { + if (scale == 0) { + __ daddu(AT, as_Register(base), as_Register(index)); + } else { + __ dsll(AT, 
as_Register(index), scale); + __ daddu(AT, as_Register(base), AT); + } + __ sw(R0, AT, disp); + } + + return; + } + + int oop_index = __ oop_recorder()->find_index((jobject)value); + RelocationHolder rspec = oop_Relocation::spec(oop_index); + + guarantee(scale == 0, "FIXME: scale is not zero !"); + guarantee(value != 0, "FIXME: value is zero !"); + + if (index != 0) { + if (scale == 0) { + __ daddu(AT, as_Register(base), as_Register(index)); + } else { + __ dsll(AT, as_Register(index), scale); + __ daddu(AT, as_Register(base), AT); + } + if( Assembler::is_simm16(disp) ) { + if(rspec.type() != relocInfo::none) { + __ relocate(rspec, Assembler::narrow_oop_operand); + __ patchable_set48(T9, oop_index); + } else { + __ set64(T9, oop_index); + } + __ sw(T9, AT, disp); + } else { + __ move(T9, disp); + __ addu(AT, AT, T9); + + if(rspec.type() != relocInfo::none) { + __ relocate(rspec, Assembler::narrow_oop_operand); + __ patchable_set48(T9, oop_index); + } else { + __ set64(T9, oop_index); + } + __ sw(T9, AT, 0); + } + } + else { + if( Assembler::is_simm16(disp) ) { + if($src->constant_reloc() != relocInfo::none) { + __ relocate(rspec, Assembler::narrow_oop_operand); + __ patchable_set48(T9, oop_index); + } else { + __ set64(T9, oop_index); + } + __ sw(T9, as_Register(base), disp); + } else { + __ move(T9, disp); + __ daddu(AT, as_Register(base), T9); + + if($src->constant_reloc() != relocInfo::none){ + __ relocate(rspec, Assembler::narrow_oop_operand); + __ patchable_set48(T9, oop_index); + } else { + __ set64(T9, oop_index); + } + __ sw(T9, AT, 0); + } + } + %} + + enc_class storeImmNKlass_enc (memory mem, immNKlass src) %{ + MacroAssembler _masm(&cbuf); + + assert (UseCompressedOops, "should only be used for compressed headers"); + assert (__ oop_recorder() != NULL, "this assembler needs an OopRecorder"); + + int base = $mem$$base; + int index = $mem$$index; + int scale = $mem$$scale; + int disp = $mem$$disp; + long value = $src$$constant; + + int klass_index = __ oop_recorder()->find_index((Klass*)value); + RelocationHolder rspec = metadata_Relocation::spec(klass_index); + long narrowp = Klass::encode_klass((Klass*)value); + + if(index!=0){ + if (scale == 0) { + __ daddu(AT, as_Register(base), as_Register(index)); + } else { + __ dsll(AT, as_Register(index), scale); + __ daddu(AT, as_Register(base), AT); + } + + if( Assembler::is_simm16(disp) ) { + if(rspec.type() != relocInfo::none){ + __ relocate(rspec, Assembler::narrow_oop_operand); + __ patchable_set48(T9, narrowp); + } else { + __ set64(T9, narrowp); + } + __ sw(T9, AT, disp); + } else { + __ move(T9, disp); + __ daddu(AT, AT, T9); + + if(rspec.type() != relocInfo::none){ + __ relocate(rspec, Assembler::narrow_oop_operand); + __ patchable_set48(T9, narrowp); + } else { + __ set64(T9, narrowp); + } + + __ sw(T9, AT, 0); + } + } else { + if( Assembler::is_simm16(disp) ) { + if(rspec.type() != relocInfo::none){ + __ relocate(rspec, Assembler::narrow_oop_operand); + __ patchable_set48(T9, narrowp); + } + else { + __ set64(T9, narrowp); + } + __ sw(T9, as_Register(base), disp); + } else { + __ move(T9, disp); + __ daddu(AT, as_Register(base), T9); + + if(rspec.type() != relocInfo::none){ + __ relocate(rspec, Assembler::narrow_oop_operand); + __ patchable_set48(T9, narrowp); + } else { + __ set64(T9, narrowp); + } + __ sw(T9, AT, 0); + } + } + %} + enc_class load_L_enc (mRegL dst, memory mem) %{ MacroAssembler _masm(&cbuf); int base = $mem$$base; @@ -2003,11 +3160,31 @@ int disp = $mem$$disp; Register dst_reg = as_Register($dst$$reg); + // 
For implicit null check + __ lb(AT, as_Register(base), 0); + if( index != 0 ) { - assert(UseLoongsonISA, "Only supported for Loongson CPUs"); - __ gsldx(dst_reg, as_Register(base), as_Register(index), disp); + if (scale == 0) { + __ daddu(AT, as_Register(base), as_Register(index)); + } else { + __ dsll(AT, as_Register(index), scale); + __ daddu(AT, as_Register(base), AT); + } + if( Assembler::is_simm16(disp) ) { + __ ld(dst_reg, AT, disp); + } else { + __ move(T9, disp); + __ daddu(AT, AT, T9); + __ ld(dst_reg, AT, 0); + } } else { - __ ld(dst_reg, as_Register(base), disp); + if( Assembler::is_simm16(disp) ) { + __ ld(dst_reg, as_Register(base), disp); + } else { + __ move(T9, disp); + __ daddu(AT, as_Register(base), T9); + __ ld(dst_reg, AT, 0); + } } %} @@ -2020,14 +3197,31 @@ Register src_reg = as_Register($src$$reg); if( index != 0 ) { - assert(UseLoongsonISA, "Only supported for Loongson CPUs"); - __ gssdx(src_reg, as_Register(base), as_Register(index), disp); + if (scale == 0) { + __ daddu(AT, as_Register(base), as_Register(index)); + } else { + __ dsll(AT, as_Register(index), scale); + __ daddu(AT, as_Register(base), AT); + } + if( Assembler::is_simm16(disp) ) { + __ sd(src_reg, AT, disp); + } else { + __ move(T9, disp); + __ daddu(AT, AT, T9); + __ sd(src_reg, AT, 0); + } } else { - __ sd(src_reg, as_Register(base), disp); + if( Assembler::is_simm16(disp) ) { + __ sd(src_reg, as_Register(base), disp); + } else { + __ move(T9, disp); + __ daddu(AT, as_Register(base), T9); + __ sd(src_reg, AT, 0); + } } %} - enc_class store_L_immL0_enc (memory mem) %{ + enc_class store_L_immL0_enc (memory mem, immL0 src) %{ MacroAssembler _masm(&cbuf); int base = $mem$$base; int index = $mem$$index; @@ -2035,10 +3229,68 @@ int disp = $mem$$disp; if( index != 0 ) { - assert(UseLoongsonISA, "Only supported for Loongson CPUs"); - __ gssdx(R0, as_Register(base), as_Register(index), disp); + // For implicit null check + __ lb(AT, as_Register(base), 0); + + if (scale == 0) { + __ daddu(AT, as_Register(base), as_Register(index)); + } else { + __ dsll(AT, as_Register(index), scale); + __ daddu(AT, as_Register(base), AT); + } + if( Assembler::is_simm16(disp) ) { + __ sd(R0, AT, disp); + } else { + __ move(T9, disp); + __ addu(AT, AT, T9); + __ sd(R0, AT, 0); + } } else { - __ sd(R0, as_Register(base), disp); + if( Assembler::is_simm16(disp) ) { + __ sd(R0, as_Register(base), disp); + } else { + __ move(T9, disp); + __ addu(AT, as_Register(base), T9); + __ sd(R0, AT, 0); + } + } + %} + + enc_class store_L_immL_enc (memory mem, immL src) %{ + MacroAssembler _masm(&cbuf); + int base = $mem$$base; + int index = $mem$$index; + int scale = $mem$$scale; + int disp = $mem$$disp; + long imm = $src$$constant; + + if( index != 0 ) { + if (scale == 0) { + __ daddu(AT, as_Register(base), as_Register(index)); + } else { + __ dsll(AT, as_Register(index), scale); + __ daddu(AT, as_Register(base), AT); + } + if( Assembler::is_simm16(disp) ) { + __ set64(T9, imm); + __ sd(T9, AT, disp); + } else { + __ move(T9, disp); + __ addu(AT, AT, T9); + __ set64(T9, imm); + __ sd(T9, AT, 0); + } + } else { + if( Assembler::is_simm16(disp) ) { + __ move(AT, as_Register(base)); + __ set64(T9, imm); + __ sd(T9, AT, disp); + } else { + __ move(T9, disp); + __ addu(AT, as_Register(base), T9); + __ set64(T9, imm); + __ sd(T9, AT, 0); + } } %} @@ -2051,10 +3303,50 @@ FloatRegister dst = $dst$$FloatRegister; if( index != 0 ) { - assert(UseLoongsonISA, "Only supported for Loongson CPUs"); - __ gslwxc1(dst, as_Register(base), as_Register(index), 
disp); + if( Assembler::is_simm16(disp) ) { + if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) { + if (scale == 0) { + __ gslwxc1(dst, as_Register(base), as_Register(index), disp); + } else { + __ dsll(AT, as_Register(index), scale); + __ gslwxc1(dst, as_Register(base), AT, disp); + } + } else { + if (scale == 0) { + __ daddu(AT, as_Register(base), as_Register(index)); + } else { + __ dsll(AT, as_Register(index), scale); + __ daddu(AT, as_Register(base), AT); + } + __ lwc1(dst, AT, disp); + } + } else { + if (scale == 0) { + __ daddu(AT, as_Register(base), as_Register(index)); + } else { + __ dsll(AT, as_Register(index), scale); + __ daddu(AT, as_Register(base), AT); + } + __ move(T9, disp); + if( UseLoongsonISA ) { + __ gslwxc1(dst, AT, T9, 0); + } else { + __ daddu(AT, AT, T9); + __ lwc1(dst, AT, 0); + } + } } else { - __ lwc1(dst, as_Register(base), disp); + if( Assembler::is_simm16(disp) ) { + __ lwc1(dst, as_Register(base), disp); + } else { + __ move(T9, disp); + if( UseLoongsonISA ) { + __ gslwxc1(dst, as_Register(base), T9, 0); + } else { + __ daddu(AT, as_Register(base), T9); + __ lwc1(dst, AT, 0); + } + } } %} @@ -2067,10 +3359,50 @@ FloatRegister src = $src$$FloatRegister; if( index != 0 ) { - assert(UseLoongsonISA, "Only supported for Loongson CPUs"); - __ gsswxc1(src, as_Register(base), as_Register(index), disp); + if( Assembler::is_simm16(disp) ) { + if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) { + if (scale == 0) { + __ gsswxc1(src, as_Register(base), as_Register(index), disp); + } else { + __ dsll(AT, as_Register(index), scale); + __ gsswxc1(src, as_Register(base), AT, disp); + } + } else { + if (scale == 0) { + __ daddu(AT, as_Register(base), as_Register(index)); + } else { + __ dsll(AT, as_Register(index), scale); + __ daddu(AT, as_Register(base), AT); + } + __ swc1(src, AT, disp); + } + } else { + if (scale == 0) { + __ daddu(AT, as_Register(base), as_Register(index)); + } else { + __ dsll(AT, as_Register(index), scale); + __ daddu(AT, as_Register(base), AT); + } + __ move(T9, disp); + if( UseLoongsonISA ) { + __ gsswxc1(src, AT, T9, 0); + } else { + __ daddu(AT, AT, T9); + __ swc1(src, AT, 0); + } + } } else { - __ swc1(src, as_Register(base), disp); + if( Assembler::is_simm16(disp) ) { + __ swc1(src, as_Register(base), disp); + } else { + __ move(T9, disp); + if( UseLoongsonISA ) { + __ gslwxc1(src, as_Register(base), T9, 0); + } else { + __ daddu(AT, as_Register(base), T9); + __ swc1(src, AT, 0); + } + } } %} @@ -2083,26 +3415,106 @@ FloatRegister dst_reg = as_FloatRegister($dst$$reg); if( index != 0 ) { - assert(UseLoongsonISA, "Only supported for Loongson CPUs"); - __ gsldxc1(dst_reg, as_Register(base), as_Register(index), disp); + if( Assembler::is_simm16(disp) ) { + if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) { + if (scale == 0) { + __ gsldxc1(dst_reg, as_Register(base), as_Register(index), disp); + } else { + __ dsll(AT, as_Register(index), scale); + __ gsldxc1(dst_reg, as_Register(base), AT, disp); + } + } else { + if (scale == 0) { + __ daddu(AT, as_Register(base), as_Register(index)); + } else { + __ dsll(AT, as_Register(index), scale); + __ daddu(AT, as_Register(base), AT); + } + __ ldc1(dst_reg, AT, disp); + } + } else { + if (scale == 0) { + __ daddu(AT, as_Register(base), as_Register(index)); + } else { + __ dsll(AT, as_Register(index), scale); + __ daddu(AT, as_Register(base), AT); + } + __ move(T9, disp); + if( UseLoongsonISA ) { + __ gsldxc1(dst_reg, AT, T9, 0); + } else { + __ addu(AT, AT, T9); + __ ldc1(dst_reg, AT, 0); + } + } } 
else { - __ ldc1(dst_reg, as_Register(base), disp); + if( Assembler::is_simm16(disp) ) { + __ ldc1(dst_reg, as_Register(base), disp); + } else { + __ move(T9, disp); + if( UseLoongsonISA ) { + __ gsldxc1(dst_reg, as_Register(base), T9, 0); + } else { + __ addu(AT, as_Register(base), T9); + __ ldc1(dst_reg, AT, 0); + } + } } %} enc_class store_D_reg_enc (memory mem, regD src) %{ MacroAssembler _masm(&cbuf); - int base = $mem$$base; + int base = $mem$$base; int index = $mem$$index; int scale = $mem$$scale; - int disp = $mem$$disp; + int disp = $mem$$disp; FloatRegister src_reg = as_FloatRegister($src$$reg); if( index != 0 ) { - assert(UseLoongsonISA, "Only supported for Loongson CPUs"); - __ gssdxc1(src_reg, as_Register(base), as_Register(index), disp); + if( Assembler::is_simm16(disp) ) { + if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) { + if (scale == 0) { + __ gssdxc1(src_reg, as_Register(base), as_Register(index), disp); + } else { + __ dsll(AT, as_Register(index), scale); + __ gssdxc1(src_reg, as_Register(base), AT, disp); + } + } else { + if (scale == 0) { + __ daddu(AT, as_Register(base), as_Register(index)); + } else { + __ dsll(AT, as_Register(index), scale); + __ daddu(AT, as_Register(base), AT); + } + __ sdc1(src_reg, AT, disp); + } + } else { + if (scale == 0) { + __ daddu(AT, as_Register(base), as_Register(index)); + } else { + __ dsll(AT, as_Register(index), scale); + __ daddu(AT, as_Register(base), AT); + } + __ move(T9, disp); + if( UseLoongsonISA ) { + __ gssdxc1(src_reg, AT, T9, 0); + } else { + __ addu(AT, AT, T9); + __ sdc1(src_reg, AT, 0); + } + } } else { - __ sdc1(src_reg, as_Register(base), disp); + if( Assembler::is_simm16(disp) ) { + __ sdc1(src_reg, as_Register(base), disp); + } else { + __ move(T9, disp); + if( UseLoongsonISA ) { + __ gssdxc1(src_reg, as_Register(base), T9, 0); + } else { + __ addu(AT, as_Register(base), T9); + __ sdc1(src_reg, AT, 0); + } + } } %} @@ -2663,6 +4075,17 @@ interface(CONST_INTER); %} +operand immP31() +%{ + predicate(n->as_Type()->type()->reloc() == relocInfo::none + && (n->get_ptr() >> 31) == 0); + match(ConP); + + op_cost(5); + format %{ %} + interface(CONST_INTER); +%} + // NULL Pointer Immediate operand immP0() %{ predicate( n->get_ptr() == 0 ); @@ -3933,13 +5356,223 @@ %} //----------Memory Operands---------------------------------------------------- -operand baseOffset16(mRegP reg, immL16 off) +// Indirect Memory Operand +operand indirect(mRegP reg) %{ + constraint(ALLOC_IN_RC(p_reg)); + match(reg); + + format %{ "[$reg] @ indirect" %} + interface(MEMORY_INTER) %{ + base($reg); + index(0x0); /* NO_INDEX */ + scale(0x0); + disp(0x0); + %} +%} + +// Indirect Memory Plus Short Offset Operand +operand indOffset8(mRegP reg, immL8 off) %{ constraint(ALLOC_IN_RC(p_reg)); match(AddP reg off); + op_cost(10); + format %{ "[$reg + $off (8-bit)] @ indOffset8" %} + interface(MEMORY_INTER) %{ + base($reg); + index(0x0); /* NO_INDEX */ + scale(0x0); + disp($off); + %} +%} + +// Indirect Memory Times Scale Plus Index Register +operand indIndexScale(mRegP reg, mRegL lreg, immI2 scale) +%{ + constraint(ALLOC_IN_RC(p_reg)); + match(AddP reg (LShiftL lreg scale)); + + op_cost(10); + format %{"[$reg + $lreg << $scale] @ indIndexScale" %} + interface(MEMORY_INTER) %{ + base($reg); + index($lreg); + scale($scale); + disp(0x0); + %} +%} + + +// [base + index + offset] +operand baseIndexOffset8(mRegP base, mRegL index, immL8 off) +%{ + constraint(ALLOC_IN_RC(p_reg)); op_cost(5); - format %{ "[$reg + $off (16-bit)] @ baseOffset16" %} + match(AddP 
(AddP base index) off); + + format %{ "[$base + $index + $off (8-bit)] @ baseIndexOffset8" %} + interface(MEMORY_INTER) %{ + base($base); + index($index); + scale(0x0); + disp($off); + %} +%} + +// [base + index + offset] +operand baseIndexOffset8_convI2L(mRegP base, mRegI index, immL8 off) +%{ + constraint(ALLOC_IN_RC(p_reg)); + op_cost(5); + match(AddP (AddP base (ConvI2L index)) off); + + format %{ "[$base + $index + $off (8-bit)] @ baseIndexOffset8_convI2L" %} + interface(MEMORY_INTER) %{ + base($base); + index($index); + scale(0x0); + disp($off); + %} +%} + +// Indirect Memory Times Scale Plus Index Register Plus Offset Operand +operand indIndexScaleOffset8(mRegP reg, immL8 off, mRegL lreg, immI2 scale) +%{ + constraint(ALLOC_IN_RC(p_reg)); + match(AddP (AddP reg (LShiftL lreg scale)) off); + + op_cost(10); + format %{"[$reg + $off + $lreg << $scale] @ indIndexScaleOffset8" %} + interface(MEMORY_INTER) %{ + base($reg); + index($lreg); + scale($scale); + disp($off); + %} +%} + +operand indIndexScaleOffset8_convI2L(mRegP reg, immL8 off, mRegI ireg, immI2 scale) +%{ + constraint(ALLOC_IN_RC(p_reg)); + match(AddP (AddP reg (LShiftL (ConvI2L ireg) scale)) off); + + op_cost(10); + format %{"[$reg + $off + $ireg << $scale] @ indIndexScaleOffset8_convI2L" %} + interface(MEMORY_INTER) %{ + base($reg); + index($ireg); + scale($scale); + disp($off); + %} +%} + +// [base + index<in(2)->in(3)->in(1)->as_Type()->type()->is_long()->_lo >= 0); + op_cost(10); + match(AddP (AddP base (LShiftL (ConvI2L index) scale)) off); + + format %{ "[$base + $index << $scale + $off (8-bit)] @ basePosIndexScaleOffset8" %} + interface(MEMORY_INTER) %{ + base($base); + index($index); + scale($scale); + disp($off); + %} +%} + +// Indirect Memory Times Scale Plus Index Register Plus Offset Operand +operand indIndexScaleOffsetNarrow(mRegN reg, immL8 off, mRegL lreg, immI2 scale) +%{ + predicate(Universe::narrow_oop_shift() == 0); + constraint(ALLOC_IN_RC(p_reg)); + match(AddP (AddP (DecodeN reg) (LShiftL lreg scale)) off); + + op_cost(10); + format %{"[$reg + $off + $lreg << $scale] @ indIndexScaleOffsetNarrow" %} + interface(MEMORY_INTER) %{ + base($reg); + index($lreg); + scale($scale); + disp($off); + %} +%} + +// [base + index<in(2)->in(3)->in(1)->as_Type()->type()->is_long()->_lo >= 0); + predicate(Universe::narrow_oop_shift() == 0); + op_cost(10); + match(AddP (AddP (DecodeN base) (LShiftL (ConvI2L index) scale)) off); + + format %{ "[$base + $index << $scale + $off (8-bit)] @ indPosIndexI2LScaleOffset8Narrow" %} + interface(MEMORY_INTER) %{ + base($base); + index($index); + scale($scale); + disp($off); + %} +%} + +//FIXME: I think it's better to limit the immI to be 16-bit at most! 
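The enc_classes in this patch branch consistently on how wide the displacement is: the Loongson gs*x forms take an 8-bit signed offset, the plain MIPS loads/stores a 16-bit one, and anything wider must first be materialized in T9 (some of the load encodings gate the gs*x forms on simm16 instead). A minimal standalone sketch of such a range check, assuming a helper shaped like Assembler::is_simm — the real HotSpot definition may differ:

#include <cstdint>
#include <cstdio>

// Does 'value' fit in a signed immediate of 'bits' bits?
static bool is_simm(int64_t value, unsigned bits) {
  const int64_t lo = -(int64_t(1) << (bits - 1));      // e.g. -128 for 8 bits
  const int64_t hi =  (int64_t(1) << (bits - 1)) - 1;  // e.g. +127 for 8 bits
  return lo <= value && value <= hi;
}

int main() {
  printf("%d\n", is_simm(127, 8));      // 1: fits the 8-bit gs*x offset field
  printf("%d\n", is_simm(128, 8));      // 0: falls through to the simm16 path
  printf("%d\n", is_simm(-32768, 16));  // 1: fits a plain lb/sb/lw/sd offset
  printf("%d\n", is_simm(1 << 20, 16)); // 0: disp must be moved into T9 first
}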
+// Indirect Memory Plus Long Offset Operand +operand indOffset32(mRegP reg, immL32 off) %{ + constraint(ALLOC_IN_RC(p_reg)); + op_cost(20); + match(AddP reg off); + + format %{ "[$reg + $off (32-bit)] @ indOffset32" %} + interface(MEMORY_INTER) %{ + base($reg); + index(0x0); /* NO_INDEX */ + scale(0x0); + disp($off); + %} +%} + +// Indirect Memory Plus Index Register +operand indIndex(mRegP addr, mRegL index) %{ + constraint(ALLOC_IN_RC(p_reg)); + match(AddP addr index); + + op_cost(20); + format %{"[$addr + $index] @ indIndex" %} + interface(MEMORY_INTER) %{ + base($addr); + index($index); + scale(0x0); + disp(0x0); + %} +%} + +operand indirectNarrowKlass(mRegN reg) +%{ + predicate(Universe::narrow_klass_shift() == 0); + constraint(ALLOC_IN_RC(p_reg)); + op_cost(10); + match(DecodeNKlass reg); + + format %{ "[$reg] @ indirectNarrowKlass" %} + interface(MEMORY_INTER) %{ + base($reg); + index(0x0); + scale(0x0); + disp(0x0); + %} +%} + +operand indOffset8NarrowKlass(mRegN reg, immL8 off) +%{ + predicate(Universe::narrow_klass_shift() == 0); + constraint(ALLOC_IN_RC(p_reg)); + op_cost(10); + match(AddP (DecodeNKlass reg) off); + + format %{ "[$reg + $off (8-bit)] @ indOffset8NarrowKlass" %} interface(MEMORY_INTER) %{ base($reg); index(0x0); @@ -3948,59 +5581,63 @@ %} %} -operand gsBaseIndexOffset8(mRegP base, mRegL index, immL8 off) -%{ - predicate(UseLoongsonISA); +operand indOffset32NarrowKlass(mRegN reg, immL32 off) +%{ + predicate(Universe::narrow_klass_shift() == 0); constraint(ALLOC_IN_RC(p_reg)); - match(AddP (AddP base index) off); - - op_cost(5); - format %{ "[$base + $index + $off (8-bit)] @ gsBaseIndexOffset8" %} + op_cost(10); + match(AddP (DecodeNKlass reg) off); + + format %{ "[$reg + $off (32-bit)] @ indOffset32NarrowKlass" %} interface(MEMORY_INTER) %{ - base($base); - index($index); + base($reg); + index(0x0); scale(0x0); disp($off); %} %} -operand gsBaseIndexI2LOffset8(mRegP base, mRegI index, immL8 off) -%{ - predicate(UseLoongsonISA); +operand indIndexOffsetNarrowKlass(mRegN reg, mRegL lreg, immL32 off) +%{ + predicate(Universe::narrow_klass_shift() == 0); constraint(ALLOC_IN_RC(p_reg)); - match(AddP (AddP base (ConvI2L index)) off); - - op_cost(5); - format %{ "[$base + $index + $off (8-bit)] @ gsBaseIndexI2LOffset8" %} + match(AddP (AddP (DecodeNKlass reg) lreg) off); + + op_cost(10); + format %{"[$reg + $off + $lreg] @ indIndexOffsetNarrowKlass" %} interface(MEMORY_INTER) %{ - base($base); - index($index); + base($reg); + index($lreg); scale(0x0); disp($off); %} %} -operand gsBaseIndexOffset0(mRegP addr, mRegL index) %{ - predicate(UseLoongsonISA); +operand indIndexNarrowKlass(mRegN reg, mRegL lreg) +%{ + predicate(Universe::narrow_klass_shift() == 0); constraint(ALLOC_IN_RC(p_reg)); - match(AddP addr index); + match(AddP (DecodeNKlass reg) lreg); op_cost(10); - format %{"[$addr + $index] @ gsBaseIndexOffset0" %} + format %{"[$reg + $lreg] @ indIndexNarrowKlass" %} interface(MEMORY_INTER) %{ - base($addr); - index($index); + base($reg); + index($lreg); scale(0x0); disp(0x0); %} %} -operand baseOffset0(mRegP reg) %{ +// Indirect Memory Operand +operand indirectNarrow(mRegN reg) +%{ + predicate(Universe::narrow_oop_shift() == 0); constraint(ALLOC_IN_RC(p_reg)); op_cost(10); - match(reg); - - format %{ "[$reg] @ baseOffset0" %} + match(DecodeN reg); + + format %{ "[$reg] @ indirectNarrow" %} interface(MEMORY_INTER) %{ base($reg); index(0x0); @@ -4009,14 +5646,15 @@ %} %} -operand baseOffset16Narrow(mRegN reg, immL16 off) -%{ - predicate(Universe::narrow_oop_base() == 0 
&& Universe::narrow_oop_shift() == 0); +// Indirect Memory Plus Short Offset Operand +operand indOffset8Narrow(mRegN reg, immL8 off) +%{ + predicate(Universe::narrow_oop_shift() == 0); constraint(ALLOC_IN_RC(p_reg)); + op_cost(10); match(AddP (DecodeN reg) off); - op_cost(5); - format %{ "[$reg + $off (16-bit)] @ baseOffset16Narrow" %} + format %{ "[$reg + $off (8-bit)] @ indOffset8Narrow" %} interface(MEMORY_INTER) %{ base($reg); index(0x0); @@ -4025,14 +5663,15 @@ %} %} -operand gsBaseIndexOffset8Narrow(mRegN reg, mRegL lreg, immL8 off) -%{ - predicate(UseLoongsonISA && Universe::narrow_oop_base() == 0 && Universe::narrow_oop_shift() == 0); +// Indirect Memory Plus Index Register Plus Offset Operand +operand indIndexOffset8Narrow(mRegN reg, mRegL lreg, immL8 off) +%{ + predicate(Universe::narrow_oop_shift() == 0); constraint(ALLOC_IN_RC(p_reg)); match(AddP (AddP (DecodeN reg) lreg) off); - op_cost(5); - format %{"[$reg + $off + $lreg] @ gsBaseIndexOffset8Narrow" %} + op_cost(10); + format %{"[$reg + $off + $lreg] @ indIndexOffset8Narrow" %} interface(MEMORY_INTER) %{ base($reg); index($lreg); @@ -4041,14 +5680,29 @@ %} %} -operand baseOffset0Narrow(mRegN reg) -%{ - predicate(Universe::narrow_oop_base() == 0 && Universe::narrow_oop_shift() == 0); +//----------Load Long Memory Operands------------------------------------------ +// The load-long idiom will use it's address expression again after loading +// the first word of the long. If the load-long destination overlaps with +// registers used in the addressing expression, the 2nd half will be loaded +// from a clobbered address. Fix this by requiring that load-long use +// address registers that do not overlap with the load-long target. + +// load-long support +operand load_long_RegP() %{ constraint(ALLOC_IN_RC(p_reg)); - match(DecodeN reg); - - op_cost(10); - format %{ "[$reg] @ baseOffset0Narrow" %} + match(RegP); + match(mRegP); + op_cost(100); + format %{ %} + interface(REG_INTER); +%} + +// Indirect Memory Operand Long +operand load_long_indirect(load_long_RegP reg) %{ + constraint(ALLOC_IN_RC(p_reg)); + match(reg); + + format %{ "[$reg]" %} interface(MEMORY_INTER) %{ base($reg); index(0x0); @@ -4057,14 +5711,11 @@ %} %} -operand baseOffset16NarrowKlass(mRegN reg, immL16 off) -%{ - predicate(Universe::narrow_klass_base() == 0 && Universe::narrow_klass_shift() == 0); - constraint(ALLOC_IN_RC(p_reg)); - match(AddP (DecodeNKlass reg) off); - - op_cost(5); - format %{ "[$reg + $off (16-bit)] @ baseOffset16NarrowKlass" %} +// Indirect Memory Plus Long Offset Operand +operand load_long_indOffset32(load_long_RegP reg, immL32 off) %{ + match(AddP reg off); + + format %{ "[$reg + $off]" %} interface(MEMORY_INTER) %{ base($reg); index(0x0); @@ -4073,87 +5724,6 @@ %} %} -operand baseOffset0NarrowKlass(mRegN reg) -%{ - predicate(Universe::narrow_klass_base() == 0 && Universe::narrow_klass_shift() == 0); - constraint(ALLOC_IN_RC(p_reg)); - match(DecodeNKlass reg); - - op_cost(10); - format %{ "[$reg] @ baseOffset0NarrowKlass" %} - interface(MEMORY_INTER) %{ - base($reg); - index(0x0); - scale(0x0); - disp(0x0); - %} -%} - -operand gsBaseIndexOffset8NarrowKlass(mRegN reg, mRegL lreg, immL8 off) -%{ - predicate(UseLoongsonISA && Universe::narrow_klass_base() == 0 && Universe::narrow_klass_shift() == 0); - constraint(ALLOC_IN_RC(p_reg)); - match(AddP (AddP (DecodeNKlass reg) lreg) off); - - op_cost(5); - format %{"[$reg + $off + $lreg] @ gsBaseIndexOffset8NarrowKlass" %} - interface(MEMORY_INTER) %{ - base($reg); - index($lreg); - scale(0x0); - 
 
-operand baseOffset0NarrowKlass(mRegN reg)
-%{
-  predicate(Universe::narrow_klass_base() == 0 && Universe::narrow_klass_shift() == 0);
-  constraint(ALLOC_IN_RC(p_reg));
-  match(DecodeNKlass reg);
-
-  op_cost(10);
-  format %{ "[$reg] @ baseOffset0NarrowKlass" %}
-  interface(MEMORY_INTER) %{
-    base($reg);
-    index(0x0);
-    scale(0x0);
-    disp(0x0);
-  %}
-%}
-
-operand gsBaseIndexOffset8NarrowKlass(mRegN reg, mRegL lreg, immL8 off)
-%{
-  predicate(UseLoongsonISA && Universe::narrow_klass_base() == 0 && Universe::narrow_klass_shift() == 0);
-  constraint(ALLOC_IN_RC(p_reg));
-  match(AddP (AddP (DecodeNKlass reg) lreg) off);
-
-  op_cost(5);
-  format %{"[$reg + $off + $lreg] @ gsBaseIndexOffset8NarrowKlass" %}
-  interface(MEMORY_INTER) %{
-    base($reg);
-    index($lreg);
-    scale(0x0);
-    disp($off);
-  %}
-%}
-
-operand gsBaseIndexOffset0NarrowKlass(mRegN reg, mRegL lreg)
-%{
-  predicate(UseLoongsonISA && Universe::narrow_klass_base() == 0 && Universe::narrow_klass_shift() == 0);
-  constraint(ALLOC_IN_RC(p_reg));
-  match(AddP (DecodeNKlass reg) lreg);
-
-  op_cost(10);
-  format %{"[$reg + $lreg] @ gsBaseIndexOffset0NarrowKlass" %}
-  interface(MEMORY_INTER) %{
-    base($reg);
-    index($lreg);
-    scale(0x0);
-    disp(0x0);
-  %}
-%}
-
-
-//------------------------OPERAND CLASSES--------------------------------------
-opclass memory(
-  baseOffset16,
-  gsBaseIndexOffset8,
-  gsBaseIndexI2LOffset8,
-  gsBaseIndexOffset0,
-  baseOffset0,
-
-  baseOffset16Narrow,
-  gsBaseIndexOffset8Narrow,
-  baseOffset0Narrow,
-
-  baseOffset16NarrowKlass,
-  baseOffset0NarrowKlass,
-  gsBaseIndexOffset8NarrowKlass,
-  gsBaseIndexOffset0NarrowKlass
-);
-
-// For loading unsigned values
-// umemory --> unsigned memory
-opclass umemory(
-  baseOffset16,
-  baseOffset0,
-
-  baseOffset16Narrow,
-  baseOffset0Narrow,
-
-  baseOffset16NarrowKlass,
-  baseOffset0NarrowKlass
-);
-
-
 //----------Conditional Branch Operands----------------------------------------
 // Comparison Op  - This is the operation of the comparison, and is limited to
 // the following set of codes:
@@ -4206,6 +5776,55 @@
   %}
 %}
 
+/*
+// Comparison Code, unsigned compare.  Used by FP also, with
+// C2 (unordered) turned into GT or LT already.  The other bits
+// C0 and C3 are turned into Carry & Zero flags.
+operand cmpOpU() %{
+  match(Bool);
+
+  format %{ "" %}
+  interface(COND_INTER) %{
+    equal(0x4);
+    not_equal(0x5);
+    less(0x2);
+    greater_equal(0x3);
+    less_equal(0x6);
+    greater(0x7);
+  %}
+%}
+*/
+/*
+// Comparison Code for FP conditional move
+operand cmpOp_fcmov() %{
+  match(Bool);
+
+  format %{ "" %}
+  interface(COND_INTER) %{
+    equal        (0x01);
+    not_equal    (0x02);
+    greater      (0x03);
+    greater_equal(0x04);
+    less         (0x05);
+    less_equal   (0x06);
+  %}
+%}
+
+// Comparison Code used in long compares
+operand cmpOp_commute() %{
+  match(Bool);
+
+  format %{ "" %}
+  interface(COND_INTER) %{
+    equal(0x4);
+    not_equal(0x5);
+    less(0xF);
+    greater_equal(0xE);
+    less_equal(0xD);
+    greater(0xC);
+  %}
+%}
+*/
 //----------Special Memory Operands--------------------------------------------
 // Stack Slot Operand - This operand is used for loading and storing temporary
@@ -4275,6 +5894,12 @@
     disp($reg);  // Stack Offset
   %}
 %}
+
+
+//------------------------OPERAND CLASSES--------------------------------------
+//opclass memory( direct, indirect, indOffset16, indOffset32, indOffset32X, indIndexOffset );
+opclass memory( indirect, indirectNarrow, indOffset8, indOffset32, indIndex, indIndexScale, load_long_indirect, load_long_indOffset32, baseIndexOffset8, baseIndexOffset8_convI2L, indIndexScaleOffset8, indIndexScaleOffset8_convI2L, basePosIndexScaleOffset8, indIndexScaleOffsetNarrow, indPosIndexI2LScaleOffset8Narrow, indOffset8Narrow, indIndexOffset8Narrow);
+
 //----------PIPELINE-----------------------------------------------------------
 // Rules which define the behavior of the target architecture's pipeline.
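One note before the pipeline rules, on the memory opclass defined just above:
the *Narrow operands can stand directly in a memory operand class only
because, with a zero compressed-oop base and a zero shift (which is what the
predicates and the loadN2P/loadN2PKlass rules check), decoding a narrow oop is
the identity. A minimal standalone C++ sketch of that reasoning, illustrative
only and not HotSpot code:

    #include <cstdint>

    // General compressed-oop decode: oop = heap_base + ((uint64_t)narrow << shift).
    static inline uint64_t decode_oop(uint32_t narrow, uint64_t heap_base, unsigned shift) {
      return heap_base + ((uint64_t)narrow << shift);
    }

    int main() {
      // With heap_base == 0 and shift == 0 the decode degenerates to a plain
      // zero-extension, so the 32-bit compressed value is directly usable as
      // the base register of a memory operand.
      uint32_t narrow = 0x12345678u;
      return decode_oop(narrow, 0, 0) == (uint64_t)narrow ? 0 : 1;
    }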
@@ -4628,7 +6253,7 @@ %} // Load Integer (32 bit signed) to Unsigned Byte (8 bit UNsigned) -instruct loadI2UB(mRegI dst, umemory mem, immI_255 mask) %{ +instruct loadI2UB(mRegI dst, memory mem, immI_255 mask) %{ match(Set dst (AndI (LoadI mem) mask)); ins_cost(125); @@ -4648,7 +6273,7 @@ %} // Load Integer (32 bit signed) to Unsigned Short/Char (16 bit UNsigned) -instruct loadI2US(mRegI dst, umemory mem, immI_65535 mask) %{ +instruct loadI2US(mRegI dst, memory mem, immI_65535 mask) %{ match(Set dst (AndI (LoadI mem) mask)); ins_cost(125); @@ -4689,18 +6314,26 @@ ins_pipe( ialu_storeL ); %} - instruct storeL_immL0(memory mem, immL0 zero) %{ match(Set mem (StoreL mem zero)); ins_cost(180); - format %{ "sd $mem, zero #@storeL_immL0" %} - ins_encode(store_L_immL0_enc(mem)); + format %{ "sd zero, $mem #@storeL_immL0" %} + ins_encode(store_L_immL0_enc(mem, zero)); ins_pipe( ialu_storeL ); %} +instruct storeL_imm(memory mem, immL src) %{ + match(Set mem (StoreL mem src)); + + ins_cost(200); + format %{ "sd $src, $mem #@storeL_imm" %} + ins_encode(store_L_immL_enc(mem, src)); + ins_pipe( ialu_storeL ); +%} + // Load Compressed Pointer -instruct loadN(mRegN dst, umemory mem) +instruct loadN(mRegN dst, memory mem) %{ match(Set dst (LoadN mem)); @@ -4710,7 +6343,7 @@ ins_pipe( ialu_loadI ); // XXX %} -instruct loadN2P(mRegP dst, umemory mem) +instruct loadN2P(mRegP dst, memory mem) %{ match(Set dst (DecodeN (LoadN mem))); predicate(Universe::narrow_oop_base() == NULL && Universe::narrow_oop_shift() == 0); @@ -4742,7 +6375,7 @@ %} // Load narrow Klass Pointer -instruct loadNKlass(mRegN dst, umemory mem) +instruct loadNKlass(mRegN dst, memory mem) %{ match(Set dst (LoadNKlass mem)); @@ -4752,7 +6385,7 @@ ins_pipe( ialu_loadI ); // XXX %} -instruct loadN2PKlass(mRegP dst, umemory mem) +instruct loadN2PKlass(mRegP dst, memory mem) %{ match(Set dst (DecodeNKlass (LoadNKlass mem))); predicate(Universe::narrow_klass_base() == NULL && Universe::narrow_klass_shift() == 0); @@ -4867,6 +6500,26 @@ ins_pipe( ialu_storeI ); %} +// Store NULL Pointer, mark word, or other simple pointer constant. 
+instruct storeImmP(memory mem, immP31 src) %{
+  match(Set mem (StoreP mem src));
+
+  ins_cost(150);
+  format %{ "sd    $src, $mem #@storeImmP" %}
+  ins_encode(store_P_immP_enc(mem, src));
+  ins_pipe( ialu_storeI );
+%}
+
+// Store Byte Immediate
+instruct storeImmB(memory mem, immI8 src) %{
+  match(Set mem (StoreB mem src));
+
+  ins_cost(150);
+  format %{ "sb    $src, $mem #@storeImmB" %}
+  ins_encode(store_B_immI_enc(mem, src));
+  ins_pipe( ialu_storeI );
+%}
+
 // Store Compressed Pointer
 instruct storeN(memory mem, mRegN src)
 %{
@@ -4915,8 +6568,28 @@
   match(Set mem (StoreN mem zero));
 
   ins_cost(125); // XXX
-  format %{ "storeN0 $mem, R12\t# compressed ptr" %}
-  ins_encode(storeImmN0_enc(mem));
+  format %{ "storeN0 zero, $mem\t# compressed ptr" %}
+  ins_encode(storeImmN0_enc(mem, zero));
   ins_pipe( ialu_storeI );
 %}
+
+instruct storeImmN(memory mem, immN src)
+%{
+  match(Set mem (StoreN mem src));
+
+  ins_cost(150);
+  format %{ "storeImmN $mem, $src\t# compressed ptr @ storeImmN" %}
+  ins_encode(storeImmN_enc(mem, src));
+  ins_pipe( ialu_storeI );
+%}
+
+instruct storeImmNKlass(memory mem, immNKlass src)
+%{
+  match(Set mem (StoreNKlass mem src));
+
+  ins_cost(150); // XXX
+  format %{ "sw    $src, $mem\t# compressed klass ptr @ storeImmNKlass" %}
+  ins_encode(storeImmNKlass_enc(mem, src));
   ins_pipe( ialu_storeI );
 %}
 
@@ -4930,15 +6603,6 @@
   ins_pipe( ialu_storeI );
 %}
 
-instruct storeB0(memory mem, immI0 zero) %{
-  match(Set mem (StoreB mem zero));
-
-  ins_cost(100);
-  format %{ "sb $zero, $mem #@storeB0" %}
-  ins_encode(store_B0_enc(mem));
-  ins_pipe( ialu_storeI );
-%}
-
 instruct storeB_convL2I(memory mem, mRegL src) %{
   match(Set mem (StoreB mem (ConvL2I src)));
 
@@ -4968,7 +6632,7 @@
 %}
 
 // Load Byte (8bit UNsigned)
-instruct loadUB(mRegI dst, umemory mem) %{
+instruct loadUB(mRegI dst, memory mem) %{
   match(Set dst (LoadUB mem));
 
   ins_cost(125);
@@ -4977,7 +6641,7 @@
   ins_pipe( ialu_loadI );
 %}
 
-instruct loadUB_convI2L(mRegL dst, umemory mem) %{
+instruct loadUB_convI2L(mRegL dst, memory mem) %{
   match(Set dst (ConvI2L (LoadUB mem)));
 
   ins_cost(125);
@@ -5016,12 +6680,12 @@
 %}
 
 // Store Integer Immediate
-instruct storeI0(memory mem, immI0 zero) %{
-  match(Set mem (StoreI mem zero));
-
-  ins_cost(100);
-  format %{ "sw $mem, $zero #@storeI0" %}
-  ins_encode(store_I_immI0_enc(mem));
+instruct storeImmI(memory mem, immI src) %{
+  match(Set mem (StoreI mem src));
+
+  ins_cost(150);
+  format %{ "sw    $src, $mem #@storeImmI" %}
+  ins_encode(store_I_immI_enc(mem, src));
   ins_pipe( ialu_storeI );
 %}
 
@@ -9047,20 +10711,20 @@
 %}
 */
 
-instruct lbu_and_lmask(mRegI dst, umemory mem,  immI_255 mask) %{
+instruct lbu_and_lmask(mRegI dst, memory mem,  immI_255 mask) %{
   match(Set dst (AndI mask (LoadB mem)));
   ins_cost(60);
 
-  format %{ "lbu  $dst, $mem #@lbu_and_lmask" %}
+  format %{ "lbu  $dst, $mem #@lbu_and_lmask" %}
   ins_encode(load_UB_enc(dst, mem));
   ins_pipe( ialu_loadI );
 %}
 
-instruct lbu_and_rmask(mRegI dst, umemory mem,  immI_255 mask) %{
+instruct lbu_and_rmask(mRegI dst, memory mem,  immI_255 mask) %{
   match(Set dst (AndI (LoadB mem) mask));
   ins_cost(60);
 
-  format %{ "lbu  $dst, $mem #@lbu_and_rmask" %}
+  format %{ "lbu  $dst, $mem #@lbu_and_rmask" %}
   ins_encode(load_UB_enc(dst, mem));
   ins_pipe( ialu_loadI );
 %}
 
@@ -10704,7 +12368,7 @@
 
 // Prefetch instructions.
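The prefetch encodings below, like the rewritten load/store encodings above,
form the effective address as base + (index << scale) + disp: the displacement
is folded into the memory instruction when it fits a signed 16-bit immediate,
and is materialized in T9 otherwise. A compact standalone sketch of that rule,
with assumed helper and variable names rather than HotSpot code:

    #include <cstdint>

    static inline bool is_simm16(int64_t v) { return v >= -32768 && v <= 32767; }

    // Returns the address a MIPS load/store would touch; 'at' plays the role
    // of the AT scratch register in the encodings.
    uint64_t effective_address(uint64_t base, uint64_t index, int scale, int64_t disp) {
      uint64_t at = base + (scale == 0 ? index : index << scale); // daddu / dsll+daddu
      if (is_simm16(disp))
        return at + disp;            // folded in as the 16-bit offset of lb/sb/lw/sw
      uint64_t t9 = (uint64_t)disp;  // move T9, disp
      return at + t9;                // daddu AT, AT, T9; the access then uses offset 0
    }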
-instruct prefetchrNTA( umemory mem ) %{
+instruct prefetchrNTA( memory mem ) %{
   match(PrefetchRead mem);
   ins_cost(125);
@@ -10715,14 +12379,29 @@
   int  scale = $mem$$scale;
   int  disp = $mem$$disp;
 
-  assert(index == 0, "no index");
-  __ daddiu(AT, as_Register(base), disp);
+  if( index != 0 ) {
+     if (scale == 0) {
+        __ daddu(AT, as_Register(base), as_Register(index));
+     } else {
+        __ dsll(AT, as_Register(index), scale);
+        __ daddu(AT, as_Register(base), AT);
+     }
+  } else {
+     __ move(AT, as_Register(base));
+  }
+  if( Assembler::is_simm16(disp) ) {
+     __ daddiu(AT, AT, disp);
+  } else {
+     __ move(T9, disp);
+     __ daddu(AT, AT, T9);
+  }
   __ pref(0, AT, 0); //hint: 0:load
 %}
 ins_pipe(pipe_slow);
 %}
 
-instruct prefetchwNTA( umemory mem ) %{
+instruct prefetchwNTA( memory mem ) %{
   match(PrefetchWrite mem);
   ins_cost(125);
   format %{ "pref $mem\t# Prefetch to non-temporal cache for write @ prefetchwNTA" %}
@@ -10732,9 +12411,24 @@
   int  scale = $mem$$scale;
   int  disp = $mem$$disp;
 
-  assert(index == 0, "no index");
-  __ daddiu(AT, as_Register(base), disp);
-  __ pref(1, AT, 0); //hint: 1:store
+  if( index != 0 ) {
+     if (scale == 0) {
+        __ daddu(AT, as_Register(base), as_Register(index));
+     } else {
+        __ dsll(AT, as_Register(index), scale);
+        __ daddu(AT, as_Register(base), AT);
+     }
+  } else {
+     __ move(AT, as_Register(base));
+  }
+  if( Assembler::is_simm16(disp) ) {
+     __ daddiu(AT, AT, disp);
+  } else {
+     __ move(T9, disp);
+     __ daddu(AT, AT, T9);
+  }
+  __ pref(1, AT, 0); //hint: 1:store
 %}
 ins_pipe(pipe_slow);
 %}
@@ -10754,10 +12448,50 @@
   Register dst = R0;
 
   if( index != 0 ) {
-     assert(UseLoongsonISA, "Only supported for Loongson CPUs");
-     __ gslbx(dst, as_Register(base), as_Register(index), disp);
+     if( Assembler::is_simm16(disp) ) {
+        if( UseLoongsonISA ) {
+           if (scale == 0) {
+              __ gslbx(dst, as_Register(base), as_Register(index), disp);
+           } else {
+              __ dsll(AT, as_Register(index), scale);
+              __ gslbx(dst, as_Register(base), AT, disp);
+           }
+        } else {
+           if (scale == 0) {
+              __ addu(AT, as_Register(base), as_Register(index));
+           } else {
+              __ dsll(AT, as_Register(index), scale);
+              __ addu(AT, as_Register(base), AT);
+           }
+           __ lb(dst, AT, disp);
+        }
+     } else {
+        if (scale == 0) {
+           __ addu(AT, as_Register(base), as_Register(index));
+        } else {
+           __ dsll(AT, as_Register(index), scale);
+           __ addu(AT, as_Register(base), AT);
+        }
+        __ move(T9, disp);
+        if( UseLoongsonISA ) {
+           __ gslbx(dst, AT, T9, 0);
+        } else {
+           __ addu(AT, AT, T9);
+           __ lb(dst, AT, 0);
+        }
+     }
   } else {
-     __ lb(dst, as_Register(base), disp);
+     if( Assembler::is_simm16(disp) ) {
+        __ lb(dst, as_Register(base), disp);
+     } else {
+        __ move(T9, disp);
+        if( UseLoongsonISA ) {
+           __ gslbx(dst, as_Register(base), T9, 0);
+        } else {
+           __ addu(AT, as_Register(base), T9);
+           __ lb(dst, AT, 0);
+        }
+     }
   }
 %}
 ins_pipe(pipe_slow);
@@ -10778,7 +12512,7 @@
 %}
 
 // Load Char (16bit unsigned)
-instruct loadUS(mRegI dst, umemory mem) %{
+instruct loadUS(mRegI dst, memory mem) %{
   match(Set dst (LoadUS mem));
 
   ins_cost(125);
@@ -10787,7 +12521,7 @@
   ins_pipe( ialu_loadI );
 %}
 
-instruct loadUS_convI2L(mRegL dst, umemory mem) %{
+instruct loadUS_convI2L(mRegL dst, memory mem) %{
   match(Set dst (ConvI2L (LoadUS mem)));
 
   ins_cost(125);
@@ -10862,7 +12596,7 @@
   ins_encode %{
     FloatRegister dst = as_FloatRegister($dst$$reg);
 
-    __ dmtc1(R0, dst);
+    __ dmtc1(R0, dst);
   %}
   ins_pipe( fpu_loadF );
 %}
 
@@ -10912,10 +12646,66 @@
   int  disp = $mem$$disp;
 
   if( index != 0 ) {
-    assert(UseLoongsonISA, "Only supported for Loongson CPUs");
-    __ gsswx(R0, as_Register(base), as_Register(index), disp);
-  } else {
-    __ sw(R0, as_Register(base), disp);
+    if ( UseLoongsonISA ) {
+      if ( Assembler::is_simm(disp, 8) ) {
+        if ( scale == 0 ) {
+          __ gsswx(R0, as_Register(base), as_Register(index), disp);
+        } else {
+          __ dsll(T9, as_Register(index), scale);
+          __ gsswx(R0, as_Register(base), T9, disp);
+        }
+      } else if ( Assembler::is_simm16(disp) ) {
+        if ( scale == 0 ) {
+          __ daddu(AT, as_Register(base), as_Register(index));
+        } else {
+          __ dsll(T9, as_Register(index), scale);
+          __ daddu(AT, as_Register(base), T9);
+        }
+        __ sw(R0, AT, disp);
+      } else {
+        if ( scale == 0 ) {
+          __ move(T9, disp);
+          __ daddu(AT, as_Register(index), T9);
+          __ gsswx(R0, as_Register(base), AT, 0);
+        } else {
+          __ dsll(T9, as_Register(index), scale);
+          __ move(AT, disp);
+          __ daddu(AT, AT, T9);
+          __ gsswx(R0, as_Register(base), AT, 0);
+        }
+      }
+    } else { // not Loongson ISA
+      if(scale != 0) {
+        __ dsll(T9, as_Register(index), scale);
+        __ daddu(AT, as_Register(base), T9);
+      } else {
+        __ daddu(AT, as_Register(base), as_Register(index));
+      }
+      if( Assembler::is_simm16(disp) ) {
+        __ sw(R0, AT, disp);
+      } else {
+        __ move(T9, disp);
+        __ daddu(AT, AT, T9);
+        __ sw(R0, AT, 0);
+      }
+    }
+  } else { // index == 0
+    if ( UseLoongsonISA ) {
+      if ( Assembler::is_simm16(disp) ) {
+        __ sw(R0, as_Register(base), disp);
+      } else {
+        __ move(T9, disp);
+        __ gsswx(R0, as_Register(base), T9, 0);
+      }
+    } else {
+      if( Assembler::is_simm16(disp) ) {
+        __ sw(R0, as_Register(base), disp);
+      } else {
+        __ move(T9, disp);
+        __ daddu(AT, as_Register(base), T9);
+        __ sw(R0, AT, 0);
+      }
+    }
   }
 %}
 ins_pipe( ialu_storeI );
@@ -10950,6 +12740,87 @@
   ins_pipe( fpu_storeF );
 %}
 
+instruct storeD_imm0( memory mem, immD0 zero) %{
+  match(Set mem (StoreD mem zero));
+
+  ins_cost(40);
+  format %{ "store   $mem, zero\t# store double @ storeD_imm0" %}
+  ins_encode %{
+    int      base = $mem$$base;
+    int     index = $mem$$index;
+    int     scale = $mem$$scale;
+    int      disp = $mem$$disp;
+
+    __ mtc1(R0, F30);
+    __ cvt_d_w(F30, F30);
+
+    if( index != 0 ) {
+      if ( UseLoongsonISA ) {
+        if ( Assembler::is_simm(disp, 8) ) {
+          if (scale == 0) {
+            __ gssdxc1(F30, as_Register(base), as_Register(index), disp);
+          } else {
+            __ dsll(T9, as_Register(index), scale);
+            __ gssdxc1(F30, as_Register(base), T9, disp);
+          }
+        } else if ( Assembler::is_simm16(disp) ) {
+          if (scale == 0) {
+            __ daddu(AT, as_Register(base), as_Register(index));
+            __ sdc1(F30, AT, disp);
+          } else {
+            __ dsll(T9, as_Register(index), scale);
+            __ daddu(AT, as_Register(base), T9);
+            __ sdc1(F30, AT, disp);
+          }
+        } else {
+          if (scale == 0) {
+            __ move(T9, disp);
+            __ daddu(AT, as_Register(index), T9);
+            __ gssdxc1(F30, as_Register(base), AT, 0);
+          } else {
+            __ move(T9, disp);
+            __ dsll(AT, as_Register(index), scale);
+            __ daddu(AT, AT, T9);
+            __ gssdxc1(F30, as_Register(base), AT, 0);
+          }
+        }
+      } else { // not Loongson ISA
+        if(scale != 0) {
+          __ dsll(T9, as_Register(index), scale);
+          __ daddu(AT, as_Register(base), T9);
+        } else {
+          __ daddu(AT, as_Register(base), as_Register(index));
+        }
+        if( Assembler::is_simm16(disp) ) {
+          __ sdc1(F30, AT, disp);
+        } else {
+          __ move(T9, disp);
+          __ daddu(AT, AT, T9);
+          __ sdc1(F30, AT, 0);
+        }
+      }
+    } else { // index == 0
+      if ( UseLoongsonISA ) {
+        if ( Assembler::is_simm16(disp) ) {
+          __ sdc1(F30, as_Register(base), disp);
+        } else {
+          __ move(T9, disp);
+          __ gssdxc1(F30, as_Register(base), T9, 0);
+        }
+      } else {
+        if( Assembler::is_simm16(disp) ) {
+          __ sdc1(F30, as_Register(base), disp);
+        } else {
+          __ move(T9, disp);
+          __ daddu(AT, as_Register(base), T9);
+          __ sdc1(F30, AT, 0);
+        }
+      }
+    }
+  %}
+  ins_pipe( ialu_storeI );
+%}
+
 instruct loadSSI(mRegI dst, stackSlotI src)
 %{
   match(Set dst src);
@@ -11108,21 +12979,13 @@
 %}
 
 // Store CMS card-mark Immediate
-instruct storeImmCM(memory mem, mRegI src) %{
+instruct storeImmCM(memory mem, immI8 src) %{
   match(Set mem (StoreCM mem src));
 
-  ins_cost(500);
-  format %{ "sb $src, $mem (CMS card-mark) @ storeImmCM" %}
-  ins_encode(store_B_reg_sync_enc(mem, src));
-  ins_pipe( ialu_storeI );
-%}
-
-instruct storeI0CM(memory mem, immI0 zero) %{
-  match(Set mem (StoreCM mem zero));
-
-  ins_cost(450);
-  format %{ "sb $zero, $mem (CMS card-mark) @ storeI0CM" %}
-  ins_encode(store_B0_sync_enc(mem));
+  ins_cost(150);
+  format %{ "sb    $src, $mem\t# CMS card-mark imm @ storeImmCM" %}
+  ins_encode(store_B_immI_enc_sync(mem, src));
   ins_pipe( ialu_storeI );
 %}
 
@@ -11143,6 +13006,72 @@
   ins_pipe( pipe_jump );
 %}
 
+instruct leaP8Narrow(mRegP dst, indOffset8Narrow mem)
+%{
+  predicate(Universe::narrow_oop_shift() == 0);
+  match(Set dst mem);
+
+  ins_cost(110);
+  format %{ "leaq    $dst, $mem\t# ptr off8narrow @ leaP8Narrow" %}
+  ins_encode %{
+    Register  dst  = $dst$$Register;
+    Register  base = as_Register($mem$$base);
+    int       disp = $mem$$disp;
+
+    __ daddiu(dst, base, disp);
+  %}
+  ins_pipe( ialu_regI_imm16 );
+%}
+
+instruct leaPPosIdxScaleOff8(mRegP dst, basePosIndexScaleOffset8 mem)
+%{
+  match(Set dst mem);
+
+  ins_cost(110);
+  format %{ "leaq    $dst, $mem\t# @ PosIdxScaleOff8" %}
+  ins_encode %{
+    Register  dst   = $dst$$Register;
+    Register  base  = as_Register($mem$$base);
+    Register  index = as_Register($mem$$index);
+    int       scale = $mem$$scale;
+    int       disp  = $mem$$disp;
+
+    if (scale == 0) {
+      __ daddu(AT, base, index);
+      __ daddiu(dst, AT, disp);
+    } else {
+      __ dsll(AT, index, scale);
+      __ daddu(AT, base, AT);
+      __ daddiu(dst, AT, disp);
+    }
+  %}
+
+  ins_pipe( ialu_regI_imm16 );
+%}
+
+instruct leaPIdxScale(mRegP dst, indIndexScale mem)
+%{
+  match(Set dst mem);
+
+  ins_cost(110);
+  format %{ "leaq    $dst, $mem\t# @ leaPIdxScale" %}
+  ins_encode %{
+    Register  dst   = $dst$$Register;
+    Register  base  = as_Register($mem$$base);
+    Register  index = as_Register($mem$$index);
+    int       scale = $mem$$scale;
+
+    if (scale == 0) {
+      __ daddu(dst, base, index);
+    } else {
+      __ dsll(AT, index, scale);
+      __ daddu(dst, base, AT);
+    }
+  %}
+
+  ins_pipe( ialu_regI_imm16 );
+%}
+
 // Jump Direct Conditional - Label defines a relative address from Jcc+1
 instruct jmpLoopEnd(cmpOp cop, mRegI src1, mRegI src2, label labl) %{
   match(CountedLoopEnd cop (CmpI src1 src2));
@@ -11658,7 +13587,7 @@
 
 // Match loading integer and casting it to unsigned int in long register.
 // LoadI + ConvI2L + AndL 0xffffffff.
-instruct loadUI2L_rmask(mRegL dst, umemory mem, immL_32bits mask) %{
+instruct loadUI2L_rmask(mRegL dst, memory mem, immL_32bits mask) %{
   match(Set dst (AndL (ConvI2L (LoadI mem)) mask));
 
   format %{ "lwu     $dst, $mem \t// zero-extend to long @ loadUI2L_rmask" %}
@@ -11666,7 +13595,7 @@
   ins_pipe(ialu_loadI);
 %}
 
-instruct loadUI2L_lmask(mRegL dst, umemory mem, immL_32bits mask) %{
+instruct loadUI2L_lmask(mRegL dst, memory mem, immL_32bits mask) %{
   match(Set dst (AndL mask (ConvI2L (LoadI mem))));
 
   format %{ "lwu     $dst, $mem \t// zero-extend to long @ loadUI2L_lmask" %}
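The two loadUI2L rules above are sound because masking a sign-extended 32-bit
load with 0xffffffff yields exactly what lwu produces: the 32-bit value
zero-extended to 64 bits. A quick standalone check, illustrative only:

    #include <cassert>
    #include <cstdint>

    // What the matched ideal subtree computes: AndL(ConvI2L(LoadI mem), 0xffffffff).
    static uint64_t and_convi2l(int32_t loaded) {
      return (uint64_t)(int64_t)loaded & 0xffffffffULL;
    }

    int main() {
      int32_t v = -1;  // bit pattern 0xffffffff
      assert(and_convi2l(v) == 0xffffffffULL);  // matches lwu's zero extension
      assert(and_convi2l(42) == 42u);           // non-negative values unchanged
      return 0;
    }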