Browse Source

Simplify floating point compare instructions

pull/879/head
eopXD 4 years ago
parent
commit
626fce2075
  1. 48
      riscv/decode.h
  2. 5
      riscv/insns/vmfeq_vf.h
  3. 5
      riscv/insns/vmfeq_vv.h
  4. 5
      riscv/insns/vmfge_vf.h
  5. 5
      riscv/insns/vmfgt_vf.h
  6. 5
      riscv/insns/vmfle_vf.h
  7. 5
      riscv/insns/vmfle_vv.h
  8. 5
      riscv/insns/vmflt_vf.h
  9. 5
      riscv/insns/vmflt_vv.h
  10. 5
      riscv/insns/vmfne_vf.h
  11. 5
      riscv/insns/vmfne_vv.h

48
riscv/decode.h

@@ -1882,7 +1882,7 @@ reg_t index[P.VU.vlmax]; \
for (reg_t i = P.VU.vstart->read(); i < vl; ++i) { \
VI_LOOP_ELEMENT_SKIP(); \
uint64_t mmask = UINT64_C(1) << mpos; \
uint64_t &vdi = P.VU.elt<uint64_t>(rd_num, midx, true); \
uint64_t &vd = P.VU.elt<uint64_t>(rd_num, midx, true); \
uint64_t res = 0;
#define VI_VFP_LOOP_REDUCTION_BASE(width) \
@@ -1961,7 +1961,7 @@ reg_t index[P.VU.vlmax]; \
case e16: \
case e32: \
case e64: { \
vdi = (vdi & ~mmask) | (((res) << mpos) & mmask); \
vd = (vd & ~mmask) | (((res) << mpos) & mmask); \
break; \
} \
default: \
@@ -2119,30 +2119,52 @@ reg_t index[P.VU.vlmax]; \
DEBUG_RVV_FP_VF; \
VI_VFP_LOOP_END
#define VI_VFP_LOOP_CMP(BODY16, BODY32, BODY64, is_vs1) \
VI_CHECK_MSS(is_vs1); \
#define VI_VFP_VV_LOOP_CMP(BODY16, BODY32, BODY64) \
VI_CHECK_MSS(true); \
VI_VFP_LOOP_CMP_BASE \
switch(P.VU.vsew) { \
case e16: {\
float16_t vs2 = P.VU.elt<float16_t>(rs2_num, i); \
float16_t vs1 = P.VU.elt<float16_t>(rs1_num, i); \
float16_t rs1 = f16(READ_FREG(rs1_num)); \
VFP_VV_PARAMS(16); \
BODY16; \
set_fp_exceptions; \
break; \
}\
case e32: {\
float32_t vs2 = P.VU.elt<float32_t>(rs2_num, i); \
float32_t vs1 = P.VU.elt<float32_t>(rs1_num, i); \
float32_t rs1 = f32(READ_FREG(rs1_num)); \
VFP_VV_PARAMS(32); \
BODY32; \
set_fp_exceptions; \
break; \
}\
case e64: {\
float64_t vs2 = P.VU.elt<float64_t>(rs2_num, i); \
float64_t vs1 = P.VU.elt<float64_t>(rs1_num, i); \
float64_t rs1 = f64(READ_FREG(rs1_num)); \
VFP_VV_PARAMS(64); \
BODY64; \
set_fp_exceptions; \
break; \
}\
default: \
require(0); \
break; \
}; \
VI_VFP_LOOP_CMP_END \
#define VI_VFP_VF_LOOP_CMP(BODY16, BODY32, BODY64) \
VI_CHECK_MSS(false); \
VI_VFP_LOOP_CMP_BASE \
switch(P.VU.vsew) { \
case e16: {\
VFP_VF_PARAMS(16); \
BODY16; \
set_fp_exceptions; \
break; \
}\
case e32: {\
VFP_VF_PARAMS(32); \
BODY32; \
set_fp_exceptions; \
break; \
}\
case e64: {\
VFP_VF_PARAMS(64); \
BODY64; \
set_fp_exceptions; \
break; \

5
riscv/insns/vmfeq_vf.h

@@ -1,5 +1,5 @@
// vmfeq.vf vd, vs2, fs1
VI_VFP_LOOP_CMP
VI_VFP_VF_LOOP_CMP
({
res = f16_eq(vs2, rs1);
},
@@ -8,5 +8,4 @@ VI_VFP_LOOP_CMP
},
{
res = f64_eq(vs2, rs1);
},
false)
})

5
riscv/insns/vmfeq_vv.h

@@ -1,5 +1,5 @@
// vmfeq.vv vd, vs2, vs1
VI_VFP_LOOP_CMP
VI_VFP_VV_LOOP_CMP
({
res = f16_eq(vs2, vs1);
},
@@ -8,5 +8,4 @@ VI_VFP_LOOP_CMP
},
{
res = f64_eq(vs2, vs1);
},
true)
})

5
riscv/insns/vmfge_vf.h

@@ -1,5 +1,5 @@
// vmfge.vf vd, vs2, rs1
VI_VFP_LOOP_CMP
VI_VFP_VF_LOOP_CMP
({
res = f16_le(rs1, vs2);
},
@@ -8,5 +8,4 @@ VI_VFP_LOOP_CMP
},
{
res = f64_le(rs1, vs2);
},
false)
})

5
riscv/insns/vmfgt_vf.h

@@ -1,5 +1,5 @@
// vmfgt.vf vd, vs2, rs1
VI_VFP_LOOP_CMP
VI_VFP_VF_LOOP_CMP
({
res = f16_lt(rs1, vs2);
},
@@ -8,5 +8,4 @@ VI_VFP_LOOP_CMP
},
{
res = f64_lt(rs1, vs2);
},
false)
})

5
riscv/insns/vmfle_vf.h

@@ -1,5 +1,5 @@
// vmfle.vf vd, vs2, rs1
VI_VFP_LOOP_CMP
VI_VFP_VF_LOOP_CMP
({
res = f16_le(vs2, rs1);
},
@@ -8,5 +8,4 @@ VI_VFP_LOOP_CMP
},
{
res = f64_le(vs2, rs1);
},
false)
})

5
riscv/insns/vmfle_vv.h

@@ -1,5 +1,5 @@
// vmfle.vv vd, vs2, vs1
VI_VFP_LOOP_CMP
VI_VFP_VV_LOOP_CMP
({
res = f16_le(vs2, vs1);
},
@@ -8,5 +8,4 @@ VI_VFP_LOOP_CMP
},
{
res = f64_le(vs2, vs1);
},
true)
})

5
riscv/insns/vmflt_vf.h

@@ -1,5 +1,5 @@
// vmflt.vf vd, vs2, rs1
VI_VFP_LOOP_CMP
VI_VFP_VF_LOOP_CMP
({
res = f16_lt(vs2, rs1);
},
@@ -8,5 +8,4 @@ VI_VFP_LOOP_CMP
},
{
res = f64_lt(vs2, rs1);
},
false)
})

5
riscv/insns/vmflt_vv.h

@@ -1,5 +1,5 @@
// vmflt.vv vd, vs2, vs1
VI_VFP_LOOP_CMP
VI_VFP_VV_LOOP_CMP
({
res = f16_lt(vs2, vs1);
},
@@ -8,5 +8,4 @@ VI_VFP_LOOP_CMP
},
{
res = f64_lt(vs2, vs1);
},
true)
})

5
riscv/insns/vmfne_vf.h

@@ -1,5 +1,5 @@
// vmfne.vf vd, vs2, rs1
VI_VFP_LOOP_CMP
VI_VFP_VF_LOOP_CMP
({
res = !f16_eq(vs2, rs1);
},
@@ -8,5 +8,4 @@ VI_VFP_LOOP_CMP
},
{
res = !f64_eq(vs2, rs1);
},
false)
})

5
riscv/insns/vmfne_vv.h

@@ -1,5 +1,5 @@
// vmfne.vv vd, vs2, vs1
VI_VFP_LOOP_CMP
VI_VFP_VV_LOOP_CMP
({
res = !f16_eq(vs2, vs1);
},
@@ -8,5 +8,4 @@ VI_VFP_LOOP_CMP
},
{
res = !f64_eq(vs2, vs1);
},
true)
})

Loading…
Cancel
Save