diff --git a/riscv/decode.h b/riscv/decode.h
index 1d6ea9a7..5f566ce1 100644
--- a/riscv/decode.h
+++ b/riscv/decode.h
@@ -852,6 +852,18 @@ static inline bool is_aligned(const unsigned val, const unsigned pos)
   float##width##_t rs1 = f##width(READ_FREG(rs1_num)); \
   float##width##_t vs2 = P.VU.elt<float##width##_t>(rs2_num, i);
 
+#define CVT_FP_TO_FP_PARAMS(from_width, to_width) \
+  auto vs2 = P.VU.elt<float##from_width##_t>(rs2_num, i); \
+  auto &vd = P.VU.elt<float##to_width##_t>(rd_num, i, true);
+
+#define CVT_INT_TO_FP_PARAMS(from_width, to_width, sign) \
+  auto vs2 = P.VU.elt<sign##from_width##_t>(rs2_num, i); \
+  auto &vd = P.VU.elt<float##to_width##_t>(rd_num, i, true);
+
+#define CVT_FP_TO_INT_PARAMS(from_width, to_width, sign) \
+  auto vs2 = P.VU.elt<float##from_width##_t>(rs2_num, i); \
+  auto &vd = P.VU.elt<sign##to_width##_t>(rd_num, i, true);
+
 //
 // vector: integer and masking operation loop
 //
@@ -2369,6 +2381,64 @@ reg_t index[P.VU.vlmax]; \
 for (reg_t i=P.VU.vstart->read(); i<vl; ++i) { \
+#define VI_VFP_NCVT_FP_TO_FP(BODY16, BODY32, BODY64, \
+                             CHECK16, CHECK32, CHECK64) \
+  VI_CHECK_SDS(false); \
+  switch (P.VU.vsew) { \
+    case e16: { \
+      CHECK32 \
+      VI_VFP_LOOP_SCALE_BASE \
+      CVT_FP_TO_FP_PARAMS(32, 16) \
+      BODY32 \
+      set_fp_exceptions; \
+      VI_VFP_LOOP_END \
+      } \
+      break; \
+    case e32: { \
+      CHECK64 \
+      VI_VFP_LOOP_SCALE_BASE \
+      CVT_FP_TO_FP_PARAMS(64, 32) \
+      BODY64 \
+      set_fp_exceptions; \
+      VI_VFP_LOOP_END \
+      } \
+      break; \
+    default: \
+      require(0); \
+      break; \
+  }
+
+#define VI_VFP_NCVT_INT_TO_FP(BODY16, BODY32, BODY64, \
+                              CHECK16, CHECK32, CHECK64, sign) \
+  VI_CHECK_SDS(false); \
+  switch (P.VU.vsew) { \
+    case e16: { \
+      CHECK32 \
+      VI_VFP_LOOP_SCALE_BASE \
+      CVT_INT_TO_FP_PARAMS(32, 16, sign) \
+      BODY32 \
+      set_fp_exceptions; \
+      VI_VFP_LOOP_END \
+      } \
+      break; \
+    case e32: { \
+      CHECK64 \
+      VI_VFP_LOOP_SCALE_BASE \
+      CVT_INT_TO_FP_PARAMS(64, 32, sign) \
+      BODY64 \
+      set_fp_exceptions; \
+      VI_VFP_LOOP_END \
+      } \
+      break; \
+    default: \
+      require(0); \
+      break; \
+  }
+
+#define VI_VFP_NCVT_FP_TO_INT(BODY16, BODY32, BODY64, \
+                              CHECK16, CHECK32, CHECK64, sign) \
+  VI_CHECK_SDS(false); \
+  switch (P.VU.vsew) { \
+    case e8: { \
+      CHECK16 \
+      VI_VFP_LOOP_SCALE_BASE \
+      CVT_FP_TO_INT_PARAMS(16, 8, sign) \
+      BODY16 \
+      set_fp_exceptions; \
+      VI_VFP_LOOP_END \
+      } \
+      break; \
+    case e16: { \
+      CHECK32 \
+      VI_VFP_LOOP_SCALE_BASE \
+      CVT_FP_TO_INT_PARAMS(32, 16, sign) \
+      BODY32 \
+      set_fp_exceptions; \
+      VI_VFP_LOOP_END \
+      } \
+      break; \
+    case e32: { \
+      CHECK64 \
+      VI_VFP_LOOP_SCALE_BASE \
+      CVT_FP_TO_INT_PARAMS(64, 32, sign) \
+      BODY64 \
+      set_fp_exceptions; \
+      VI_VFP_LOOP_END \
+      } \
+      break; \
+    default: \
+      require(0); \
+      break; \
+  }
diff --git a/riscv/insns/vfncvt_f_f_w.h b/riscv/insns/vfncvt_f_f_w.h
--- a/riscv/insns/vfncvt_f_f_w.h
+++ b/riscv/insns/vfncvt_f_f_w.h
@@ -1,23 +1,9 @@
 // vfncvt.f.f.v vd, vs2, vm
-VI_VFP_CVT_SCALE
-({
-  ;
-},
-{
-  auto vs2 = P.VU.elt<float32_t>(rs2_num, i);
-  P.VU.elt<float16_t>(rd_num, i, true) = f32_to_f16(vs2);
-},
-{
-  auto vs2 = P.VU.elt<float64_t>(rs2_num, i);
-  P.VU.elt<float32_t>(rd_num, i, true) = f64_to_f32(vs2);
-},
-{
-  ;
-},
-{
-  require(p->extension_enabled(EXT_ZFH));
-},
-{
-  require(p->extension_enabled('D'));
-},
-false, (P.VU.vsew >= 16))
+VI_VFP_NCVT_FP_TO_FP(
+  {;},                              // BODY16
+  { vd = f32_to_f16(vs2); },        // BODY32
+  { vd = f64_to_f32(vs2); },        // BODY64
+  {;},                              // CHECK16
+  { require_extension(EXT_ZFH); },  // CHECK32
+  { require_extension('D'); }       // CHECK64
+)
diff --git a/riscv/insns/vfncvt_f_x_w.h b/riscv/insns/vfncvt_f_x_w.h
index 10a6f7b1..d587be26 100644
--- a/riscv/insns/vfncvt_f_x_w.h
+++ b/riscv/insns/vfncvt_f_x_w.h
@@ -1,23 +1,10 @@
 // vfncvt.f.x.v vd, vs2, vm
-VI_VFP_CVT_SCALE
-({
-  ;
-},
-{
-  auto vs2 = P.VU.elt<int32_t>(rs2_num, i);
-  P.VU.elt<float16_t>(rd_num, i, true) = i32_to_f16(vs2);
-},
-{
-  auto vs2 = P.VU.elt<int64_t>(rs2_num, i);
-  P.VU.elt<float32_t>(rd_num, i, true) = i64_to_f32(vs2);
-},
-{
-  ;
-},
-{
-  require(p->extension_enabled(EXT_ZFH));
-},
-{
-  require(p->extension_enabled('F'));
-},
-false, (P.VU.vsew >= 16))
+VI_VFP_NCVT_INT_TO_FP(
+  {;},                              // BODY16
+  { vd = i32_to_f16(vs2); },        // BODY32
+  { vd = i64_to_f32(vs2); },        // BODY64
+  {;},                              // CHECK16
+  { require_extension(EXT_ZFH); },  // CHECK32
+  { require_extension('F'); },      // CHECK64
+  int                               // sign
+)
diff --git a/riscv/insns/vfncvt_f_xu_w.h b/riscv/insns/vfncvt_f_xu_w.h
index 32b4b026..5e0e34fa 100644
--- a/riscv/insns/vfncvt_f_xu_w.h
+++ b/riscv/insns/vfncvt_f_xu_w.h
@@ -1,23 +1,10 @@
 // vfncvt.f.xu.v vd, vs2, vm
-VI_VFP_CVT_SCALE
-({
-  ;
-},
-{
-  auto vs2 = P.VU.elt<uint32_t>(rs2_num, i);
-  P.VU.elt<float16_t>(rd_num, i, true) = ui32_to_f16(vs2);
-},
-{
-  auto vs2 = P.VU.elt<uint64_t>(rs2_num, i);
-  P.VU.elt<float32_t>(rd_num, i, true) = ui64_to_f32(vs2);
-},
-{
-  ;
-},
-{
-  require(p->extension_enabled(EXT_ZFH));
-},
-{
-  require(p->extension_enabled('F'));
-},
-false, (P.VU.vsew >= 16))
+VI_VFP_NCVT_INT_TO_FP(
+  {;},                              // BODY16
+  { vd = ui32_to_f16(vs2); },       // BODY32
+  { vd = ui64_to_f32(vs2); },       // BODY64
+  {;},                              // CHECK16
+  { require_extension(EXT_ZFH); },  // CHECK32
+  { require_extension('F'); },      // CHECK64
+  uint                              // sign
+)
diff --git a/riscv/insns/vfncvt_rod_f_f_w.h b/riscv/insns/vfncvt_rod_f_f_w.h
index 20a14c90..89bdc05f 100644
--- a/riscv/insns/vfncvt_rod_f_f_w.h
+++ b/riscv/insns/vfncvt_rod_f_f_w.h
@@ -1,25 +1,15 @@
 // vfncvt.rod.f.f.v vd, vs2, vm
-VI_VFP_CVT_SCALE
-({
-  ;
-},
-{
-  softfloat_roundingMode = softfloat_round_odd;
-  auto vs2 = P.VU.elt<float32_t>(rs2_num, i);
-  P.VU.elt<float16_t>(rd_num, i, true) = f32_to_f16(vs2);
-},
-{
-  softfloat_roundingMode = softfloat_round_odd;
-  auto vs2 = P.VU.elt<float64_t>(rs2_num, i);
-  P.VU.elt<float32_t>(rd_num, i, true) = f64_to_f32(vs2);
-},
-{
-  ;
-},
-{
-  require(p->extension_enabled(EXT_ZFH));
-},
-{
-  require(p->extension_enabled('F'));
-},
-false, (P.VU.vsew >= 16))
+VI_VFP_NCVT_FP_TO_FP(
+  {;},  // BODY16
+  {     // BODY32
+    softfloat_roundingMode = softfloat_round_odd;
+    vd = f32_to_f16(vs2);
+  },
+  {     // BODY64
+    softfloat_roundingMode = softfloat_round_odd;
+    vd = f64_to_f32(vs2);
+  },
+  {;},  // CHECK16
+  { require_extension(EXT_ZFH); },  // CHECK32
+  { require_extension('F'); }       // CHECK64
+)
diff --git a/riscv/insns/vfncvt_rtz_x_f_w.h b/riscv/insns/vfncvt_rtz_x_f_w.h
index 0629b8d9..23b4d5e2 100644
--- a/riscv/insns/vfncvt_rtz_x_f_w.h
+++ b/riscv/insns/vfncvt_rtz_x_f_w.h
@@ -1,24 +1,10 @@
 // vfncvt.rtz.x.f.w vd, vs2, vm
-VI_VFP_CVT_SCALE
-({
-  auto vs2 = P.VU.elt<float16_t>(rs2_num, i);
-  P.VU.elt<int8_t>(rd_num, i, true) = f16_to_i8(vs2, softfloat_round_minMag, true);
-},
-{
-  auto vs2 = P.VU.elt<float32_t>(rs2_num, i);
-  P.VU.elt<int16_t>(rd_num, i, true) = f32_to_i16(vs2, softfloat_round_minMag, true);
-},
-{
-  auto vs2 = P.VU.elt<float64_t>(rs2_num, i);
-  P.VU.elt<int32_t>(rd_num, i, true) = f64_to_i32(vs2, softfloat_round_minMag, true);
-},
-{
-  require(p->extension_enabled(EXT_ZFH));
-},
-{
-  require(p->extension_enabled('F'));
-},
-{
-  require(p->extension_enabled('D'));
-},
-false, (P.VU.vsew <= 32))
+VI_VFP_NCVT_FP_TO_INT(
+  { vd = f16_to_i8(vs2, softfloat_round_minMag, true); },   // BODY16
+  { vd = f32_to_i16(vs2, softfloat_round_minMag, true); },  // BODY32
+  { vd = f64_to_i32(vs2, softfloat_round_minMag, true); },  // BODY64
+  { require_extension(EXT_ZFH); },          // CHECK16
+  { require(p->extension_enabled('F')); },  // CHECK32
+  { require(p->extension_enabled('D')); },  // CHECK64
+  int                                       // sign
+)
diff --git a/riscv/insns/vfncvt_rtz_xu_f_w.h b/riscv/insns/vfncvt_rtz_xu_f_w.h
index 82aa63e8..f55c680b 100644
--- a/riscv/insns/vfncvt_rtz_xu_f_w.h
+++ b/riscv/insns/vfncvt_rtz_xu_f_w.h
@@ -1,24 +1,10 @@
 // vfncvt.rtz.xu.f.w vd, vs2, vm
-VI_VFP_CVT_SCALE
-({
-  auto vs2 = P.VU.elt<float16_t>(rs2_num, i);
-  P.VU.elt<uint8_t>(rd_num, i, true) = f16_to_ui8(vs2, softfloat_round_minMag, true);
-},
-{
-  auto vs2 = P.VU.elt<float32_t>(rs2_num, i);
-  P.VU.elt<uint16_t>(rd_num, i, true) = f32_to_ui16(vs2, softfloat_round_minMag, true);
-},
-{
-  auto vs2 = P.VU.elt<float64_t>(rs2_num, i);
-  P.VU.elt<uint32_t>(rd_num, i, true) = f64_to_ui32(vs2, softfloat_round_minMag, true);
-},
-{
-  require(p->extension_enabled(EXT_ZFH));
-},
-{
-  require(p->extension_enabled('F'));
-},
-{
-  require(p->extension_enabled('D'));
-},
-false, (P.VU.vsew <= 32))
+VI_VFP_NCVT_FP_TO_INT(
+  { vd = f16_to_ui8(vs2, softfloat_round_minMag, true); },   // BODY16
+  { vd = f32_to_ui16(vs2, softfloat_round_minMag, true); },  // BODY32
+  { vd = f64_to_ui32(vs2, softfloat_round_minMag, true); },  // BODY64
+  { require_extension(EXT_ZFH); },          // CHECK16
+  { require(p->extension_enabled('F')); },  // CHECK32
+  { require(p->extension_enabled('D')); },  // CHECK64
+  uint                                      // sign
+)
diff --git a/riscv/insns/vfncvt_x_f_w.h b/riscv/insns/vfncvt_x_f_w.h
index a8a6dfb1..a7f3c334 100644
--- a/riscv/insns/vfncvt_x_f_w.h
+++ b/riscv/insns/vfncvt_x_f_w.h
@@ -1,24 +1,10 @@
 // vfncvt.x.f.w vd, vs2, vm
-VI_VFP_CVT_SCALE
-({
-  auto vs2 = P.VU.elt<float16_t>(rs2_num, i);
-  P.VU.elt<int8_t>(rd_num, i, true) = f16_to_i8(vs2, STATE.frm->read(), true);
-},
-{
-  auto vs2 = P.VU.elt<float32_t>(rs2_num, i);
-  P.VU.elt<int16_t>(rd_num, i, true) = f32_to_i16(vs2, STATE.frm->read(), true);
-},
-{
-  auto vs2 = P.VU.elt<float64_t>(rs2_num, i);
-  P.VU.elt<int32_t>(rd_num, i, true) = f64_to_i32(vs2, STATE.frm->read(), true);
-},
-{
-  require(p->extension_enabled(EXT_ZFH));
-},
-{
-  require(p->extension_enabled('F'));
-},
-{
-  require(p->extension_enabled('D'));
-},
-false, (P.VU.vsew <= 32))
+VI_VFP_NCVT_FP_TO_INT(
+  { vd = f16_to_i8(vs2, softfloat_roundingMode, true); },   // BODY16
+  { vd = f32_to_i16(vs2, softfloat_roundingMode, true); },  // BODY32
+  { vd = f64_to_i32(vs2, softfloat_roundingMode, true); },  // BODY64
+  { require_extension(EXT_ZFH); },          // CHECK16
+  { require(p->extension_enabled('F')); },  // CHECK32
+  { require(p->extension_enabled('D')); },  // CHECK64
+  int                                       // sign
+)
diff --git a/riscv/insns/vfncvt_xu_f_w.h b/riscv/insns/vfncvt_xu_f_w.h
index bff733e3..02046e8b 100644
--- a/riscv/insns/vfncvt_xu_f_w.h
+++ b/riscv/insns/vfncvt_xu_f_w.h
@@ -1,24 +1,10 @@
 // vfncvt.xu.f.w vd, vs2, vm
-VI_VFP_CVT_SCALE
-({
-  auto vs2 = P.VU.elt<float16_t>(rs2_num, i);
-  P.VU.elt<uint8_t>(rd_num, i, true) = f16_to_ui8(vs2, STATE.frm->read(), true);
-},
-{
-  auto vs2 = P.VU.elt<float32_t>(rs2_num, i);
-  P.VU.elt<uint16_t>(rd_num, i, true) = f32_to_ui16(vs2, STATE.frm->read(), true);
-},
-{
-  auto vs2 = P.VU.elt<float64_t>(rs2_num, i);
-  P.VU.elt<uint32_t>(rd_num, i, true) = f64_to_ui32(vs2, STATE.frm->read(), true);
-},
-{
-  require(p->extension_enabled(EXT_ZFH));
-},
-{
-  require(p->extension_enabled('F'));
-},
-{
-  require(p->extension_enabled('D'));
-},
-false, (P.VU.vsew <= 32))
+VI_VFP_NCVT_FP_TO_INT(
+  { vd = f16_to_ui8(vs2, softfloat_roundingMode, true); },   // BODY16
+  { vd = f32_to_ui16(vs2, softfloat_roundingMode, true); },  // BODY32
+  { vd = f64_to_ui32(vs2, softfloat_roundingMode, true); },  // BODY64
+  { require_extension(EXT_ZFH); },          // CHECK16
+  { require(p->extension_enabled('F')); },  // CHECK32
+  { require(p->extension_enabled('D')); },  // CHECK64
+  uint                                      // sign
+)