@@ -100,10 +100,33 @@ static bool require_scale_rvfmin(DisasContext *s)
}
}
/* Destination vector register group cannot overlap source mask register. */
static bool require_vm(int vm, int vd)
/*
* Source and destination vector register groups cannot overlap source mask
* register:
*
* A vector register cannot be used to provide source operands with more than
* one EEW for a single instruction. A mask register source is considered to
* have EEW=1 for this constraint. An encoding that would result in the same
* vector register being read with two or more different EEWs, including when
* the vector register appears at different positions within two or more vector
* register groups, is reserved.
* (Section 5.2)
*
* A destination vector register group can overlap a source vector
* register group only if one of the following holds:
* 1. The destination EEW equals the source EEW.
* 2. The destination EEW is smaller than the source EEW and the overlap
* is in the lowest-numbered part of the source register group.
* 3. The destination EEW is greater than the source EEW, the source EMUL
* is at least 1, and the overlap is in the highest-numbered part of
* the destination register group.
* For the purpose of determining register group overlap constraints, mask
* elements have EEW=1.
* (Section 5.2)
*/
static bool require_vm(int vm, int v)
{
return (vm != 0 || vd != 0);
return (vm != 0 || v != 0);
}
static bool require_nf(int vd, int nf, int lmul)
@@ -356,11 +379,41 @@ static bool vext_check_ld_index(DisasContext *s, int vd, int vs2,
return ret;
}
/*
* Check whether a vector register is used to provide source operands with
* more than one EEW for the vector instruction.
 * Returns true if the instruction has a valid encoding.
 * Returns false if the encoding violates the mismatched input EEWs constraint.
*/
static bool vext_check_input_eew(DisasContext *s, int vs1, uint8_t eew_vs1,
int vs2, uint8_t eew_vs2, int vm)
{
bool is_valid = true;
int8_t emul_vs1 = eew_vs1 - s->sew + s->lmul;
int8_t emul_vs2 = eew_vs2 - s->sew + s->lmul;
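    /*
     * For example, with SEW=16 (s->sew = MO_16 = 1) and LMUL=1 (s->lmul = 0),
     * a source operand read at EEW=32 (eew = 2) gives emul = 2 - 1 + 0 = 1,
     * so the overlap check below treats that source as a group of
     * 1 << 1 = 2 registers.
     */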
    /* When vm is 0, vs1 & vs2 (EEW!=1) group can't overlap v0 (EEW=1) */
    if ((vs1 != -1 && !require_vm(vm, vs1)) ||
        (vs2 != -1 && !require_vm(vm, vs2))) {
is_valid = false;
}
/* When eew_vs1 != eew_vs2, check whether vs1 and vs2 are overlapped */
    if ((vs1 != -1 && vs2 != -1) && (eew_vs1 != eew_vs2) &&
        is_overlapped(vs1, 1 << MAX(emul_vs1, 0),
                      vs2, 1 << MAX(emul_vs2, 0))) {
is_valid = false;
}
return is_valid;
}
static bool vext_check_ss(DisasContext *s, int vd, int vs, int vm)
{
    return require_vm(vm, vd) &&
           require_align(vd, s->lmul) &&
           require_align(vs, s->lmul);
           require_align(vs, s->lmul) &&
           vext_check_input_eew(s, vs, s->sew, -1, s->sew, vm);
}
/*
@@ -379,6 +432,7 @@ static bool vext_check_ss(DisasContext *s, int vd, int vs, int vm)
static bool vext_check_sss(DisasContext *s, int vd, int vs1, int vs2, int vm)
{
    return vext_check_ss(s, vd, vs2, vm) &&
           vext_check_input_eew(s, vs1, s->sew, vs2, s->sew, vm) &&
require_align(vs1, s->lmul);
}
@@ -474,6 +528,7 @@ static bool vext_narrow_check_common(DisasContext *s, int vd, int vs2,
static bool vext_check_ds(DisasContext *s, int vd, int vs, int vm)
{
    return vext_wide_check_common(s, vd, vm) &&
           vext_check_input_eew(s, vs, s->sew, -1, 0, vm) &&
           require_align(vs, s->lmul) &&
require_noover(vd, s->lmul + 1, vs, s->lmul);
}
@@ -481,6 +536,7 @@ static bool vext_check_ds(DisasContext *s, int vd, int vs, int vm)
static bool vext_check_dd(DisasContext *s, int vd, int vs, int vm)
{
    return vext_wide_check_common(s, vd, vm) &&
           vext_check_input_eew(s, vs, s->sew + 1, -1, 0, vm) &&
require_align(vs, s->lmul + 1);
}
@@ -499,6 +555,7 @@ static bool vext_check_dd(DisasContext *s, int vd, int vs, int vm)
static bool vext_check_dss(DisasContext *s, int vd, int vs1, int vs2, int vm)
{
    return vext_check_ds(s, vd, vs2, vm) &&
           vext_check_input_eew(s, vs1, s->sew, vs2, s->sew, vm) &&
           require_align(vs1, s->lmul) &&
require_noover(vd, s->lmul + 1, vs1, s->lmul);
}
@@ -521,12 +578,14 @@ static bool vext_check_dss(DisasContext *s, int vd, int vs1, int vs2, int vm)
static bool vext_check_dds(DisasContext *s, int vd, int vs1, int vs2, int vm)
{
    return vext_check_ds(s, vd, vs1, vm) &&
           vext_check_input_eew(s, vs1, s->sew, vs2, s->sew + 1, vm) &&
require_align(vs2, s->lmul + 1);
}
static bool vext_check_sd(DisasContext *s, int vd, int vs, int vm)
{
bool ret = vext_narrow_check_common(s, vd, vs, vm);
    bool ret = vext_narrow_check_common(s, vd, vs, vm) &&
vext_check_input_eew(s, vs, s->sew + 1, -1, 0, vm);
if (vd != vs) {
        ret &= require_noover(vd, s->lmul, vs, s->lmul + 1);
}
@@ -549,6 +608,7 @@ static bool vext_check_sd(DisasContext *s, int vd, int vs, int vm)
static bool vext_check_sds(DisasContext *s, int vd, int vs1, int vs2, int vm)
{
    return vext_check_sd(s, vd, vs2, vm) &&
           vext_check_input_eew(s, vs1, s->sew, vs2, s->sew + 1, vm) &&
require_align(vs1, s->lmul);
}
@@ -584,7 +644,9 @@ static bool vext_check_slide(DisasContext *s, int vd, int vs2,
{
    bool ret = require_align(vs2, s->lmul) &&
               require_align(vd, s->lmul) &&
               require_vm(vm, vd);
               require_vm(vm, vd) &&
               vext_check_input_eew(s, -1, 0, vs2, s->sew, vm);
if (is_over) {
        ret &= (vd != vs2);
}
@@ -802,32 +864,286 @@ GEN_VEXT_TRANS(vlm_v, MO_8, vlm_v, ld_us_mask_op, ld_us_mask_check)
GEN_VEXT_TRANS(vsm_v, MO_8, vsm_v, st_us_mask_op, st_us_mask_check)
/*
*** stride load and store
 * MAXSZ returns the maximum vector size that can be operated on, in bytes,
* which is used in GVEC IR when vl_eq_vlmax flag is set to true
* to accelerate vector operation.
*/
static inline uint32_t MAXSZ(DisasContext *s)
{
    int max_sz = s->cfg_ptr->vlenb << 3;
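    /* e.g. VLEN=128 (vlenb = 16) and LMUL=1/2 (s->lmul = -1): 128 >> 4 = 8 bytes */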
return max_sz >> (3 - s->lmul);
}
/* Return floor(log2(a)); callers only pass non-zero powers of two. */
static inline uint32_t get_log2(uint32_t a)
{
uint32_t i = 0;
for (; a > 0;) {
a >>= 1;
i++;
}
    return i - 1;
}
typedef void gen_tl_ldst(TCGv, TCGv_ptr, tcg_target_long);
/*
* Simulate the strided load/store main loop:
*
 * for (i = env->vstart; i < env->vl; env->vstart = ++i) {
 *     k = 0;
 *     while (k < nf) {
 *         if (!vm && !vext_elem_mask(v0, i)) {
 *             vext_set_elems_1s(vd, vma, (i + k * max_elems) * esz,
 *                               (i + k * max_elems + 1) * esz);
 *             k++;
 *             continue;
 *         }
 *         target_ulong addr = base + stride * i + (k << log2_esz);
 *         ldst(env, adjust_addr(env, addr), i + k * max_elems, vd, ra);
 *         k++;
 *     }
 * }
*/
typedef void gen_helper_ldst_stride(TCGv_ptr, TCGv_ptr, TCGv,
TCGv, TCGv_env, TCGv_i32);
static void gen_ldst_stride_main_loop(DisasContext *s, TCGv dest, uint32_t rs1,
uint32_t rs2, uint32_t vm, uint32_t nf,
gen_tl_ldst *ld_fn, gen_tl_ldst *st_fn,
bool is_load)
{
TCGv addr = tcg_temp_new();
TCGv base = get_gpr(s, rs1, EXT_NONE);
TCGv stride = get_gpr(s, rs2, EXT_NONE);
TCGv i = tcg_temp_new();
TCGv i_esz = tcg_temp_new();
TCGv k = tcg_temp_new();
TCGv k_esz = tcg_temp_new();
TCGv k_max = tcg_temp_new();
TCGv mask = tcg_temp_new();
TCGv mask_offs = tcg_temp_new();
TCGv mask_offs_64 = tcg_temp_new();
TCGv mask_elem = tcg_temp_new();
TCGv mask_offs_rem = tcg_temp_new();
TCGv vreg = tcg_temp_new();
TCGv dest_offs = tcg_temp_new();
TCGv stride_offs = tcg_temp_new();
uint32_t max_elems = MAXSZ(s) >> s->sew;
TCGLabel *start = gen_new_label();
TCGLabel *end = gen_new_label();
TCGLabel *start_k = gen_new_label();
TCGLabel *inc_k = gen_new_label();
TCGLabel *end_k = gen_new_label();
MemOp atomicity = MO_ATOM_NONE;
if (s->sew == 0) {
atomicity = MO_ATOM_NONE;
} else {
atomicity = MO_ATOM_IFALIGN_PAIR;
}
mark_vs_dirty(s);
tcg_gen_addi_tl(mask, (TCGv)tcg_env, vreg_ofs(s, 0));
/* Start of outer loop. */
tcg_gen_mov_tl(i, cpu_vstart);
gen_set_label(start);
tcg_gen_brcond_tl(TCG_COND_GE, i, cpu_vl, end);
tcg_gen_shli_tl(i_esz, i, s->sew);
/* Start of inner loop. */
tcg_gen_movi_tl(k, 0);
gen_set_label(start_k);
tcg_gen_brcond_tl(TCG_COND_GE, k, tcg_constant_tl(nf), end_k);
/*
* If we are in mask agnostic regime and the operation is not unmasked we
* set the inactive elements to 1.
*/
    if (!vm && s->vma) {
TCGLabel *active_element = gen_new_label();
/* (i + k * max_elems) * esz */
        tcg_gen_shli_tl(mask_offs, k, get_log2(max_elems << s->sew));
tcg_gen_add_tl(mask_offs, mask_offs, i_esz);
/*
* Check whether the i bit of the mask is 0 or 1.
*
* static inline int vext_elem_mask(void *v0, int index)
* {
         *     int idx = index / 64;
         *     int pos = index % 64;
         *     return (((uint64_t *)v0)[idx] >> pos) & 1;
* }
*/
tcg_gen_shri_tl(mask_offs_64, mask_offs, 3);
tcg_gen_add_tl(mask_offs_64, mask_offs_64, mask);
tcg_gen_ld_i64((TCGv_i64)mask_elem, (TCGv_ptr)mask_offs_64, 0);
tcg_gen_rem_tl(mask_offs_rem, mask_offs, tcg_constant_tl(8));
tcg_gen_shr_tl(mask_elem, mask_elem, mask_offs_rem);
tcg_gen_andi_tl(mask_elem, mask_elem, 1);
tcg_gen_brcond_tl(TCG_COND_NE, mask_elem, tcg_constant_tl(0),
active_element);
/*
* Set masked-off elements in the destination vector register to 1s.
* Store instructions simply skip this bit as memory ops access memory
* only for active elements.
*/
if (is_load) {
tcg_gen_shli_tl(mask_offs, mask_offs, s->sew);
tcg_gen_add_tl(mask_offs, mask_offs, dest);
st_fn(tcg_constant_tl(-1), (TCGv_ptr)mask_offs, 0);
}
tcg_gen_br(inc_k);
gen_set_label(active_element);
}
/*
* The element is active, calculate the address with stride:
     * target_ulong addr = base + stride * i + (k << log2_esz);
*/
tcg_gen_mul_tl(stride_offs, stride, i);
tcg_gen_shli_tl(k_esz, k, s->sew);
tcg_gen_add_tl(stride_offs, stride_offs, k_esz);
tcg_gen_add_tl(addr, base, stride_offs);
/* Calculate the offset in the dst/src vector register. */
tcg_gen_shli_tl(k_max, k, get_log2(max_elems));
tcg_gen_add_tl(dest_offs, i, k_max);
tcg_gen_shli_tl(dest_offs, dest_offs, s->sew);
tcg_gen_add_tl(dest_offs, dest_offs, dest);
if (is_load) {
tcg_gen_qemu_ld_tl(vreg, addr, s->mem_idx, MO_LE | s->sew | atomicity);
st_fn((TCGv)vreg, (TCGv_ptr)dest_offs, 0);
} else {
ld_fn((TCGv)vreg, (TCGv_ptr)dest_offs, 0);
tcg_gen_qemu_st_tl(vreg, addr, s->mem_idx, MO_LE | s->sew | atomicity);
}
/*
* We don't execute the load/store above if the element was inactive.
* We jump instead directly to incrementing k and continuing the loop.
*/
    if (!vm && s->vma) {
gen_set_label(inc_k);
}
tcg_gen_addi_tl(k, k, 1);
tcg_gen_br(start_k);
/* End of the inner loop. */
gen_set_label(end_k);
tcg_gen_addi_tl(i, i, 1);
tcg_gen_mov_tl(cpu_vstart, i);
tcg_gen_br(start);
/* End of the outer loop. */
gen_set_label(end);
return;
}
/*
* Set the tail bytes of the strided loads/stores to 1:
*
 * for (k = 0; k < nf; ++k) {
 *     cnt = (k * max_elems + vl) * esz;
 *     tot = (k * max_elems + max_elems) * esz;
 *     for (i = cnt; i < tot; i += esz) {
 *         store_1s(-1, vd[vl+i]);
 *     }
 * }
*/
static void gen_ldst_stride_tail_loop(DisasContext *s, TCGv dest, uint32_t nf,
gen_tl_ldst *st_fn)
{
TCGv i = tcg_temp_new();
TCGv k = tcg_temp_new();
TCGv tail_cnt = tcg_temp_new();
TCGv tail_tot = tcg_temp_new();
TCGv tail_addr = tcg_temp_new();
TCGLabel *start = gen_new_label();
TCGLabel *end = gen_new_label();
TCGLabel *start_i = gen_new_label();
TCGLabel *end_i = gen_new_label();
uint32_t max_elems_b = MAXSZ(s);
    uint32_t esz = 1 << s->sew;
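    /*
     * For instance, with VLEN=128, SEW=32 and LMUL=1 (max_elems_b = 16,
     * esz = 4) and vl = 3, the loop below fills bytes [12, 16) of the
     * first destination register with all-ones.
     */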
/* Start of the outer loop. */
tcg_gen_movi_tl(k, 0);
tcg_gen_shli_tl(tail_cnt, cpu_vl, s->sew);
tcg_gen_movi_tl(tail_tot, max_elems_b);
tcg_gen_add_tl(tail_addr, dest, tail_cnt);
gen_set_label(start);
tcg_gen_brcond_tl(TCG_COND_GE, k, tcg_constant_tl(nf), end);
/* Start of the inner loop. */
tcg_gen_mov_tl(i, tail_cnt);
gen_set_label(start_i);
tcg_gen_brcond_tl(TCG_COND_GE, i, tail_tot, end_i);
/* store_1s(-1, vd[vl+i]); */
st_fn(tcg_constant_tl(-1), (TCGv_ptr)tail_addr, 0);
tcg_gen_addi_tl(tail_addr, tail_addr, esz);
tcg_gen_addi_tl(i, i, esz);
tcg_gen_br(start_i);
/* End of the inner loop. */
gen_set_label(end_i);
/* Update the counts */
tcg_gen_addi_tl(tail_cnt, tail_cnt, max_elems_b);
tcg_gen_addi_tl(tail_tot, tail_cnt, max_elems_b);
tcg_gen_addi_tl(k, k, 1);
tcg_gen_br(start);
/* End of the outer loop. */
gen_set_label(end);
return;
}
static bool ldst_stride_trans(uint32_t vd, uint32_t rs1, uint32_t rs2,
uint32_t data, gen_helper_ldst_stride *fn,
DisasContext *s)
uint32_t data, DisasContext *s, bool is_load)
{
TCGv_ptr dest, mask;
TCGv base, stride;
TCGv_i32 desc;
if (!s->vstart_eq_zero) {
        return false;
}
dest = tcg_temp_new_ptr();
mask = tcg_temp_new_ptr();
base = get_gpr(s, rs1, EXT_NONE);
stride = get_gpr(s, rs2, EXT_NONE);
desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlenb,
s->cfg_ptr->vlenb, data));
TCGv dest = tcg_temp_new();
tcg_gen_addi_ptr(dest, tcg_env, vreg_ofs(s, vd));
tcg_gen_addi_ptr(mask, tcg_env, vreg_ofs(s, 0));
uint32_t nf = FIELD_EX32(data, VDATA, NF);
uint32_t vm = FIELD_EX32(data, VDATA, VM);
/* Destination register and mask register */
tcg_gen_addi_tl(dest, (TCGv)tcg_env, vreg_ofs(s, vd));
/*
 * Select the appropriate load/store to retrieve data from the vector
* register given a specific sew.
*/
static gen_tl_ldst * const ld_fns[4] = {
tcg_gen_ld8u_tl, tcg_gen_ld16u_tl,
tcg_gen_ld32u_tl, tcg_gen_ld_tl
};
static gen_tl_ldst * const st_fns[4] = {
tcg_gen_st8_tl, tcg_gen_st16_tl,
tcg_gen_st32_tl, tcg_gen_st_tl
};
gen_tl_ldst *ld_fn = ld_fns[s->sew];
gen_tl_ldst *st_fn = st_fns[s->sew];
if (ld_fn == NULL || st_fn == NULL) {
return false;
}
mark_vs_dirty(s);
fn(dest, mask, base, stride, tcg_env, desc);
gen_ldst_stride_main_loop(s, dest, rs1, rs2, vm, nf, ld_fn, st_fn, is_load);
tcg_gen_movi_tl(cpu_vstart, 0);
/*
* Set the tail bytes to 1 if tail agnostic:
*/
    if (s->vta != 0 && is_load) {
gen_ldst_stride_tail_loop(s, dest, nf, st_fn);
}
finalize_rvv_inst(s);
return true;
@@ -836,16 +1152,6 @@ static bool ldst_stride_trans(uint32_t vd, uint32_t rs1, uint32_t rs2,
static bool ld_stride_op(DisasContext *s, arg_rnfvm *a, uint8_t eew)
{
uint32_t data = 0;
gen_helper_ldst_stride *fn;
static gen_helper_ldst_stride * const fns[4] = {
gen_helper_vlse8_v, gen_helper_vlse16_v,
gen_helper_vlse32_v, gen_helper_vlse64_v
};
fn = fns[eew];
if (fn == NULL) {
return false;
}
uint8_t emul = vext_get_emul(s, eew);
data = FIELD_DP32(data, VDATA, VM, a->vm);
@@ -853,7 +1159,7 @@ static bool ld_stride_op(DisasContext *s, arg_rnfvm *a, uint8_t eew)
data = FIELD_DP32(data, VDATA, NF, a->nf);
data = FIELD_DP32(data, VDATA, VTA, s->vta);
data = FIELD_DP32(data, VDATA, VMA, s->vma);
    return ldst_stride_trans(a->rd, a->rs1, a->rs2, data, fn, s);
    return ldst_stride_trans(a->rd, a->rs1, a->rs2, data, s, true);
}
static bool ld_stride_check(DisasContext *s, arg_rnfvm* a, uint8_t eew)
@@ -871,23 +1177,13 @@ GEN_VEXT_TRANS(vlse64_v, MO_64, rnfvm, ld_stride_op, ld_stride_check)
static bool st_stride_op(DisasContext *s, arg_rnfvm *a, uint8_t eew)
{
uint32_t data = 0;
gen_helper_ldst_stride *fn;
static gen_helper_ldst_stride * const fns[4] = {
/* masked stride store */
gen_helper_vsse8_v, gen_helper_vsse16_v,
gen_helper_vsse32_v, gen_helper_vsse64_v
};
uint8_t emul = vext_get_emul(s, eew);
data = FIELD_DP32(data, VDATA, VM, a->vm);
data = FIELD_DP32(data, VDATA, LMUL, emul);
data = FIELD_DP32(data, VDATA, NF, a->nf);
fn = fns[eew];
if (fn == NULL) {
return false;
}
    return ldst_stride_trans(a->rd, a->rs1, a->rs2, data, fn, s);
return ldst_stride_trans(a->rd, a->rs1, a->rs2, data, s, false);
}
static bool st_stride_check(DisasContext *s, arg_rnfvm* a, uint8_t eew)
@@ -981,7 +1277,8 @@ static bool ld_index_check(DisasContext *s, arg_rnfvm* a, uint8_t eew)
{
    return require_rvv(s) &&
           vext_check_isa_ill(s) &&
           vext_check_ld_index(s, a->rd, a->rs2, a->nf, a->vm, eew);
           vext_check_ld_index(s, a->rd, a->rs2, a->nf, a->vm, eew) &&
vext_check_input_eew(s, -1, 0, a->rs2, eew, a->vm);
}
GEN_VEXT_TRANS(vlxei8_v, MO_8, rnfvm, ld_index_op, ld_index_check)
@@ -1033,7 +1330,8 @@ static bool st_index_check(DisasContext *s, arg_rnfvm* a, uint8_t eew)
{
    return require_rvv(s) &&
           vext_check_isa_ill(s) &&
           vext_check_st_index(s, a->rd, a->rs2, a->nf, eew);
           vext_check_st_index(s, a->rd, a->rs2, a->nf, eew) &&
vext_check_input_eew(s, a->rd, s->sew, a->rs2, eew, a->vm);
}
GEN_VEXT_TRANS(vsxei8_v, MO_8, rnfvm, st_index_op, st_index_check)
@@ -1100,25 +1398,86 @@ GEN_VEXT_TRANS(vle64ff_v, MO_64, r2nfvm, ldff_op, ld_us_check)
typedef void gen_helper_ldst_whole(TCGv_ptr, TCGv, TCGv_env, TCGv_i32);
static bool ldst_whole_trans(uint32_t vd, uint32_t rs1, uint32_t nf,
gen_helper_ldst_whole *fn,
DisasContext *s)
uint32_t log2_esz, gen_helper_ldst_whole *fn,
                             DisasContext *s, bool is_load)
{
TCGv_ptr dest;
TCGv base;
TCGv_i32 desc;
uint32_t data = FIELD_DP32(0, VDATA, NF, nf);
data = FIELD_DP32(data, VDATA, VM, 1);
dest = tcg_temp_new_ptr();
desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlenb,
s->cfg_ptr->vlenb, data));
base = get_gpr(s, rs1, EXT_NONE);
tcg_gen_addi_ptr(dest, tcg_env, vreg_ofs(s, vd));
mark_vs_dirty(s);
fn(dest, base, tcg_env, desc);
/*
* Load/store multiple bytes per iteration.
* When possible do this atomically.
* Update vstart with the number of processed elements.
* Use the helper function if either:
* - vstart is not 0.
* - the target has 32 bit registers and we are loading/storing 64 bit long
* elements. This is to ensure that we process every element with a single
* memory instruction.
*/
bool use_helper_fn = !(s->vstart_eq_zero) ||
                         (TCG_TARGET_REG_BITS == 32 && log2_esz == 3);
if (!use_helper_fn) {
TCGv addr = tcg_temp_new();
uint32_t size = s->cfg_ptr->vlenb * nf;
TCGv_i64 t8 = tcg_temp_new_i64();
TCGv_i32 t4 = tcg_temp_new_i32();
MemOp atomicity = MO_ATOM_NONE;
if (log2_esz == 0) {
atomicity = MO_ATOM_NONE;
} else {
atomicity = MO_ATOM_IFALIGN_PAIR;
}
if (TCG_TARGET_REG_BITS == 64) {
        for (int i = 0; i < size; i += 8) {
addr = get_address(s, rs1, i);
if (is_load) {
tcg_gen_qemu_ld_i64(t8, addr, s->mem_idx,
MO_LE | MO_64 | atomicity);
tcg_gen_st_i64(t8, tcg_env, vreg_ofs(s, vd) + i);
} else {
tcg_gen_ld_i64(t8, tcg_env, vreg_ofs(s, vd) + i);
tcg_gen_qemu_st_i64(t8, addr, s->mem_idx,
MO_LE | MO_64 | atomicity);
}
if (i == size - 8) {
tcg_gen_movi_tl(cpu_vstart, 0);
} else {
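                    /* each 8-byte access covers 8 >> log2_esz elements, e.g. 2 at EEW=32 */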
tcg_gen_addi_tl(cpu_vstart, cpu_vstart, 8 >> log2_esz);
}
}
} else {
        for (int i = 0; i < size; i += 4) {
addr = get_address(s, rs1, i);
if (is_load) {
tcg_gen_qemu_ld_i32(t4, addr, s->mem_idx,
MO_LE | MO_32 | atomicity);
tcg_gen_st_i32(t4, tcg_env, vreg_ofs(s, vd) + i);
} else {
tcg_gen_ld_i32(t4, tcg_env, vreg_ofs(s, vd) + i);
tcg_gen_qemu_st_i32(t4, addr, s->mem_idx,
MO_LE | MO_32 | atomicity);
}
if (i == size - 4) {
tcg_gen_movi_tl(cpu_vstart, 0);
} else {
tcg_gen_addi_tl(cpu_vstart, cpu_vstart, 4 >> log2_esz);
}
}
}
} else {
TCGv_ptr dest;
TCGv base;
TCGv_i32 desc;
uint32_t data = FIELD_DP32(0, VDATA, NF, nf);
data = FIELD_DP32(data, VDATA, VM, 1);
dest = tcg_temp_new_ptr();
desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlenb,
s->cfg_ptr->vlenb, data));
base = get_gpr(s, rs1, EXT_NONE);
tcg_gen_addi_ptr(dest, tcg_env, vreg_ofs(s, vd));
fn(dest, base, tcg_env, desc);
}
finalize_rvv_inst(s);
return true;
@@ -1128,58 +1487,47 @@ static bool ldst_whole_trans(uint32_t vd, uint32_t rs1, uint32_t nf,
* load and store whole register instructions ignore vtype and vl setting.
* Thus, we don't need to check vill bit. (Section 7.9)
*/
#define GEN_LDST_WHOLE_TRANS(NAME, ARG_NF) \
static bool trans_##NAME(DisasContext *s, arg_##NAME * a) \
{ \
    if (require_rvv(s) && \
QEMU_IS_ALIGNED(a->rd, ARG_NF)) { \
return ldst_whole_trans(a->rd, a->rs1, ARG_NF, \
gen_helper_##NAME, s); \
} \
return false; \
}
GEN_LDST_WHOLE_TRANS(vl1re8_v, 1)
GEN_LDST_WHOLE_TRANS(vl1re16_v, 1)
GEN_LDST_WHOLE_TRANS(vl1re32_v, 1)
GEN_LDST_WHOLE_TRANS(vl1re64_v, 1)
GEN_LDST_WHOLE_TRANS(vl2re8_v, 2)
GEN_LDST_WHOLE_TRANS(vl2re16_v, 2)
GEN_LDST_WHOLE_TRANS(vl2re32_v, 2)
GEN_LDST_WHOLE_TRANS(vl2re64_v, 2)
GEN_LDST_WHOLE_TRANS(vl4re8_v, 4)
GEN_LDST_WHOLE_TRANS(vl4re16_v, 4)
GEN_LDST_WHOLE_TRANS(vl4re32_v, 4)
GEN_LDST_WHOLE_TRANS(vl4re64_v, 4)
GEN_LDST_WHOLE_TRANS(vl8re8_v, 8)
GEN_LDST_WHOLE_TRANS(vl8re16_v, 8)
GEN_LDST_WHOLE_TRANS(vl8re32_v, 8)
GEN_LDST_WHOLE_TRANS(vl8re64_v, 8)
#define GEN_LDST_WHOLE_TRANS(NAME, ETYPE, ARG_NF, IS_LOAD) \
static bool trans_##NAME(DisasContext *s, arg_##NAME * a) \
{ \
    if (require_rvv(s) && \
QEMU_IS_ALIGNED(a->rd, ARG_NF)) { \
return ldst_whole_trans(a->rd, a->rs1, ARG_NF, ctzl(sizeof(ETYPE)), \
                                gen_helper_##NAME, s, IS_LOAD); \
} \
return false; \
}
GEN_LDST_WHOLE_TRANS(vl1re8_v,  int8_t,  1, true)
GEN_LDST_WHOLE_TRANS(vl1re16_v, int16_t, 1, true)
GEN_LDST_WHOLE_TRANS(vl1re32_v, int32_t, 1, true)
GEN_LDST_WHOLE_TRANS(vl1re64_v, int64_t, 1, true)
GEN_LDST_WHOLE_TRANS(vl2re8_v,  int8_t,  2, true)
GEN_LDST_WHOLE_TRANS(vl2re16_v, int16_t, 2, true)
GEN_LDST_WHOLE_TRANS(vl2re32_v, int32_t, 2, true)
GEN_LDST_WHOLE_TRANS(vl2re64_v, int64_t, 2, true)
GEN_LDST_WHOLE_TRANS(vl4re8_v,  int8_t,  4, true)
GEN_LDST_WHOLE_TRANS(vl4re16_v, int16_t, 4, true)
GEN_LDST_WHOLE_TRANS(vl4re32_v, int32_t, 4, true)
GEN_LDST_WHOLE_TRANS(vl4re64_v, int64_t, 4, true)
GEN_LDST_WHOLE_TRANS(vl8re8_v,  int8_t,  8, true)
GEN_LDST_WHOLE_TRANS(vl8re16_v, int16_t, 8, true)
GEN_LDST_WHOLE_TRANS(vl8re32_v, int32_t, 8, true)
GEN_LDST_WHOLE_TRANS(vl8re64_v, int64_t, 8, true)
/*
* The vector whole register store instructions are encoded similar to
* unmasked unit-stride store of elements with EEW=8.
*/
GEN_LDST_WHOLE_TRANS(vs1r_v, 1)
GEN_LDST_WHOLE_TRANS(vs2r_v, 2)
GEN_LDST_WHOLE_TRANS(vs4r_v, 4)
GEN_LDST_WHOLE_TRANS(vs8r_v, 8)
GEN_LDST_WHOLE_TRANS(vs1r_v, int8_t, 1, false)
GEN_LDST_WHOLE_TRANS(vs2r_v, int8_t, 2, false)
GEN_LDST_WHOLE_TRANS(vs4r_v, int8_t, 4, false)
GEN_LDST_WHOLE_TRANS(vs8r_v, int8_t, 8, false)
/*
*** Vector Integer Arithmetic Instructions
*/
/*
* MAXSZ returns the maximum vector size can be operated in bytes,
* which is used in GVEC IR when vl_eq_vlmax flag is set to true
* to accelerate vector operation.
*/
static inline uint32_t MAXSZ(DisasContext *s)
{
int max_sz = s->cfg_ptr->vlenb * 8;
return max_sz >> (3 - s->lmul);
}
static bool opivv_check(DisasContext *s, arg_rmrr *a)
{
    return require_rvv(s) &&
@@ -1475,6 +1823,16 @@ static bool opivv_widen_check(DisasContext *s, arg_rmrr *a)
vext_check_dss(s, a->rd, a->rs1, a->rs2, a->vm);
}
/*
 * OPIVV with overwrite and WIDEN: the widening multiply-add instructions
 * also read vd at EEW = 2 * SEW, so vd must pass the mismatched input
 * EEW check against both single-width sources.
 */
static bool opivv_overwrite_widen_check(DisasContext *s, arg_rmrr *a)
{
    return require_rvv(s) &&
           vext_check_isa_ill(s) &&
           vext_check_dss(s, a->rd, a->rs1, a->rs2, a->vm) &&
           vext_check_input_eew(s, a->rd, s->sew + 1, a->rs1, s->sew, a->vm) &&
           vext_check_input_eew(s, a->rd, s->sew + 1, a->rs2, s->sew, a->vm);
}
static bool do_opivv_widen(DisasContext *s, arg_rmrr *a,
gen_helper_gvec_4_ptr *fn,
bool (*checkfn)(DisasContext *, arg_rmrr *))
@@ -1522,6 +1880,14 @@ static bool opivx_widen_check(DisasContext *s, arg_rmrr *a)
vext_check_ds(s, a->rd, a->rs2, a->vm);
}
static bool opivx_overwrite_widen_check(DisasContext *s, arg_rmrr *a)
{
    return require_rvv(s) &&
           vext_check_isa_ill(s) &&
           vext_check_ds(s, a->rd, a->rs2, a->vm) &&
           vext_check_input_eew(s, a->rd, s->sew + 1, a->rs2, s->sew, a->vm);
}
#define GEN_OPIVX_WIDEN_TRANS(NAME, CHECK) \
static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
{ \
@@ -1993,13 +2359,13 @@ GEN_OPIVX_TRANS(vmadd_vx, opivx_check)
GEN_OPIVX_TRANS(vnmsub_vx, opivx_check)
/* Vector Widening Integer Multiply-Add Instructions */
GEN_OPIVV_WIDEN_TRANS(vwmaccu_vv, opivv_widen_check)
GEN_OPIVV_WIDEN_TRANS(vwmacc_vv, opivv_widen_check)
GEN_OPIVV_WIDEN_TRANS(vwmaccsu_vv, opivv_widen_check)
GEN_OPIVX_WIDEN_TRANS(vwmaccu_vx, opivx_widen_check)
GEN_OPIVX_WIDEN_TRANS(vwmacc_vx, opivx_widen_check)
GEN_OPIVX_WIDEN_TRANS(vwmaccsu_vx, opivx_widen_check)
GEN_OPIVX_WIDEN_TRANS(vwmaccus_vx, opivx_widen_check)
GEN_OPIVV_WIDEN_TRANS(vwmaccu_vv, opivv_overwrite_widen_check)
GEN_OPIVV_WIDEN_TRANS(vwmacc_vv, opivv_overwrite_widen_check)
GEN_OPIVV_WIDEN_TRANS(vwmaccsu_vv, opivv_overwrite_widen_check)
GEN_OPIVX_WIDEN_TRANS(vwmaccu_vx, opivx_overwrite_widen_check)
GEN_OPIVX_WIDEN_TRANS(vwmacc_vx, opivx_overwrite_widen_check)
GEN_OPIVX_WIDEN_TRANS(vwmaccsu_vx, opivx_overwrite_widen_check)
GEN_OPIVX_WIDEN_TRANS(vwmaccus_vx, opivx_overwrite_widen_check)
/* Vector Integer Merge and Move Instructions */
static bool trans_vmv_v_v(DisasContext *s, arg_vmv_v_v *a)
@@ -2340,6 +2706,17 @@ static bool opfvv_widen_check(DisasContext *s, arg_rmrr *a)
vext_check_dss(s, a->rd, a->rs1, a->rs2, a->vm);
}
static bool opfvv_overwrite_widen_check(DisasContext *s, arg_rmrr *a)
{
    return require_rvv(s) &&
           require_rvf(s) &&
           require_scale_rvf(s) &&
           vext_check_isa_ill(s) &&
           vext_check_dss(s, a->rd, a->rs1, a->rs2, a->vm) &&
           vext_check_input_eew(s, a->rd, s->sew + 1, a->rs1, s->sew, a->vm) &&
           vext_check_input_eew(s, a->rd, s->sew + 1, a->rs2, s->sew, a->vm);
}
/* OPFVV with WIDEN */
#define GEN_OPFVV_WIDEN_TRANS(NAME, CHECK) \
static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
@@ -2379,11 +2756,21 @@ static bool opfvf_widen_check(DisasContext *s, arg_rmrr *a)
vext_check_ds(s, a->rd, a->rs2, a->vm);
}
static bool opfvf_overwrite_widen_check(DisasContext *s, arg_rmrr *a)
{
    return require_rvv(s) &&
           require_rvf(s) &&
           require_scale_rvf(s) &&
           vext_check_isa_ill(s) &&
           vext_check_ds(s, a->rd, a->rs2, a->vm) &&
           vext_check_input_eew(s, a->rd, s->sew + 1, a->rs2, s->sew, a->vm);
}
/* OPFVF with WIDEN */
#define GEN_OPFVF_WIDEN_TRANS(NAME) \
#define GEN_OPFVF_WIDEN_TRANS(NAME, CHECK) \
static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
{ \
if (opfvf_widen_check(s, a)) { \
if (CHECK(s, a)) { \
uint32_t data = 0; \
static gen_helper_opfvf *const fns[2] = { \
gen_helper_##NAME##_h, gen_helper_##NAME##_w, \
@@ -2399,8 +2786,8 @@ static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
return false; \
}
GEN_OPFVF_WIDEN_TRANS(vfwadd_vf)
GEN_OPFVF_WIDEN_TRANS(vfwsub_vf)
GEN_OPFVF_WIDEN_TRANS(vfwadd_vf, opfvf_widen_check)
GEN_OPFVF_WIDEN_TRANS(vfwsub_vf, opfvf_widen_check)
static bool opfwv_widen_check(DisasContext *s, arg_rmrr *a)
{
@@ -2482,7 +2869,7 @@ GEN_OPFVF_TRANS(vfrdiv_vf, opfvf_check)
/* Vector Widening Floating-Point Multiply */
GEN_OPFVV_WIDEN_TRANS(vfwmul_vv, opfvv_widen_check)
GEN_OPFVF_WIDEN_TRANS(vfwmul_vf)
GEN_OPFVF_WIDEN_TRANS(vfwmul_vf, opfvf_widen_check)
/* Vector Single-Width Floating-Point Fused Multiply-Add Instructions */
GEN_OPFVV_TRANS(vfmacc_vv, opfvv_check)
@@ -2503,14 +2890,14 @@ GEN_OPFVF_TRANS(vfnmsub_vf, opfvf_check)
GEN_OPFVF_TRANS(vfnmsub_vf, opfvf_check)
/* Vector Widening Floating-Point Fused Multiply-Add Instructions */
GEN_OPFVV_WIDEN_TRANS(vfwmacc_vv, opfvv_widen_check)
GEN_OPFVV_WIDEN_TRANS(vfwnmacc_vv, opfvv_widen_check)
GEN_OPFVV_WIDEN_TRANS(vfwmsac_vv, opfvv_widen_check)
GEN_OPFVV_WIDEN_TRANS(vfwnmsac_vv, opfvv_widen_check)
GEN_OPFVF_WIDEN_TRANS(vfwmacc_vf)
GEN_OPFVF_WIDEN_TRANS(vfwnmacc_vf)
GEN_OPFVF_WIDEN_TRANS(vfwmsac_vf)
GEN_OPFVF_WIDEN_TRANS(vfwnmsac_vf)
GEN_OPFVV_WIDEN_TRANS(vfwmacc_vv, opfvv_overwrite_widen_check)
GEN_OPFVV_WIDEN_TRANS(vfwnmacc_vv, opfvv_overwrite_widen_check)
GEN_OPFVV_WIDEN_TRANS(vfwmsac_vv, opfvv_overwrite_widen_check)
GEN_OPFVV_WIDEN_TRANS(vfwnmsac_vv, opfvv_overwrite_widen_check)
GEN_OPFVF_WIDEN_TRANS(vfwmacc_vf, opfvf_overwrite_widen_check)
GEN_OPFVF_WIDEN_TRANS(vfwnmacc_vf, opfvf_overwrite_widen_check)
GEN_OPFVF_WIDEN_TRANS(vfwmsac_vf, opfvf_overwrite_widen_check)
GEN_OPFVF_WIDEN_TRANS(vfwnmsac_vf, opfvf_overwrite_widen_check)
/* Vector Floating-Point Square-Root Instruction */
@@ -3426,6 +3813,7 @@ static bool vrgather_vv_check(DisasContext *s, arg_rmrr *a)
{
    return require_rvv(s) &&
           vext_check_isa_ill(s) &&
           vext_check_input_eew(s, a->rs1, s->sew, a->rs2, s->sew, a->vm) &&
           require_align(a->rd, s->lmul) &&
           require_align(a->rs1, s->lmul) &&
           require_align(a->rs2, s->lmul) &&
@@ -3438,6 +3826,7 @@ static bool vrgatherei16_vv_check(DisasContext *s, arg_rmrr *a)
int8_t emul = MO_16 - s->sew + s->lmul;
    return require_rvv(s) &&
           vext_check_isa_ill(s) &&
           vext_check_input_eew(s, a->rs1, MO_16, a->rs2, s->sew, a->vm) &&
           (emul >= -3 && emul <= 3) &&
           require_align(a->rd, s->lmul) &&
           require_align(a->rs1, emul) &&
@@ -3457,6 +3846,7 @@ static bool vrgather_vx_check(DisasContext *s, arg_rmrr *a)
{
    return require_rvv(s) &&
           vext_check_isa_ill(s) &&
           vext_check_input_eew(s, -1, MO_64, a->rs2, s->sew, a->vm) &&
           require_align(a->rd, s->lmul) &&
           require_align(a->rs2, s->lmul) &&
           (a->rd != a->rs2) &&
@@ -3600,7 +3990,9 @@ static bool int_ext_check(DisasContext *s, arg_rmr *a, uint8_t div)
               require_align(a->rd, s->lmul) &&
               require_align(a->rs2, s->lmul - div) &&
               require_vm(a->vm, a->rd) &&
               require_noover(a->rd, s->lmul, a->rs2, s->lmul - div);
               require_noover(a->rd, s->lmul, a->rs2, s->lmul - div) &&
               vext_check_input_eew(s, -1, 0, a->rs2, s->sew, a->vm);
return ret;
}