Browse Source

target/arm: Implement PMOV for SME2p1/SVE2p1

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20250704142112.1018902-91-richard.henderson@linaro.org
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
pull/294/head
Richard Henderson 9 months ago
committed by Peter Maydell
parent
commit
e421e4e972
  1. 8
      target/arm/tcg/helper-sve.h
  2. 17
      target/arm/tcg/sve.decode
  3. 50
      target/arm/tcg/sve_helper.c
  4. 98
      target/arm/tcg/translate-sve.c
  5. 34
      target/arm/tcg/vec_internal.h

8
target/arm/tcg/helper-sve.h

@@ -3020,3 +3020,11 @@ DEF_HELPER_FLAGS_4(sve2p1_andqv_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(sve2p1_andqv_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(sve2p1_andqv_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(sve2p1_andqv_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
/* PMOV: move predicate to vector (pv) and vector to predicate (vp). */
DEF_HELPER_FLAGS_3(pmov_pv_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
DEF_HELPER_FLAGS_3(pmov_pv_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
DEF_HELPER_FLAGS_3(pmov_pv_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
DEF_HELPER_FLAGS_3(pmov_vp_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
DEF_HELPER_FLAGS_3(pmov_vp_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
DEF_HELPER_FLAGS_3(pmov_vp_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)

17
target/arm/tcg/sve.decode

@@ -30,6 +30,7 @@
%size_23        23:2
%dtype_23_13    23:2 13:2
%index3_22_19   22:1 19:2
# 3-bit index: bit 22 is the high bit, bits 18:17 the low bits (PMOV .D)
%index3_22_17   22:1 17:2
%index3_19_11   19:2 11:1
%index2_20_11   20:1 11:1
@@ -594,6 +595,22 @@ INSR_r 00000101 .. 1 00100 001110 ..... ..... @rdn_rm
# SVE reverse vector elements
REV_v 00000101 .. 1 11000 001110 ..... ..... @rd_rn
# SVE move predicate to/from vector
# PMOV to predicate: the immediate selects a bit-segment of Zn.
# One encoding per element size; for esz=0 the immediate is
# hard-coded to 0 by the encoding.
PMOV_pv         00000101 00 101 01 0001110 rn:5 0 rd:4 \
                &rri_esz esz=0 imm=0
PMOV_pv         00000101 00 101 1 imm:1 0001110 rn:5 0 rd:4 &rri_esz esz=1
PMOV_pv         00000101 01 101 imm:2 0001110 rn:5 0 rd:4 &rri_esz esz=2
PMOV_pv         00000101 1. 101 .. 0001110 rn:5 0 rd:4 \
                &rri_esz esz=3 imm=%index3_22_17

# PMOV to vector: same imm/esz arrangement, opposite direction
# (note rn is the 4-bit predicate, rd the 5-bit vector).
PMOV_vp         00000101 00 101 01 1001110 0 rn:4 rd:5 \
                &rri_esz esz=0 imm=0
PMOV_vp         00000101 00 101 1 imm:1 1001110 0 rn:4 rd:5 &rri_esz esz=1
PMOV_vp         00000101 01 101 imm:2 1001110 0 rn:4 rd:5 &rri_esz esz=2
PMOV_vp         00000101 1. 101 .. 1001110 0 rn:4 rd:5 \
                &rri_esz esz=3 imm=%index3_22_17
# SVE vector table lookup
TBL 00000101 .. 1 ..... 001100 ..... ..... @rd_rn_rm

50
target/arm/tcg/sve_helper.c

@@ -3035,6 +3035,56 @@ void HELPER(sve_rev_d)(void *vd, void *vn, uint32_t desc)
}
}
/*
 * PMOV (to predicate): for each element e, read one bit from vector
 * @vs at bit position (elements * idx + e) -- i.e. from the bit
 * segment selected by idx -- and write it to bit 0 of element e of
 * predicate @vd.  All other predicate bits are cleared.
 *
 * TODO: This could use half_shuffle64 and similar bit tricks to
 * expand blocks of bits at once.
 */
#define DO_PMOV_PV(NAME, ESIZE)                                         \
void HELPER(NAME)(void *vd, void *vs, uint32_t desc)                    \
{                                                                       \
    unsigned vl = simd_oprsz(desc);   /* vector length in bytes */      \
    unsigned idx = simd_data(desc);   /* segment index (imm) */         \
    unsigned elements = vl / ESIZE;                                     \
    ARMPredicateReg *d = vd;                                            \
    ARMVectorReg *s = vs;                                               \
    /* Zero the whole predicate; only bit 0 per element is set below. */\
    memset(d, 0, sizeof(*d));                                           \
    for (unsigned e = 0; e < elements; ++e) {                           \
        /* Predicate bit for element e lives at bit e * ESIZE. */       \
        depositn(d->p, e * ESIZE, 1, extractn(s->d, elements * idx + e, 1)); \
    }                                                                   \
}

DO_PMOV_PV(pmov_pv_h, 2)
DO_PMOV_PV(pmov_pv_s, 4)
DO_PMOV_PV(pmov_pv_d, 8)

#undef DO_PMOV_PV
/*
 * PMOV (to vector): for each element e, read bit 0 of element e of
 * predicate @vs and deposit it at bit position (elements * idx + e)
 * of vector @vd, i.e. into the bit segment selected by idx.
 * For idx == 0 the entire destination is zeroed first; for nonzero
 * idx only the selected segment's bits are written, leaving the rest
 * of the destination unchanged.
 *
 * TODO: This could use half_unshuffle64 and similar bit tricks to
 * compress blocks of bits at once.
 */
#define DO_PMOV_VP(NAME, ESIZE)                                         \
void HELPER(NAME)(void *vd, void *vs, uint32_t desc)                    \
{                                                                       \
    unsigned vl = simd_oprsz(desc);   /* vector length in bytes */      \
    unsigned idx = simd_data(desc);   /* segment index (imm) */         \
    unsigned elements = vl / ESIZE;                                     \
    ARMVectorReg *d = vd;                                               \
    ARMPredicateReg *s = vs;                                            \
    if (idx == 0) {                                                     \
        memset(d, 0, vl);                                               \
    }                                                                   \
    for (unsigned e = 0; e < elements; ++e) {                           \
        depositn(d->d, elements * idx + e, 1, extractn(s->p, e * ESIZE, 1)); \
    }                                                                   \
}

DO_PMOV_VP(pmov_vp_h, 2)
DO_PMOV_VP(pmov_vp_s, 4)
DO_PMOV_VP(pmov_vp_d, 8)

#undef DO_PMOV_VP
typedef void tb_impl_fn(void *, void *, void *, void *, uintptr_t, bool);
static inline void do_tbl1(void *vd, void *vn, void *vm, uint32_t desc,

98
target/arm/tcg/translate-sve.c

@@ -2386,6 +2386,104 @@ static gen_helper_gvec_3 * const tbx_fns[4] = {
};
TRANS_FEAT(TBX, aa64_sve2, gen_gvec_ool_arg_zzz, tbx_fns[a->esz], a, 0)
/*
 * PMOV (vector to predicate).  For .H/.S/.D go out-of-line to the
 * pmov_pv_* helpers; for .B the predicate is simply the low PL bytes
 * of Zn, which we can copy inline with gvec moves plus a tail load.
 */
static bool trans_PMOV_pv(DisasContext *s, arg_PMOV_pv *a)
{
    static gen_helper_gvec_2 * const fns[4] = {
        NULL, gen_helper_pmov_pv_h,
        gen_helper_pmov_pv_s, gen_helper_pmov_pv_d
    };
    unsigned vl, pl, vofs, pofs;
    TCGv_i64 tmp;

    if (!dc_isar_feature(aa64_sme2p1_or_sve2p1, s)) {
        return false;
    }
    if (!sve_access_check(s)) {
        return true;
    }

    vl = vec_full_reg_size(s);
    if (a->esz != MO_8) {
        /* Non-byte element sizes use the out-of-line helper. */
        tcg_gen_gvec_2_ool(pred_full_reg_offset(s, a->rd),
                           vec_full_reg_offset(s, a->rn),
                           vl, vl, a->imm, fns[a->esz]);
        return true;
    }

    /*
     * Copy the low PL bytes from vector Zn, zero-extending to a
     * multiple of 8 bytes, so that Pd is properly cleared.
     */
    pl = vl / 8;
    pofs = pred_full_reg_offset(s, a->rd);
    vofs = vec_full_reg_offset(s, a->rn);

    QEMU_BUILD_BUG_ON(sizeof(ARMPredicateReg) != 32);
    /* Copy power-of-two sized chunks of pl, largest first. */
    for (unsigned i = 32; i >= 8; i >>= 1) {
        if (pl & i) {
            tcg_gen_gvec_mov(MO_64, pofs, vofs, i, i);
            pofs += i;
            vofs += i;
        }
    }
    /*
     * Remaining 2/4/6-byte tail: load it zero-extended to 64 bits.
     * The HOST_BIG_ENDIAN offsets pick out the low-addressed guest
     * bytes of the 8-byte word at vofs.
     */
    switch (pl & 7) {
    case 0:
        return true;
    case 2:
        tmp = tcg_temp_new_i64();
        tcg_gen_ld16u_i64(tmp, tcg_env, vofs + (HOST_BIG_ENDIAN ? 6 : 0));
        break;
    case 4:
        tmp = tcg_temp_new_i64();
        tcg_gen_ld32u_i64(tmp, tcg_env, vofs + (HOST_BIG_ENDIAN ? 4 : 0));
        break;
    case 6:
        tmp = tcg_temp_new_i64();
        tcg_gen_ld_i64(tmp, tcg_env, vofs);
        tcg_gen_extract_i64(tmp, tmp, 0, 48);
        break;
    default:
        /* pl is always even, so odd remainders cannot occur. */
        g_assert_not_reached();
    }
    /* Store the zero-extended tail as a full 8-byte word. */
    tcg_gen_st_i64(tmp, tcg_env, pofs);
    return true;
}
/*
 * PMOV (predicate to vector).  For .H/.S/.D go out-of-line to the
 * pmov_vp_* helpers; for .B Zd is just Pn zero-extended to VL bytes.
 * Note arg_PMOV_pv is shared: both patterns decode to &rri_esz.
 */
static bool trans_PMOV_vp(DisasContext *s, arg_PMOV_pv *a)
{
    static gen_helper_gvec_2 * const fns[4] = {
        NULL, gen_helper_pmov_vp_h,
        gen_helper_pmov_vp_s, gen_helper_pmov_vp_d
    };
    unsigned vl;

    if (!dc_isar_feature(aa64_sme2p1_or_sve2p1, s)) {
        return false;
    }
    if (!sve_access_check(s)) {
        return true;
    }

    vl = vec_full_reg_size(s);
    if (a->esz == MO_8) {
        /*
         * The low PL bytes are copied from Pn to Zd unchanged.
         * We know that the unused portion of Pn is zero, and
         * that imm == 0, so the balance of Zd must be zeroed.
         */
        tcg_gen_gvec_mov(MO_64, vec_full_reg_offset(s, a->rd),
                         pred_full_reg_offset(s, a->rn),
                         size_for_gvec(vl / 8), vl);
    } else {
        tcg_gen_gvec_2_ool(vec_full_reg_offset(s, a->rd),
                           pred_full_reg_offset(s, a->rn),
                           vl, vl, a->imm, fns[a->esz]);
    }
    return true;
}
static bool trans_UNPK(DisasContext *s, arg_UNPK *a)
{
static gen_helper_gvec_2 * const fns[4][2] = {

34
target/arm/tcg/vec_internal.h

@@ -411,4 +411,38 @@ decode_counter(unsigned png, unsigned vl, unsigned v_esz)
return ret;
}
/*
 * Extract @len bits from an array of uint64_t at offset @pos bits.
 * The field may straddle a 64-bit word boundary; @len must be <= 64.
 */
static inline uint64_t extractn(uint64_t *p, unsigned pos, unsigned len)
{
    const uint64_t *word = p + pos / 64;
    unsigned bit = pos & 63;
    uint64_t lo = word[0];

    if (bit + len <= 64) {
        /* Entire field lies within one word. */
        return extract64(lo, bit, len);
    }
    /* Field straddles two words: stitch them together, then extract. */
    return extract64((lo >> bit) | (word[1] << (-bit & 63)), 0, len);
}
/*
 * Deposit @len bits of @val into an array of uint64_t at offset
 * @pos bits.  The field may straddle a 64-bit word boundary;
 * @len must be <= 64.
 */
static inline void depositn(uint64_t *p, unsigned pos,
                            unsigned len, uint64_t val)
{
    uint64_t *word = p + pos / 64;
    unsigned bit = pos & 63;

    if (bit + len > 64) {
        /* Field straddles two words: split into low and high parts. */
        unsigned lo_len = 64 - bit;

        word[0] = deposit64(word[0], bit, lo_len, val);
        word[1] = deposit64(word[1], 0, len - lo_len, val >> lo_len);
    } else {
        word[0] = deposit64(word[0], bit, len, val);
    }
}
#endif /* TARGET_ARM_VEC_INTERNAL_H */

Loading…
Cancel
Save