@ -2386,6 +2386,104 @@ static gen_helper_gvec_3 * const tbx_fns[4] = {
} ;
TRANS_FEAT ( TBX , aa64_sve2 , gen_gvec_ool_arg_zzz , tbx_fns [ a - > esz ] , a , 0 )
/*
 * PMOV (vector to predicate): copy a slice of vector Zn into predicate
 * register Pd.  For element sizes h/s/d the work is done out-of-line by
 * per-size helpers; the MO_8 case is open-coded below with inline TCG ops.
 * Returns false if the insn is not provided by this CPU (no SVE2p1/SME2p1),
 * true otherwise (including the trapped-access case).
 */
static bool trans_PMOV_pv(DisasContext *s, arg_PMOV_pv *a)
{
    /* Helper per element size; index 0 (MO_8) is handled inline below. */
    static gen_helper_gvec_2 * const fns[4] = {
        NULL, gen_helper_pmov_pv_h,
        gen_helper_pmov_pv_s, gen_helper_pmov_pv_d
    };
    unsigned vl, pl, vofs, pofs;
    TCGv_i64 tmp;

    if (!dc_isar_feature(aa64_sme2p1_or_sve2p1, s)) {
        return false;
    }
    if (!sve_access_check(s)) {
        /* Access trapped; an exception has been raised.  Insn "done". */
        return true;
    }

    vl = vec_full_reg_size(s);
    if (a->esz != MO_8) {
        /* h/s/d element sizes: defer to the out-of-line helper. */
        tcg_gen_gvec_2_ool(pred_full_reg_offset(s, a->rd),
                           vec_full_reg_offset(s, a->rn),
                           vl, vl, a->imm, fns[a->esz]);
        return true;
    }

    /*
     * Copy the low PL bytes from vector Zn, zero-extending to a
     * multiple of 8 bytes, so that Pd is properly cleared.
     */
    pl = vl / 8;                               /* predicate length in bytes */
    pofs = pred_full_reg_offset(s, a->rd);
    vofs = vec_full_reg_offset(s, a->rn);

    /* The 32-byte starting chunk below assumes this fixed size. */
    QEMU_BUILD_BUG_ON(sizeof(ARMPredicateReg) != 32);
    /* Copy in power-of-two chunks: one chunk per set bit of pl >= 8. */
    for (unsigned i = 32; i >= 8; i >>= 1) {
        if (pl & i) {
            tcg_gen_gvec_mov(MO_64, pofs, vofs, i, i);
            pofs += i;
            vofs += i;
        }
    }

    /*
     * Handle the sub-8-byte remainder with a zero-extended 64-bit load
     * and store.  VL is a multiple of 16 bytes, so pl is even and the
     * odd cases cannot occur.  The HOST_BIG_ENDIAN offsets select the
     * low-addressed bytes of the 8-byte element on either host order.
     */
    switch (pl & 7) {
    case 0:
        return true;
    case 2:
        tmp = tcg_temp_new_i64();
        tcg_gen_ld16u_i64(tmp, tcg_env, vofs + (HOST_BIG_ENDIAN ? 6 : 0));
        break;
    case 4:
        tmp = tcg_temp_new_i64();
        tcg_gen_ld32u_i64(tmp, tcg_env, vofs + (HOST_BIG_ENDIAN ? 4 : 0));
        break;
    case 6:
        tmp = tcg_temp_new_i64();
        tcg_gen_ld_i64(tmp, tcg_env, vofs);
        /* Keep only the low 48 bits, zeroing the rest. */
        tcg_gen_extract_i64(tmp, tmp, 0, 48);
        break;
    default:
        g_assert_not_reached();
    }
    /* Store the zero-extended remainder, clearing the tail of Pd. */
    tcg_gen_st_i64(tmp, tcg_env, pofs);
    return true;
}
/*
 * PMOV (predicate to vector): copy predicate Pn into vector Zd.
 * Element sizes h/s/d are handled by out-of-line helpers; MO_8 is a
 * direct byte copy with the balance of Zd zeroed.
 */
static bool trans_PMOV_vp(DisasContext *s, arg_PMOV_pv *a)
{
    /* One out-of-line helper per element size; MO_8 handled inline. */
    static gen_helper_gvec_2 * const helpers[4] = {
        NULL, gen_helper_pmov_vp_h,
        gen_helper_pmov_vp_s, gen_helper_pmov_vp_d
    };

    if (!dc_isar_feature(aa64_sme2p1_or_sve2p1, s)) {
        return false;
    }
    if (sve_access_check(s)) {
        unsigned bytes = vec_full_reg_size(s);
        unsigned dofs = vec_full_reg_offset(s, a->rd);
        unsigned sofs = pred_full_reg_offset(s, a->rn);

        if (a->esz != MO_8) {
            tcg_gen_gvec_2_ool(dofs, sofs, bytes, bytes, a->imm,
                               helpers[a->esz]);
        } else {
            /*
             * The low PL bytes are copied from Pn to Zd unchanged.
             * We know that the unused portion of Pn is zero, and
             * that imm == 0, so the balance of Zd must be zeroed.
             */
            tcg_gen_gvec_mov(MO_64, dofs, sofs,
                             size_for_gvec(bytes / 8), bytes);
        }
    }
    return true;
}
static bool trans_UNPK ( DisasContext * s , arg_UNPK * a )
{
static gen_helper_gvec_2 * const fns [ 4 ] [ 2 ] = {