@ -2694,6 +2694,74 @@ void tcg_gen_gvec_sari(unsigned vece, uint32_t dofs, uint32_t aofs,
}
}
/*
 * Rotate each 8-bit lane of @a left by immediate @c, writing the result
 * to @d.  Built from full-width shifts: (lane << c) | (lane >> (8 - c)),
 * with per-lane masks discarding bits that crossed a lane boundary.
 * Clobbers @a.  NOTE(review): assumes 0 <= c < 8 (tcg_gen_gvec_rotli
 * asserts this and routes c == 0 to a plain move) — confirm all callers.
 */
void tcg_gen_vec_rotl8i_i64(TCGv_i64 d, TCGv_i64 a, int64_t c)
{
    /* Bits of each lane that the left shift keeps in-lane. */
    uint64_t mask = dup_const(MO_8, 0xff << c);

    tcg_gen_shli_i64(d, a, c);
    tcg_gen_shri_i64(a, a, 8 - c);
    tcg_gen_andi_i64(d, d, mask);   /* in-lane part of the left shift */
    tcg_gen_andi_i64(a, a, ~mask);  /* in-lane part of the right shift */
    tcg_gen_or_i64(d, d, a);
}
/*
 * Rotate each 16-bit lane of @a left by immediate @c, writing the result
 * to @d.  Same technique as tcg_gen_vec_rotl8i_i64, with 16-bit lanes.
 * Clobbers @a.  NOTE(review): assumes 0 <= c < 16 (tcg_gen_gvec_rotli
 * asserts this and routes c == 0 to a plain move) — confirm all callers.
 */
void tcg_gen_vec_rotl16i_i64(TCGv_i64 d, TCGv_i64 a, int64_t c)
{
    /* Bits of each lane that the left shift keeps in-lane. */
    uint64_t mask = dup_const(MO_16, 0xffff << c);

    tcg_gen_shli_i64(d, a, c);
    tcg_gen_shri_i64(a, a, 16 - c);
    tcg_gen_andi_i64(d, d, mask);   /* in-lane part of the left shift */
    tcg_gen_andi_i64(a, a, ~mask);  /* in-lane part of the right shift */
    tcg_gen_or_i64(d, d, a);
}
/*
 * Expand a vector rotate-left-by-immediate over the gvec region:
 * for each lane of element size @vece, d = rotl(a, shift).
 *
 * @vece:  log2(element size in bytes), MO_8 .. MO_64
 * @dofs:  destination offset within the cpu env
 * @aofs:  source offset within the cpu env
 * @shift: rotate count, must satisfy 0 <= shift < (8 << vece)
 * @oprsz: bytes to operate on; @maxsz: bytes to clear beyond oprsz
 *
 * Expansion strategy per element size is selected from @g: host vector
 * rotli op when available (.fniv), else an integer expansion
 * (.fni8/.fni4), else an out-of-line helper (.fno).
 */
void tcg_gen_gvec_rotli(unsigned vece, uint32_t dofs, uint32_t aofs,
                        int64_t shift, uint32_t oprsz, uint32_t maxsz)
{
    static const TCGOpcode vecop_list[] = { INDEX_op_rotli_vec, 0 };
    static const GVecGen2i g[4] = {
        { .fni8 = tcg_gen_vec_rotl8i_i64,
          .fniv = tcg_gen_rotli_vec,
          .fno = gen_helper_gvec_rotl8i,
          .opt_opc = vecop_list,
          .vece = MO_8 },
        { .fni8 = tcg_gen_vec_rotl16i_i64,
          .fniv = tcg_gen_rotli_vec,
          .fno = gen_helper_gvec_rotl16i,
          .opt_opc = vecop_list,
          .vece = MO_16 },
        { .fni4 = tcg_gen_rotli_i32,
          .fniv = tcg_gen_rotli_vec,
          .fno = gen_helper_gvec_rotl32i,
          .opt_opc = vecop_list,
          .vece = MO_32 },
        { .fni8 = tcg_gen_rotli_i64,
          .fniv = tcg_gen_rotli_vec,
          .fno = gen_helper_gvec_rotl64i,
          .opt_opc = vecop_list,
          .prefer_i64 = TCG_TARGET_REG_BITS == 64,
          .vece = MO_64 },
    };

    tcg_debug_assert(vece <= MO_64);
    tcg_debug_assert(shift >= 0 && shift < (8 << vece));
    if (shift == 0) {
        /* Rotation by zero is the identity: a simple vector move. */
        tcg_gen_gvec_mov(vece, dofs, aofs, oprsz, maxsz);
    } else {
        tcg_gen_gvec_2i(dofs, aofs, oprsz, maxsz, shift, &g[vece]);
    }
}
/*
 * Expand a vector rotate-right-by-immediate over the gvec region,
 * reduced to a left rotate: rotr(x, s) == rotl(x, (width - s) % width).
 * The negate-and-mask form maps s == 0 to a left rotate of 0, which
 * tcg_gen_gvec_rotli turns into a plain move.
 *
 * @shift must satisfy 0 <= shift < (8 << vece); other parameters as for
 * tcg_gen_gvec_rotli.
 */
void tcg_gen_gvec_rotri(unsigned vece, uint32_t dofs, uint32_t aofs,
                        int64_t shift, uint32_t oprsz, uint32_t maxsz)
{
    tcg_debug_assert(vece <= MO_64);
    tcg_debug_assert(shift >= 0 && shift < (8 << vece));
    tcg_gen_gvec_rotli(vece, dofs, aofs, -shift & ((8 << vece) - 1),
                       oprsz, maxsz);
}
/*
 * Specialized generation of vector shifts by a non-constant scalar.
 */