@@ -11976,7 +11976,6 @@ static void disas_simd_indexed(DisasContext *s, uint32_t insn)
int h = extract32 ( insn , 11 , 1 ) ;
int rn = extract32 ( insn , 5 , 5 ) ;
int rd = extract32 ( insn , 0 , 5 ) ;
bool is_long = false ;
int index ;
switch ( 16 * u + opcode ) {
@@ -11990,12 +11989,10 @@ static void disas_simd_indexed(DisasContext *s, uint32_t insn)
unallocated_encoding ( s ) ;
return ;
}
is_long = true ;
break ;
case 0x03 : /* SQDMLAL, SQDMLAL2 */
case 0x07 : /* SQDMLSL, SQDMLSL2 */
case 0x0b : /* SQDMULL, SQDMULL2 */
is_long = true ;
break ;
default :
case 0x00 : /* FMLAL */
@@ -12047,96 +12044,6 @@ static void disas_simd_indexed(DisasContext *s, uint32_t insn)
if ( size = = 3 ) {
g_assert_not_reached ( ) ;
} else if ( ! is_long ) {
/* 32 bit floating point, or 16 or 32 bit integer.
* For the 16 bit scalar case we use the usual Neon helpers and
* rely on the fact that 0 op 0 = = 0 with no side effects .
*/
TCGv_i32 tcg_idx = tcg_temp_new_i32 ( ) ;
int pass , maxpasses ;
if ( is_scalar ) {
maxpasses = 1 ;
} else {
maxpasses = is_q ? 4 : 2 ;
}
read_vec_element_i32 ( s , tcg_idx , rm , index , size ) ;
if ( size = = 1 & & ! is_scalar ) {
/* The simplest way to handle the 16x16 indexed ops is to duplicate
* the index into both halves of the 32 bit tcg_idx and then use
* the usual Neon helpers .
*/
tcg_gen_deposit_i32 ( tcg_idx , tcg_idx , tcg_idx , 16 , 16 ) ;
}
for ( pass = 0 ; pass < maxpasses ; pass + + ) {
TCGv_i32 tcg_op = tcg_temp_new_i32 ( ) ;
TCGv_i32 tcg_res = tcg_temp_new_i32 ( ) ;
read_vec_element_i32 ( s , tcg_op , rn , pass , is_scalar ? size : MO_32 ) ;
switch ( 16 * u + opcode ) {
case 0x10 : /* MLA */
case 0x14 : /* MLS */
{
static NeonGenTwoOpFn * const fns [ 2 ] [ 2 ] = {
{ gen_helper_neon_add_u16 , gen_helper_neon_sub_u16 } ,
{ tcg_gen_add_i32 , tcg_gen_sub_i32 } ,
} ;
NeonGenTwoOpFn * genfn ;
bool is_sub = opcode = = 0x4 ;
if ( size = = 1 ) {
gen_helper_neon_mul_u16 ( tcg_res , tcg_op , tcg_idx ) ;
} else {
tcg_gen_mul_i32 ( tcg_res , tcg_op , tcg_idx ) ;
}
if ( opcode = = 0x8 ) {
break ;
}
read_vec_element_i32 ( s , tcg_op , rd , pass , MO_32 ) ;
genfn = fns [ size - 1 ] [ is_sub ] ;
genfn ( tcg_res , tcg_op , tcg_res ) ;
break ;
}
case 0x0c : /* SQDMULH */
if ( size = = 1 ) {
gen_helper_neon_qdmulh_s16 ( tcg_res , tcg_env ,
tcg_op , tcg_idx ) ;
} else {
gen_helper_neon_qdmulh_s32 ( tcg_res , tcg_env ,
tcg_op , tcg_idx ) ;
}
break ;
case 0x0d : /* SQRDMULH */
if ( size = = 1 ) {
gen_helper_neon_qrdmulh_s16 ( tcg_res , tcg_env ,
tcg_op , tcg_idx ) ;
} else {
gen_helper_neon_qrdmulh_s32 ( tcg_res , tcg_env ,
tcg_op , tcg_idx ) ;
}
break ;
default :
case 0x01 : /* FMLA */
case 0x05 : /* FMLS */
case 0x09 : /* FMUL */
case 0x19 : /* FMULX */
case 0x1d : /* SQRDMLAH */
case 0x1f : /* SQRDMLSH */
g_assert_not_reached ( ) ;
}
if ( is_scalar ) {
write_fp_sreg ( s , rd , tcg_res ) ;
} else {
write_vec_element_i32 ( s , tcg_res , rd , pass , MO_32 ) ;
}
}
clear_vec_high ( s , is_q , rd ) ;
} else {
/* long ops: 16x16->32 or 32x32->64 */
TCGv_i64 tcg_res [ 2 ] ;