@@ -137,15 +137,7 @@ static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot)
#define SOFTMMU_RESERVE_REGS 0
#endif
static inline tcg_target_long sextreg(tcg_target_long val, int pos, int len)
{
if (TCG_TARGET_REG_BITS == 32) {
return sextract32(val, pos, len);
} else {
return sextract64(val, pos, len);
}
}
#define sextreg sextract64
/* test if a constant matches the constraint */
static bool tcg_target_const_match(int64_t val, TCGType type, int ct)
@@ -235,7 +227,6 @@ typedef enum {
OPC_XOR = 0x4033,
OPC_XORI = 0x4013,
#if TCG_TARGET_REG_BITS == 64
OPC_ADDIW = 0x1b,
OPC_ADDW = 0x3b,
OPC_DIVUW = 0x200503b,
@@ -250,23 +241,6 @@ typedef enum {
OPC_SRLIW = 0x501b,
OPC_SRLW = 0x503b,
OPC_SUBW = 0x4000003b,
#else
/* Simplify code throughout by defining aliases for RV32. */
OPC_ADDIW = OPC_ADDI,
OPC_ADDW = OPC_ADD,
OPC_DIVUW = OPC_DIVU,
OPC_DIVW = OPC_DIV,
OPC_MULW = OPC_MUL,
OPC_REMUW = OPC_REMU,
OPC_REMW = OPC_REM,
OPC_SLLIW = OPC_SLLI,
OPC_SLLW = OPC_SLL,
OPC_SRAIW = OPC_SRAI,
OPC_SRAW = OPC_SRA,
OPC_SRLIW = OPC_SRLI,
OPC_SRLW = OPC_SRL,
OPC_SUBW = OPC_SUB,
#endif
OPC_FENCE = 0x0000000f,
OPC_NOP = OPC_ADDI, /* nop = addi r0,r0,0 */
@@ -500,7 +474,7 @@ static void tcg_out_movi(TCGContext *s, TCGType type, TCGReg rd,
tcg_target_long lo, hi, tmp;
int shift, ret;
if (TCG_TARGET_REG_BITS == 64 && type == TCG_TYPE_I32) {
if (type == TCG_TYPE_I32) {
val = (int32_t)val;
}
@@ -511,7 +485,7 @@ static void tcg_out_movi(TCGContext *s, TCGType type, TCGReg rd,
}
hi = val - lo;
if (TCG_TARGET_REG_BITS == 32 || val == (int32_t)val) {
if (val == (int32_t)val) {
tcg_out_opc_upper(s, OPC_LUI, rd, hi);
if (lo != 0) {
tcg_out_opc_imm(s, OPC_ADDIW, rd, rd, lo);
@@ -519,7 +493,6 @@ static void tcg_out_movi(TCGContext *s, TCGType type, TCGReg rd,
return;
}
/* We can only be here if TCG_TARGET_REG_BITS != 32 */
tmp = tcg_pcrel_diff(s, (void *)val);
if (tmp == (int32_t)tmp) {
tcg_out_opc_upper(s, OPC_AUIPC, rd, 0);
@@ -668,15 +641,15 @@ static void tcg_out_ldst(TCGContext *s, RISCVInsn opc, TCGReg data,
static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg arg,
TCGReg arg1, intptr_t arg2)
{
bool is32bit = (TCG_TARGET_REG_BITS == 32 || type == TCG_TYPE_I32);
tcg_out_ldst(s, is32bit ? OPC_LW : OPC_LD, arg, arg1, arg2);
RISCVInsn insn = type == TCG_TYPE_I32 ? OPC_LW : OPC_LD;
tcg_out_ldst(s, insn, arg, arg1, arg2);
}
static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
TCGReg arg1, intptr_t arg2)
{
bool is32bit = (TCG_TARGET_REG_BITS == 32 || type == TCG_TYPE_I32);
tcg_out_ldst(s, is32bit ? OPC_SW : OPC_SD, arg, arg1, arg2);
RISCVInsn insn = type == TCG_TYPE_I32 ? OPC_SW : OPC_SD;
tcg_out_ldst(s, insn, arg, arg1, arg2);
}
static bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
@@ -829,20 +802,6 @@ static void tcg_out_setcond(TCGContext *s, TCGCond cond, TCGReg ret,
}
}
static void tcg_out_brcond2(TCGContext *s, TCGCond cond, TCGReg al, TCGReg ah,
TCGReg bl, TCGReg bh, TCGLabel *l)
{
/* todo */
g_assert_not_reached();
}
static void tcg_out_setcond2(TCGContext *s, TCGCond cond, TCGReg ret,
TCGReg al, TCGReg ah, TCGReg bl, TCGReg bh)
{
/* todo */
g_assert_not_reached();
}
static void tcg_out_call_int(TCGContext *s, const tcg_insn_unit *arg, bool tail)
{
TCGReg link = tail ? TCG_REG_ZERO : TCG_REG_RA;
@@ -853,20 +812,18 @@ static void tcg_out_call_int(TCGContext *s, const tcg_insn_unit *arg, bool tail)
if (offset == sextreg(offset, 0, 20)) {
/* short jump: -2097150 to 2097152 */
tcg_out_opc_jump(s, OPC_JAL, link, offset);
} else if (TCG_TARGET_REG_BITS == 32 || offset == (int32_t)offset) {
} else if (offset == (int32_t)offset) {
/* long jump: -2147483646 to 2147483648 */
tcg_out_opc_upper(s, OPC_AUIPC, TCG_REG_TMP0, 0);
tcg_out_opc_imm(s, OPC_JALR, link, TCG_REG_TMP0, 0);
ret = reloc_call(s->code_ptr - 2, arg);
tcg_debug_assert(ret == true);
} else if (TCG_TARGET_REG_BITS == 64) {
} else {
/* far jump: 64-bit */
tcg_target_long imm = sextreg((tcg_target_long)arg, 0, 12);
tcg_target_long base = (tcg_target_long)arg - imm;
tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP0, base);
tcg_out_opc_imm(s, OPC_JALR, link, TCG_REG_TMP0, imm);
} else {
g_assert_not_reached();
}
}
@@ -942,9 +899,6 @@ static void * const qemu_st_helpers[MO_SIZE + 1] = {
#endif
};
/* We don't support oversize guests */
QEMU_BUILD_BUG_ON(TCG_TARGET_REG_BITS < TARGET_LONG_BITS);
/* We expect to use a 12-bit negative offset from ENV. */
QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) > 0);
QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) < -(1 << 11));
@@ -956,8 +910,7 @@ static void tcg_out_goto(TCGContext *s, const tcg_insn_unit *target)
tcg_debug_assert(ok);
}
static TCGReg tcg_out_tlb_load(TCGContext *s, TCGReg addrl,
TCGReg addrh, MemOpIdx oi,
static TCGReg tcg_out_tlb_load(TCGContext *s, TCGReg addr, MemOpIdx oi,
tcg_insn_unit **label_ptr, bool is_load)
{
MemOp opc = get_memop(oi);
@@ -973,7 +926,7 @@ static TCGReg tcg_out_tlb_load(TCGContext *s, TCGReg addrl,
tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP0, mask_base, mask_ofs);
tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP1, table_base, table_ofs);
tcg_out_opc_imm(s, OPC_SRLI, TCG_REG_TMP2, addrl,
tcg_out_opc_imm(s, OPC_SRLI, TCG_REG_TMP2, addr,
TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);
tcg_out_opc_reg(s, OPC_AND, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP0);
tcg_out_opc_reg(s, OPC_ADD, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP1);
@@ -992,10 +945,10 @@ static TCGReg tcg_out_tlb_load(TCGContext *s, TCGReg addrl,
/* Clear the non-page, non-alignment bits from the address. */
compare_mask = (tcg_target_long)TARGET_PAGE_MASK | ((1 << a_bits) - 1);
if (compare_mask == sextreg(compare_mask, 0, 12)) {
tcg_out_opc_imm(s, OPC_ANDI, TCG_REG_TMP1, addrl, compare_mask);
tcg_out_opc_imm(s, OPC_ANDI, TCG_REG_TMP1, addr, compare_mask);
} else {
tcg_out_movi(s, TCG_TYPE_TL, TCG_REG_TMP1, compare_mask);
tcg_out_opc_reg(s, OPC_AND, TCG_REG_TMP1, TCG_REG_TMP1, addrl);
tcg_out_opc_reg(s, OPC_AND, TCG_REG_TMP1, TCG_REG_TMP1, addr);
}
/* Compare masked address with the TLB entry. */
@@ -1003,29 +956,26 @@ static TCGReg tcg_out_tlb_load(TCGContext *s, TCGReg addrl,
tcg_out_opc_branch(s, OPC_BNE, TCG_REG_TMP0, TCG_REG_TMP1, 0);
/* TLB Hit - translate address using addend. */
if (TCG_TARGET_REG_BITS > TARGET_LONG_BITS) {
tcg_out_ext32u(s, TCG_REG_TMP0, addrl);
addrl = TCG_REG_TMP0;
if (TARGET_LONG_BITS == 32) {
tcg_out_ext32u(s, TCG_REG_TMP0, addr);
addr = TCG_REG_TMP0;
}
tcg_out_opc_reg(s, OPC_ADD, TCG_REG_TMP0, TCG_REG_TMP2, addrl);
tcg_out_opc_reg(s, OPC_ADD, TCG_REG_TMP0, TCG_REG_TMP2, addr);
return TCG_REG_TMP0;
}
static void add_qemu_ldst_label(TCGContext *s, int is_ld, MemOpIdx oi,
TCGType ext,
TCGReg datalo, TCGReg datahi,
TCGReg addrlo, TCGReg addrhi,
void *raddr, tcg_insn_unit **label_ptr)
TCGType data_type, TCGReg data_reg,
TCGReg addr_reg, void *raddr,
tcg_insn_unit **label_ptr)
{
TCGLabelQemuLdst *label = new_ldst_label(s);
label->is_ld = is_ld;
label->oi = oi;
label->type = ext;
label->datalo_reg = datalo;
label->datahi_reg = datahi;
label->addrlo_reg = addrlo;
label->addrhi_reg = addrhi;
label->type = data_type;
label->datalo_reg = data_reg;
label->addrlo_reg = addr_reg;
label->raddr = tcg_splitwx_to_rx(raddr);
label->label_ptr[0] = label_ptr[0];
}
@@ -1039,11 +989,6 @@ static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
TCGReg a2 = tcg_target_call_iarg_regs[2];
TCGReg a3 = tcg_target_call_iarg_regs[3];
/* We don't support oversize guests */
if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) {
g_assert_not_reached();
}
/* resolve label address */
if (!reloc_sbimm12(l->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {
return false;
@@ -1073,11 +1018,6 @@ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
TCGReg a3 = tcg_target_call_iarg_regs[3];
TCGReg a4 = tcg_target_call_iarg_regs[4];
/* We don't support oversize guests */
if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) {
g_assert_not_reached();
}
/* resolve label address */
if (!reloc_sbimm12(l->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {
return false;
@@ -1146,7 +1086,7 @@ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
#endif /* CONFIG_SOFTMMU */
static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg lo, TCGReg hi,
static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg val,
TCGReg base, MemOp opc, bool is_64)
{
/* Byte swapping is left to middle-end expansion. */
@@ -1154,37 +1094,28 @@ static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg lo, TCGReg hi,
switch (opc & (MO_SSIZE)) {
case MO_UB:
tcg_out_opc_imm(s, OPC_LBU, lo, base, 0);
tcg_out_opc_imm(s, OPC_LBU, val, base, 0);
break;
case MO_SB:
tcg_out_opc_imm(s, OPC_LB, lo, base, 0);
tcg_out_opc_imm(s, OPC_LB, val, base, 0);
break;
case MO_UW:
tcg_out_opc_imm(s, OPC_LHU, lo, base, 0);
tcg_out_opc_imm(s, OPC_LHU, val, base, 0);
break;
case MO_SW:
tcg_out_opc_imm(s, OPC_LH, lo, base, 0);
tcg_out_opc_imm(s, OPC_LH, val, base, 0);
break;
case MO_UL:
if (TCG_TARGET_REG_BITS == 64 && is_64) {
tcg_out_opc_imm(s, OPC_LWU, lo, base, 0);
if (is_64) {
tcg_out_opc_imm(s, OPC_LWU, val, base, 0);
break;
}
/* FALLTHRU */
case MO_SL:
tcg_out_opc_imm(s, OPC_LW, lo, base, 0);
tcg_out_opc_imm(s, OPC_LW, val, base, 0);
break;
case MO_UQ:
/* Prefer to load from offset 0 first, but allow for overlap. */
if (TCG_TARGET_REG_BITS == 64) {
tcg_out_opc_imm(s, OPC_LD, lo, base, 0);
} else if (lo != base) {
tcg_out_opc_imm(s, OPC_LW, lo, base, 0);
tcg_out_opc_imm(s, OPC_LW, hi, base, 4);
} else {
tcg_out_opc_imm(s, OPC_LW, hi, base, 4);
tcg_out_opc_imm(s, OPC_LW, lo, base, 0);
}
tcg_out_opc_imm(s, OPC_LD, val, base, 0);
break;
default:
g_assert_not_reached();
@@ -1193,8 +1124,7 @@ static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg lo, TCGReg hi,
static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64)
{
TCGReg addr_regl, addr_regh __attribute__((unused));
TCGReg data_regl, data_regh;
TCGReg addr_reg, data_reg;
MemOpIdx oi;
MemOp opc;
#if defined(CONFIG_SOFTMMU)
@@ -1204,27 +1134,23 @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64)
#endif
TCGReg base;
data_regl = *args++;
data_regh = (TCG_TARGET_REG_BITS == 32 && is_64 ? *args++ : 0);
addr_regl = *args++;
addr_regh = (TCG_TARGET_REG_BITS < TARGET_LONG_BITS ? *args++ : 0);
data_reg = *args++;
addr_reg = *args++;
oi = *args++;
opc = get_memop(oi);
#if defined(CONFIG_SOFTMMU)
base = tcg_out_tlb_load(s, addr_regl, addr_regh, oi, label_ptr, 1);
tcg_out_qemu_ld_direct(s, data_regl, data_regh, base, opc, is_64);
add_qemu_ldst_label(s, 1, oi,
(is_64 ? TCG_TYPE_I64 : TCG_TYPE_I32),
data_regl, data_regh, addr_regl, addr_regh,
s->code_ptr, label_ptr);
base = tcg_out_tlb_load(s, addr_reg, oi, label_ptr, 1);
tcg_out_qemu_ld_direct(s, data_reg, base, opc, is_64);
add_qemu_ldst_label(s, 1, oi, (is_64 ? TCG_TYPE_I64 : TCG_TYPE_I32),
data_reg, addr_reg, s->code_ptr, label_ptr);
#else
a_bits = get_alignment_bits(opc);
if (a_bits) {
tcg_out_test_alignment(s, true, addr_regl, a_bits);
tcg_out_test_alignment(s, true, addr_reg, a_bits);
}
base = addr_regl;
if (TCG_TARGET_REG_BITS > TARGET_LONG_BITS) {
base = addr_reg;
if (TARGET_LONG_BITS == 32) {
tcg_out_ext32u(s, TCG_REG_TMP0, base);
base = TCG_REG_TMP0;
}
@@ -1232,11 +1158,11 @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64)
tcg_out_opc_reg(s, OPC_ADD, TCG_REG_TMP0, TCG_GUEST_BASE_REG, base);
base = TCG_REG_TMP0;
}
tcg_out_qemu_ld_direct(s, data_regl, data_regh, base, opc, is_64);
tcg_out_qemu_ld_direct(s, data_reg, base, opc, is_64);
#endif
}
static void tcg_out_qemu_st_direct(TCGContext *s, TCGReg lo, TCGReg hi,
static void tcg_out_qemu_st_direct(TCGContext *s, TCGReg val,
TCGReg base, MemOp opc)
{
/* Byte swapping is left to middle-end expansion. */
@@ -1244,21 +1170,16 @@ static void tcg_out_qemu_st_direct(TCGContext *s, TCGReg lo, TCGReg hi,
switch (opc & (MO_SSIZE)) {
case MO_8:
tcg_out_opc_store(s, OPC_SB, base, lo, 0);
tcg_out_opc_store(s, OPC_SB, base, val, 0);
break;
case MO_16:
tcg_out_opc_store(s, OPC_SH, base, lo, 0);
tcg_out_opc_store(s, OPC_SH, base, val, 0);
break;
case MO_32:
tcg_out_opc_store(s, OPC_SW, base, lo, 0);
tcg_out_opc_store(s, OPC_SW, base, val, 0);
break;
case MO_64:
if (TCG_TARGET_REG_BITS == 64) {
tcg_out_opc_store(s, OPC_SD, base, lo, 0);
} else {
tcg_out_opc_store(s, OPC_SW, base, lo, 0);
tcg_out_opc_store(s, OPC_SW, base, hi, 4);
}
tcg_out_opc_store(s, OPC_SD, base, val, 0);
break;
default:
g_assert_not_reached();
@@ -1267,8 +1188,7 @@ static void tcg_out_qemu_st_direct(TCGContext *s, TCGReg lo, TCGReg hi,
static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is_64)
{
TCGReg addr_regl, addr_regh __attribute__((unused));
TCGReg data_regl, data_regh;
TCGReg addr_reg, data_reg;
MemOpIdx oi;
MemOp opc;
#if defined(CONFIG_SOFTMMU)
@@ -1278,27 +1198,23 @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is_64)
#endif
TCGReg base;
data_regl = *args++;
data_regh = (TCG_TARGET_REG_BITS == 32 && is_64 ? *args++ : 0);
addr_regl = *args++;
addr_regh = (TCG_TARGET_REG_BITS < TARGET_LONG_BITS ? *args++ : 0);
data_reg = *args++;
addr_reg = *args++;
oi = *args++;
opc = get_memop(oi);
#if defined(CONFIG_SOFTMMU)
base = tcg_out_tlb_load(s, addr_regl, addr_regh, oi, label_ptr, 0);
tcg_out_qemu_st_direct(s, data_regl, data_regh, base, opc);
add_qemu_ldst_label(s, 0, oi,
(is_64 ? TCG_TYPE_I64 : TCG_TYPE_I32),
data_regl, data_regh, addr_regl, addr_regh,
s->code_ptr, label_ptr);
base = tcg_out_tlb_load(s, addr_reg, oi, label_ptr, 0);
tcg_out_qemu_st_direct(s, data_reg, base, opc);
add_qemu_ldst_label(s, 0, oi, (is_64 ? TCG_TYPE_I64 : TCG_TYPE_I32),
data_reg, addr_reg, s->code_ptr, label_ptr);
#else
a_bits = get_alignment_bits(opc);
if (a_bits) {
tcg_out_test_alignment(s, false, addr_regl, a_bits);
tcg_out_test_alignment(s, false, addr_reg, a_bits);
}
base = addr_regl;
if (TCG_TARGET_REG_BITS > TARGET_LONG_BITS) {
base = addr_reg;
if (TARGET_LONG_BITS == 32) {
tcg_out_ext32u(s, TCG_REG_TMP0, base);
base = TCG_REG_TMP0;
}
@@ -1306,7 +1222,7 @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is_64)
tcg_out_opc_reg(s, OPC_ADD, TCG_REG_TMP0, TCG_GUEST_BASE_REG, base);
base = TCG_REG_TMP0;
}
tcg_out_qemu_st_direct(s, data_regl, data_regh, base, opc);
tcg_out_qemu_st_direct(s, data_reg, base, opc);
#endif
}
@@ -1585,17 +1501,11 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
case INDEX_op_brcond_i64:
tcg_out_brcond(s, a2, a0, a1, arg_label(args[3]));
break;
case INDEX_op_brcond2_i32:
tcg_out_brcond2(s, args[4], a0, a1, a2, args[3], arg_label(args[5]));
break;
case INDEX_op_setcond_i32:
case INDEX_op_setcond_i64:
tcg_out_setcond(s, args[3], a0, a1, a2);
break;
case INDEX_op_setcond2_i32:
tcg_out_setcond2(s, args[5], a0, a1, a2, args[3], args[4]);
break;
case INDEX_op_qemu_ld_i32:
tcg_out_qemu_ld(s, args, false);
@@ -1748,26 +1658,12 @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
case INDEX_op_sub2_i64:
return C_O2_I4(r, r, rZ, rZ, rM, rM);
case INDEX_op_brcond2_i32:
return C_O0_I4(rZ, rZ, rZ, rZ);
case INDEX_op_setcond2_i32:
return C_O1_I4(r, rZ, rZ, rZ, rZ);
case INDEX_op_qemu_ld_i32:
return (TARGET_LONG_BITS <= TCG_TARGET_REG_BITS
? C_O1_I1(r, L) : C_O1_I2(r, L, L));
case INDEX_op_qemu_st_i32:
return (TARGET_LONG_BITS <= TCG_TARGET_REG_BITS
? C_O0_I2(LZ, L) : C_O0_I3(LZ, L, L));
case INDEX_op_qemu_ld_i64:
return (TCG_TARGET_REG_BITS == 64 ? C_O1_I1(r, L)
: TARGET_LONG_BITS <= TCG_TARGET_REG_BITS ? C_O2_I1(r, r, L)
: C_O2_I2(r, r, L, L));
return C_O1_I1(r, L);
case INDEX_op_qemu_st_i32:
case INDEX_op_qemu_st_i64:
return (TCG_TARGET_REG_BITS == 64 ? C_O0_I2(LZ, L)
: TARGET_LONG_BITS <= TCG_TARGET_REG_BITS ? C_O0_I3(LZ, LZ, L)
: C_O0_I4(LZ, LZ, L, L));
return C_O0_I2(LZ, L);
default:
g_assert_not_reached();
@@ -1843,9 +1739,7 @@ static void tcg_target_qemu_prologue(TCGContext *s)
static void tcg_target_init(TCGContext *s)
{
tcg_target_available_regs[TCG_TYPE_I32] = 0xffffffff;
if (TCG_TARGET_REG_BITS == 64) {
tcg_target_available_regs[TCG_TYPE_I64] = 0xffffffff;
}
tcg_target_available_regs[TCG_TYPE_I64] = 0xffffffff;
tcg_target_call_clobber_regs = -1u;
tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S0);