Browse Source

Template-ize loads

pull/1122/head
Andrew Waterman 3 years ago
parent
commit
d41af9f81c
  1. 2
      customext/dummy_rocc.cc
  2. 8
      riscv/debug_module.cc
  3. 2
      riscv/insns/c_fld.h
  4. 2
      riscv/insns/c_fldsp.h
  5. 4
      riscv/insns/c_flw.h
  6. 4
      riscv/insns/c_flwsp.h
  7. 2
      riscv/insns/c_lw.h
  8. 2
      riscv/insns/c_lwsp.h
  9. 2
      riscv/insns/fld.h
  10. 2
      riscv/insns/flh.h
  11. 2
      riscv/insns/flw.h
  12. 2
      riscv/insns/lb.h
  13. 2
      riscv/insns/lbu.h
  14. 2
      riscv/insns/ld.h
  15. 2
      riscv/insns/lh.h
  16. 2
      riscv/insns/lhu.h
  17. 2
      riscv/insns/lw.h
  18. 2
      riscv/insns/lwu.h
  19. 10
      riscv/interactive.cc
  20. 18
      riscv/mmu.h
  21. 2
      riscv/sim.cc
  22. 16
      riscv/v_ext_macros.h

2
customext/dummy_rocc.cc

@@ -22,7 +22,7 @@ class dummy_rocc_t : public rocc_t
case 1: // xd <- acc (the only real work is the return statement below)
break;
case 2: // acc[rs2] <- Mem[xs1]
acc[insn.rs2] = p->get_mmu()->load_uint64(xs1);
acc[insn.rs2] = p->get_mmu()->load<uint64_t>(xs1);
break;
case 3: // acc[rs2] <- accX + xs1
acc[insn.rs2] += xs1;

8
riscv/debug_module.cc

@@ -318,13 +318,13 @@ void debug_module_t::sb_read()
reg_t address = ((uint64_t) sbaddress[1] << 32) | sbaddress[0];
try {
if (sbcs.sbaccess == 0 && config.max_sba_data_width >= 8) {
sbdata[0] = sim->debug_mmu->load_uint8(address);
sbdata[0] = sim->debug_mmu->load<uint8_t>(address);
} else if (sbcs.sbaccess == 1 && config.max_sba_data_width >= 16) {
sbdata[0] = sim->debug_mmu->load_uint16(address);
sbdata[0] = sim->debug_mmu->load<uint16_t>(address);
} else if (sbcs.sbaccess == 2 && config.max_sba_data_width >= 32) {
sbdata[0] = sim->debug_mmu->load_uint32(address);
sbdata[0] = sim->debug_mmu->load<uint32_t>(address);
} else if (sbcs.sbaccess == 3 && config.max_sba_data_width >= 64) {
uint64_t value = sim->debug_mmu->load_uint64(address);
uint64_t value = sim->debug_mmu->load<uint64_t>(address);
sbdata[0] = value;
sbdata[1] = value >> 32;
} else {

2
riscv/insns/c_fld.h

@@ -1,4 +1,4 @@
require_extension('C');
require_extension('D');
require_fp;
WRITE_RVC_FRS2S(f64(MMU.load_uint64(RVC_RS1S + insn.rvc_ld_imm())));
WRITE_RVC_FRS2S(f64(MMU.load<uint64_t>(RVC_RS1S + insn.rvc_ld_imm())));

2
riscv/insns/c_fldsp.h

@@ -1,4 +1,4 @@
require_extension('C');
require_extension('D');
require_fp;
WRITE_FRD(f64(MMU.load_uint64(RVC_SP + insn.rvc_ldsp_imm())));
WRITE_FRD(f64(MMU.load<uint64_t>(RVC_SP + insn.rvc_ldsp_imm())));

4
riscv/insns/c_flw.h

@@ -2,7 +2,7 @@ require_extension('C');
if (xlen == 32) {
require_extension('F');
require_fp;
WRITE_RVC_FRS2S(f32(MMU.load_uint32(RVC_RS1S + insn.rvc_lw_imm())));
WRITE_RVC_FRS2S(f32(MMU.load<uint32_t>(RVC_RS1S + insn.rvc_lw_imm())));
} else { // c.ld
WRITE_RVC_RS2S(MMU.load_int64(RVC_RS1S + insn.rvc_ld_imm()));
WRITE_RVC_RS2S(MMU.load<int64_t>(RVC_RS1S + insn.rvc_ld_imm()));
}

4
riscv/insns/c_flwsp.h

@@ -2,8 +2,8 @@ require_extension('C');
if (xlen == 32) {
require_extension('F');
require_fp;
WRITE_FRD(f32(MMU.load_uint32(RVC_SP + insn.rvc_lwsp_imm())));
WRITE_FRD(f32(MMU.load<uint32_t>(RVC_SP + insn.rvc_lwsp_imm())));
} else { // c.ldsp
require(insn.rvc_rd() != 0);
WRITE_RD(MMU.load_int64(RVC_SP + insn.rvc_ldsp_imm()));
WRITE_RD(MMU.load<int64_t>(RVC_SP + insn.rvc_ldsp_imm()));
}

2
riscv/insns/c_lw.h

@@ -1,2 +1,2 @@
require_extension('C');
WRITE_RVC_RS2S(MMU.load_int32(RVC_RS1S + insn.rvc_lw_imm()));
WRITE_RVC_RS2S(MMU.load<int32_t>(RVC_RS1S + insn.rvc_lw_imm()));

2
riscv/insns/c_lwsp.h

@@ -1,3 +1,3 @@
require_extension('C');
require(insn.rvc_rd() != 0);
WRITE_RD(MMU.load_int32(RVC_SP + insn.rvc_lwsp_imm()));
WRITE_RD(MMU.load<int32_t>(RVC_SP + insn.rvc_lwsp_imm()));

2
riscv/insns/fld.h

@@ -1,3 +1,3 @@
require_extension('D');
require_fp;
WRITE_FRD(f64(MMU.load_uint64(RS1 + insn.i_imm())));
WRITE_FRD(f64(MMU.load<uint64_t>(RS1 + insn.i_imm())));

2
riscv/insns/flh.h

@@ -1,3 +1,3 @@
require_extension(EXT_ZFHMIN);
require_fp;
WRITE_FRD(f16(MMU.load_uint16(RS1 + insn.i_imm())));
WRITE_FRD(f16(MMU.load<uint16_t>(RS1 + insn.i_imm())));

2
riscv/insns/flw.h

@@ -1,3 +1,3 @@
require_extension('F');
require_fp;
WRITE_FRD(f32(MMU.load_uint32(RS1 + insn.i_imm())));
WRITE_FRD(f32(MMU.load<uint32_t>(RS1 + insn.i_imm())));

2
riscv/insns/lb.h

@@ -1 +1 @@
WRITE_RD(MMU.load_int8(RS1 + insn.i_imm()));
WRITE_RD(MMU.load<int8_t>(RS1 + insn.i_imm()));

2
riscv/insns/lbu.h

@@ -1 +1 @@
WRITE_RD(MMU.load_uint8(RS1 + insn.i_imm()));
WRITE_RD(MMU.load<uint8_t>(RS1 + insn.i_imm()));

2
riscv/insns/ld.h

@@ -1,2 +1,2 @@
require_rv64;
WRITE_RD(MMU.load_int64(RS1 + insn.i_imm()));
WRITE_RD(MMU.load<int64_t>(RS1 + insn.i_imm()));

2
riscv/insns/lh.h

@@ -1 +1 @@
WRITE_RD(MMU.load_int16(RS1 + insn.i_imm()));
WRITE_RD(MMU.load<int16_t>(RS1 + insn.i_imm()));

2
riscv/insns/lhu.h

@@ -1 +1 @@
WRITE_RD(MMU.load_uint16(RS1 + insn.i_imm()));
WRITE_RD(MMU.load<uint16_t>(RS1 + insn.i_imm()));

2
riscv/insns/lw.h

@@ -1 +1 @@
WRITE_RD(MMU.load_int32(RS1 + insn.i_imm()));
WRITE_RD(MMU.load<int32_t>(RS1 + insn.i_imm()));

2
riscv/insns/lwu.h

@@ -1,2 +1,2 @@
require_rv64;
WRITE_RD(MMU.load_uint32(RS1 + insn.i_imm()));
WRITE_RD(MMU.load<uint32_t>(RS1 + insn.i_imm()));

10
riscv/interactive.cc

@@ -678,17 +678,17 @@ reg_t sim_t::get_mem(const std::vector<std::string>& args)
switch (addr % 8)
{
case 0:
val = mmu->load_uint64(addr);
val = mmu->load<uint64_t>(addr);
break;
case 4:
val = mmu->load_uint32(addr);
val = mmu->load<uint32_t>(addr);
break;
case 2:
case 6:
val = mmu->load_uint16(addr);
val = mmu->load<uint16_t>(addr);
break;
default:
val = mmu->load_uint8(addr);
val = mmu->load<uint8_t>(addr);
break;
}
return val;
@@ -722,7 +722,7 @@ void sim_t::interactive_str(const std::string& cmd, const std::vector<std::strin
std::ostream out(sout_.rdbuf());
char ch;
while ((ch = mmu->load_uint8(addr++)))
while ((ch = mmu->load<uint8_t>(addr++)))
out << ch;
out << std::endl;

18
riscv/mmu.h

@@ -94,22 +94,6 @@ public:
return load<T>(addr, RISCV_XLATE_VIRT|RISCV_XLATE_VIRT_HLVX);
}
// template for functions that load an aligned value from memory
#define load_func(type, prefix, xlate_flags) \
type##_t ALWAYS_INLINE prefix##_##type(reg_t addr) { return load<type##_t>(addr, xlate_flags); }
// load value from memory at aligned address; zero extend to register width
load_func(uint8, load, 0)
load_func(uint16, load, 0)
load_func(uint32, load, 0)
load_func(uint64, load, 0)
// load value from memory at aligned address; sign extend to register width
load_func(int8, load, 0)
load_func(int16, load, 0)
load_func(int32, load, 0)
load_func(int64, load, 0)
#ifndef RISCV_ENABLE_COMMITLOG
# define WRITE_MEM(addr, value, size) ((void)(addr), (void)(value), (void)(size))
#else
@@ -185,7 +169,7 @@ public:
if (unlikely(addr & (sizeof(float128_t)-1)))
throw trap_load_address_misaligned((proc) ? proc->state.v : false, addr, 0, 0);
#endif
return (float128_t){load_uint64(addr), load_uint64(addr + 8)};
return (float128_t){load<uint64_t>(addr), load<uint64_t>(addr + 8)};
}
// store value to memory at aligned address

2
riscv/sim.cc

@@ -420,7 +420,7 @@ void sim_t::idle()
void sim_t::read_chunk(addr_t taddr, size_t len, void* dst)
{
assert(len == 8);
auto data = debug_mmu->to_target(debug_mmu->load_uint64(taddr));
auto data = debug_mmu->to_target(debug_mmu->load<uint64_t>(taddr));
memcpy(dst, &data, sizeof data);
}

16
riscv/v_ext_macros.h

@@ -1183,7 +1183,7 @@ reg_t index[P.VU.vlmax]; \
VI_STRIP(i); \
P.VU.vstart->write(i); \
for (reg_t fn = 0; fn < nf; ++fn) { \
elt_width##_t val = MMU.load_##elt_width( \
elt_width##_t val = MMU.load<elt_width##_t>( \
baseAddr + (stride) + (offset) * sizeof(elt_width##_t)); \
P.VU.elt<elt_width##_t>(vd + fn * emul, vreg_inx, true) = val; \
} \
@@ -1207,19 +1207,19 @@ reg_t index[P.VU.vlmax]; \
switch (P.VU.vsew) { \
case e8: \
P.VU.elt<uint8_t>(vd + fn * flmul, vreg_inx, true) = \
MMU.load_uint8(baseAddr + index[i] + fn * 1); \
MMU.load<uint8_t>(baseAddr + index[i] + fn * 1); \
break; \
case e16: \
P.VU.elt<uint16_t>(vd + fn * flmul, vreg_inx, true) = \
MMU.load_uint16(baseAddr + index[i] + fn * 2); \
MMU.load<uint16_t>(baseAddr + index[i] + fn * 2); \
break; \
case e32: \
P.VU.elt<uint32_t>(vd + fn * flmul, vreg_inx, true) = \
MMU.load_uint32(baseAddr + index[i] + fn * 4); \
MMU.load<uint32_t>(baseAddr + index[i] + fn * 4); \
break; \
default: \
P.VU.elt<uint64_t>(vd + fn * flmul, vreg_inx, true) = \
MMU.load_uint64(baseAddr + index[i] + fn * 8); \
MMU.load<uint64_t>(baseAddr + index[i] + fn * 8); \
break; \
} \
} \
@@ -1294,7 +1294,7 @@ reg_t index[P.VU.vlmax]; \
for (reg_t fn = 0; fn < nf; ++fn) { \
uint64_t val; \
try { \
val = MMU.load_##elt_width( \
val = MMU.load<elt_width##_t>( \
baseAddr + (i * nf + fn) * sizeof(elt_width##_t)); \
} catch (trap_t& t) { \
if (i == 0) \
@@ -1327,7 +1327,7 @@ reg_t index[P.VU.vlmax]; \
reg_t off = P.VU.vstart->read() % elt_per_reg; \
if (off) { \
for (reg_t pos = off; pos < elt_per_reg; ++pos) { \
auto val = MMU.load_## elt_width(baseAddr + \
auto val = MMU.load<elt_width##_t>(baseAddr + \
P.VU.vstart->read() * sizeof(elt_width ## _t)); \
P.VU.elt<elt_width ## _t>(vd + i, pos, true) = val; \
P.VU.vstart->write(P.VU.vstart->read() + 1); \
@@ -1336,7 +1336,7 @@ reg_t index[P.VU.vlmax]; \
} \
for (; i < len; ++i) { \
for (reg_t pos = 0; pos < elt_per_reg; ++pos) { \
auto val = MMU.load_## elt_width(baseAddr + \
auto val = MMU.load<elt_width##_t>(baseAddr + \
P.VU.vstart->read() * sizeof(elt_width ## _t)); \
P.VU.elt<elt_width ## _t>(vd + i, pos, true) = val; \
P.VU.vstart->write(P.VU.vstart->read() + 1); \

Loading…
Cancel
Save