Browse Source

AMOs should always return store faults, not load faults

This commit also factors out the common AMO code into mmu_t.
pull/75/head
Andrew Waterman 10 years ago
parent
commit
bf8d2b71bb
  1. 4
      riscv/insns/amoadd_d.h
  2. 4
      riscv/insns/amoadd_w.h
  3. 4
      riscv/insns/amoand_d.h
  4. 4
      riscv/insns/amoand_w.h
  5. 4
      riscv/insns/amomax_d.h
  6. 4
      riscv/insns/amomax_w.h
  7. 4
      riscv/insns/amomaxu_d.h
  8. 4
      riscv/insns/amomaxu_w.h
  9. 4
      riscv/insns/amomin_d.h
  10. 4
      riscv/insns/amomin_w.h
  11. 4
      riscv/insns/amominu_d.h
  12. 4
      riscv/insns/amominu_w.h
  13. 4
      riscv/insns/amoor_d.h
  14. 4
      riscv/insns/amoor_w.h
  15. 4
      riscv/insns/amoswap_d.h
  16. 4
      riscv/insns/amoswap_w.h
  17. 4
      riscv/insns/amoxor_d.h
  18. 4
      riscv/insns/amoxor_w.h
  19. 20
      riscv/mmu.h

4
riscv/insns/amoadd_d.h

@@ -1,5 +1,3 @@
require_extension('A');
require_rv64;
reg_t v = MMU.load_uint64(RS1);
MMU.store_uint64(RS1, RS2 + v);
WRITE_RD(v);
WRITE_RD(MMU.amo_uint64(RS1, [&](uint64_t lhs) { return lhs + RS2; }));

4
riscv/insns/amoadd_w.h

@@ -1,4 +1,2 @@
require_extension('A');
reg_t v = MMU.load_int32(RS1);
MMU.store_uint32(RS1, RS2 + v);
WRITE_RD(v);
WRITE_RD(sext32(MMU.amo_uint32(RS1, [&](uint32_t lhs) { return lhs + RS2; })));

4
riscv/insns/amoand_d.h

@@ -1,5 +1,3 @@
require_extension('A');
require_rv64;
reg_t v = MMU.load_uint64(RS1);
MMU.store_uint64(RS1, RS2 & v);
WRITE_RD(v);
WRITE_RD(MMU.amo_uint64(RS1, [&](uint64_t lhs) { return lhs & RS2; }));

4
riscv/insns/amoand_w.h

@@ -1,4 +1,2 @@
require_extension('A');
reg_t v = MMU.load_int32(RS1);
MMU.store_uint32(RS1, RS2 & v);
WRITE_RD(v);
WRITE_RD(sext32(MMU.amo_uint32(RS1, [&](uint32_t lhs) { return lhs & RS2; })));

4
riscv/insns/amomax_d.h

@@ -1,5 +1,3 @@
require_extension('A');
require_rv64;
sreg_t v = MMU.load_int64(RS1);
MMU.store_uint64(RS1, std::max(sreg_t(RS2),v));
WRITE_RD(v);
WRITE_RD(MMU.amo_uint64(RS1, [&](int64_t lhs) { return std::max(lhs, int64_t(RS2)); }));

4
riscv/insns/amomax_w.h

@@ -1,4 +1,2 @@
require_extension('A');
int32_t v = MMU.load_int32(RS1);
MMU.store_uint32(RS1, std::max(int32_t(RS2),v));
WRITE_RD(v);
WRITE_RD(sext32(MMU.amo_uint32(RS1, [&](int32_t lhs) { return std::max(lhs, int32_t(RS2)); })));

4
riscv/insns/amomaxu_d.h

@@ -1,5 +1,3 @@
require_extension('A');
require_rv64;
reg_t v = MMU.load_uint64(RS1);
MMU.store_uint64(RS1, std::max(RS2,v));
WRITE_RD(v);
WRITE_RD(MMU.amo_uint64(RS1, [&](uint64_t lhs) { return std::max(lhs, RS2); }));

4
riscv/insns/amomaxu_w.h

@@ -1,4 +1,2 @@
require_extension('A');
uint32_t v = MMU.load_int32(RS1);
MMU.store_uint32(RS1, std::max(uint32_t(RS2),v));
WRITE_RD((int32_t)v);
WRITE_RD(sext32(MMU.amo_uint32(RS1, [&](uint32_t lhs) { return std::max(lhs, uint32_t(RS2)); })));

4
riscv/insns/amomin_d.h

@@ -1,5 +1,3 @@
require_extension('A');
require_rv64;
sreg_t v = MMU.load_int64(RS1);
MMU.store_uint64(RS1, std::min(sreg_t(RS2),v));
WRITE_RD(v);
WRITE_RD(MMU.amo_uint64(RS1, [&](int64_t lhs) { return std::min(lhs, int64_t(RS2)); }));

4
riscv/insns/amomin_w.h

@@ -1,4 +1,2 @@
require_extension('A');
int32_t v = MMU.load_int32(RS1);
MMU.store_uint32(RS1, std::min(int32_t(RS2),v));
WRITE_RD(v);
WRITE_RD(sext32(MMU.amo_uint32(RS1, [&](int32_t lhs) { return std::min(lhs, int32_t(RS2)); })));

4
riscv/insns/amominu_d.h

@@ -1,5 +1,3 @@
require_extension('A');
require_rv64;
reg_t v = MMU.load_uint64(RS1);
MMU.store_uint64(RS1, std::min(RS2,v));
WRITE_RD(v);
WRITE_RD(MMU.amo_uint64(RS1, [&](uint64_t lhs) { return std::min(lhs, RS2); }));

4
riscv/insns/amominu_w.h

@@ -1,4 +1,2 @@
require_extension('A');
uint32_t v = MMU.load_int32(RS1);
MMU.store_uint32(RS1, std::min(uint32_t(RS2),v));
WRITE_RD((int32_t)v);
WRITE_RD(sext32(MMU.amo_uint32(RS1, [&](uint32_t lhs) { return std::min(lhs, uint32_t(RS2)); })));

4
riscv/insns/amoor_d.h

@@ -1,5 +1,3 @@
require_extension('A');
require_rv64;
reg_t v = MMU.load_uint64(RS1);
MMU.store_uint64(RS1, RS2 | v);
WRITE_RD(v);
WRITE_RD(MMU.amo_uint64(RS1, [&](uint64_t lhs) { return lhs | RS2; }));

4
riscv/insns/amoor_w.h

@@ -1,4 +1,2 @@
require_extension('A');
reg_t v = MMU.load_int32(RS1);
MMU.store_uint32(RS1, RS2 | v);
WRITE_RD(v);
WRITE_RD(sext32(MMU.amo_uint32(RS1, [&](uint32_t lhs) { return lhs | RS2; })));

4
riscv/insns/amoswap_d.h

@@ -1,5 +1,3 @@
require_extension('A');
require_rv64;
reg_t v = MMU.load_uint64(RS1);
MMU.store_uint64(RS1, RS2);
WRITE_RD(v);
WRITE_RD(MMU.amo_uint64(RS1, [&](uint64_t lhs) { return RS2; }));

4
riscv/insns/amoswap_w.h

@@ -1,4 +1,2 @@
require_extension('A');
reg_t v = MMU.load_int32(RS1);
MMU.store_uint32(RS1, RS2);
WRITE_RD(v);
WRITE_RD(sext32(MMU.amo_uint32(RS1, [&](uint32_t lhs) { return RS2; })));

4
riscv/insns/amoxor_d.h

@@ -1,5 +1,3 @@
require_extension('A');
require_rv64;
reg_t v = MMU.load_uint64(RS1);
MMU.store_uint64(RS1, RS2 ^ v);
WRITE_RD(v);
WRITE_RD(MMU.amo_uint64(RS1, [&](uint64_t lhs) { return lhs ^ RS2; }));

4
riscv/insns/amoxor_w.h

@@ -1,4 +1,2 @@
require_extension('A');
reg_t v = MMU.load_int32(RS1);
MMU.store_uint32(RS1, RS2 ^ v);
WRITE_RD(v);
WRITE_RD(sext32(MMU.amo_uint32(RS1, [&](uint32_t lhs) { return lhs ^ RS2; })));

20
riscv/mmu.h

@@ -105,12 +105,32 @@ public:
store_slow_path(addr, sizeof(type##_t), (const uint8_t*)&val); \
}
// template for functions that perform an atomic memory operation
#define amo_func(type) \
template<typename op> \
type##_t amo_##type(reg_t addr, op f) { \
if (addr & (sizeof(type##_t)-1)) \
throw trap_store_address_misaligned(addr); \
try { \
auto lhs = load_##type(addr); \
store_##type(addr, f(lhs)); \
return lhs; \
} catch (trap_load_access_fault& t) { \
/* AMO faults should be reported as store faults */ \
throw trap_store_access_fault(t.get_badaddr()); \
} \
}
// store value to memory at aligned address
store_func(uint8)
store_func(uint16)
store_func(uint32)
store_func(uint64)
// perform an atomic memory operation at an aligned address
amo_func(uint32)
amo_func(uint64)
static const reg_t ICACHE_ENTRIES = 1024;
inline size_t icache_index(reg_t addr)

Loading…
Cancel
Save