Browse Source

Add xlate_flags_t struct

Use xlate_flags_t rather than the RISCV_XLATE_* preprocessor macros
pull/1347/head
rbuchner 3 years ago
parent
commit
d091f84af4
  1. 34
      riscv/mmu.cc
  2. 56
      riscv/mmu.h

34
riscv/mmu.cc

@@ -52,13 +52,13 @@ void throw_access_exception(bool virt, reg_t addr, access_type type)
}
}
reg_t mmu_t::translate(reg_t addr, reg_t len, access_type type, uint32_t xlate_flags)
reg_t mmu_t::translate(reg_t addr, reg_t len, access_type type, xlate_flags_t xlate_flags)
{
if (!proc)
return addr;
bool virt = proc->state.v;
bool hlvx = xlate_flags & RISCV_XLATE_VIRT_HLVX;
bool hlvx = xlate_flags.hlvx;
reg_t mode = proc->state.prv;
if (type != FETCH) {
if (in_mprv()) {
@@ -66,7 +66,7 @@ reg_t mmu_t::translate(reg_t addr, reg_t len, access_type type, uint32_t xlate_f
if (get_field(proc->state.mstatus->read(), MSTATUS_MPV) && mode != PRV_M)
virt = true;
}
if (xlate_flags & RISCV_XLATE_FORCED_VIRT) {
if (xlate_flags.forced_virt) {
virt = true;
mode = get_field(proc->state.hstatus->read(), HSTATUS_SPVP);
}
@@ -85,7 +85,7 @@ tlb_entry_t mmu_t::fetch_slow_path(reg_t vaddr)
tlb_entry_t result;
reg_t vpn = vaddr >> PGSHIFT;
if (unlikely(tlb_insn_tag[vpn % TLB_ENTRIES] != (vpn | TLB_CHECK_TRIGGERS))) {
reg_t paddr = translate(vaddr, sizeof(fetch_temp), FETCH, 0);
reg_t paddr = translate(vaddr, sizeof(fetch_temp), FETCH, {false, false, false});
if (auto host_addr = sim->addr_to_mem(paddr)) {
result = refill_tlb(vaddr, paddr, host_addr, FETCH);
} else {
@@ -198,10 +198,10 @@ void mmu_t::check_triggers(triggers::operation_t operation, reg_t address, std::
}
}
void mmu_t::load_slow_path_intrapage(reg_t addr, reg_t len, uint8_t* bytes, uint32_t xlate_flags)
void mmu_t::load_slow_path_intrapage(reg_t addr, reg_t len, uint8_t* bytes, xlate_flags_t xlate_flags)
{
reg_t vpn = addr >> PGSHIFT;
if (xlate_flags == 0 && vpn == (tlb_load_tag[vpn % TLB_ENTRIES] & ~TLB_CHECK_TRIGGERS)) {
if (!(xlate_flags.hlvx || xlate_flags.forced_virt || xlate_flags.lr) && vpn == (tlb_load_tag[vpn % TLB_ENTRIES] & ~TLB_CHECK_TRIGGERS)) {
auto host_addr = tlb_data[vpn % TLB_ENTRIES].host_offset + addr;
memcpy(bytes, host_addr, len);
return;
@@ -209,7 +209,7 @@ void mmu_t::load_slow_path_intrapage(reg_t addr, reg_t len, uint8_t* bytes, uint
reg_t paddr = translate(addr, len, LOAD, xlate_flags);
if ((xlate_flags & RISCV_XLATE_LR) && !sim->reservable(paddr)) {
if (xlate_flags.lr && !sim->reservable(paddr)) {
throw trap_load_access_fault((proc) ? proc->state.v : false, addr, 0, 0);
}
@@ -217,30 +217,30 @@ void mmu_t::load_slow_path_intrapage(reg_t addr, reg_t len, uint8_t* bytes, uint
memcpy(bytes, host_addr, len);
if (tracer.interested_in_range(paddr, paddr + PGSIZE, LOAD))
tracer.trace(paddr, len, LOAD);
else if (xlate_flags == 0)
else if (!(xlate_flags.hlvx || xlate_flags.forced_virt || xlate_flags.lr))
refill_tlb(addr, paddr, host_addr, LOAD);
} else if (!mmio_load(paddr, len, bytes)) {
throw trap_load_access_fault((proc) ? proc->state.v : false, addr, 0, 0);
}
if (xlate_flags & RISCV_XLATE_LR) {
if (xlate_flags.lr) {
load_reservation_address = paddr;
}
}
void mmu_t::load_slow_path(reg_t addr, reg_t len, uint8_t* bytes, uint32_t xlate_flags)
void mmu_t::load_slow_path(reg_t addr, reg_t len, uint8_t* bytes, xlate_flags_t xlate_flags)
{
check_triggers(triggers::OPERATION_LOAD, addr);
if ((addr & (len - 1)) == 0) {
load_slow_path_intrapage(addr, len, bytes, xlate_flags);
} else {
bool gva = ((proc) ? proc->state.v : false) || (RISCV_XLATE_FORCED_VIRT & xlate_flags);
bool gva = ((proc) ? proc->state.v : false) || xlate_flags.forced_virt;
if (!is_misaligned_enabled())
throw trap_load_address_misaligned(gva, addr, 0, 0);
if (xlate_flags & RISCV_XLATE_LR)
if (xlate_flags.lr)
throw trap_load_access_fault(gva, addr, 0, 0);
reg_t len_page0 = std::min(len, PGSIZE - addr % PGSIZE);
@@ -252,10 +252,10 @@ void mmu_t::load_slow_path(reg_t addr, reg_t len, uint8_t* bytes, uint32_t xlate
check_triggers(triggers::OPERATION_LOAD, addr, reg_from_bytes(len, bytes));
}
void mmu_t::store_slow_path_intrapage(reg_t addr, reg_t len, const uint8_t* bytes, uint32_t xlate_flags, bool actually_store)
void mmu_t::store_slow_path_intrapage(reg_t addr, reg_t len, const uint8_t* bytes, xlate_flags_t xlate_flags, bool actually_store)
{
reg_t vpn = addr >> PGSHIFT;
if (xlate_flags == 0 && vpn == (tlb_store_tag[vpn % TLB_ENTRIES] & ~TLB_CHECK_TRIGGERS)) {
if (!(xlate_flags.hlvx || xlate_flags.forced_virt || xlate_flags.lr) && vpn == (tlb_store_tag[vpn % TLB_ENTRIES] & ~TLB_CHECK_TRIGGERS)) {
if (actually_store) {
auto host_addr = tlb_data[vpn % TLB_ENTRIES].host_offset + addr;
memcpy(host_addr, bytes, len);
@@ -270,7 +270,7 @@ void mmu_t::store_slow_path_intrapage(reg_t addr, reg_t len, const uint8_t* byte
memcpy(host_addr, bytes, len);
if (tracer.interested_in_range(paddr, paddr + PGSIZE, STORE))
tracer.trace(paddr, len, STORE);
else if (xlate_flags == 0)
else if (!(xlate_flags.hlvx || xlate_flags.forced_virt || xlate_flags.lr))
refill_tlb(addr, paddr, host_addr, STORE);
} else if (!mmio_store(paddr, len, bytes)) {
throw trap_store_access_fault((proc) ? proc->state.v : false, addr, 0, 0);
@@ -278,13 +278,13 @@ void mmu_t::store_slow_path_intrapage(reg_t addr, reg_t len, const uint8_t* byte
}
}
void mmu_t::store_slow_path(reg_t addr, reg_t len, const uint8_t* bytes, uint32_t xlate_flags, bool actually_store, bool UNUSED require_alignment)
void mmu_t::store_slow_path(reg_t addr, reg_t len, const uint8_t* bytes, xlate_flags_t xlate_flags, bool actually_store, bool UNUSED require_alignment)
{
if (actually_store)
check_triggers(triggers::OPERATION_STORE, addr, reg_from_bytes(len, bytes));
if (addr & (len - 1)) {
bool gva = ((proc) ? proc->state.v : false) || (RISCV_XLATE_FORCED_VIRT & xlate_flags);
bool gva = ((proc) ? proc->state.v : false) || xlate_flags.forced_virt;
if (!is_misaligned_enabled())
throw trap_store_address_misaligned(gva, addr, 0, 0);

56
riscv/mmu.h

@@ -38,6 +38,12 @@ struct tlb_entry_t {
reg_t target_offset;
};
// Translation modifiers threaded through the MMU access paths, replacing the
// old RISCV_XLATE_* bit-mask macros with named, type-safe flags.
struct xlate_flags_t {
  // Translate as a guest (virtual) access even when V=0: translate() sets
  // virt = true and takes the effective privilege from hstatus.SPVP.
  const bool forced_virt : 1;

  // HLVX variant of a hypervisor load — presumably checks execute rather than
  // read permission during page-table walk; confirm against walk() (not shown).
  const bool hlvx : 1;

  // Load-reserved: the physical address must be reservable, and the
  // reservation address is recorded after a successful access.
  const bool lr : 1;

  // True when any flag is set, i.e. the access cannot use the plain
  // TLB fast path or refill the TLB. Consolidates the repeated
  // (hlvx || forced_virt || lr) predicate at the load/store fast paths.
  bool is_special_access() const {
    return forced_virt || hlvx || lr;
  }
};
void throw_access_exception(bool virt, reg_t addr, access_type type);
// this class implements a processor's port into the virtual memory system.
@@ -51,18 +57,14 @@ public:
mmu_t(simif_t* sim, endianness_t endianness, processor_t* proc);
~mmu_t();
#define RISCV_XLATE_FORCED_VIRT (1U << 0)
#define RISCV_XLATE_VIRT_HLVX (1U << 1)
#define RISCV_XLATE_LR (1U << 2)
template<typename T>
T ALWAYS_INLINE load(reg_t addr, uint32_t xlate_flags = 0) {
T ALWAYS_INLINE load(reg_t addr, xlate_flags_t xlate_flags = {false, false, false}) {
target_endian<T> res;
reg_t vpn = addr >> PGSHIFT;
bool aligned = (addr & (sizeof(T) - 1)) == 0;
bool tlb_hit = tlb_load_tag[vpn % TLB_ENTRIES] == vpn;
if (likely(xlate_flags == 0 && aligned && tlb_hit)) {
if (likely(!(xlate_flags.hlvx || xlate_flags.forced_virt || xlate_flags.lr) && aligned && tlb_hit)) {
res = *(target_endian<T>*)(tlb_data[vpn % TLB_ENTRIES].host_offset + addr);
} else {
load_slow_path(addr, sizeof(T), (uint8_t*)&res, xlate_flags);
@@ -76,26 +78,35 @@ public:
template<typename T>
T load_reserved(reg_t addr) {
return load<T>(addr, RISCV_XLATE_LR);
bool forced_virt = false;
bool hlvx = false;
bool lr = true;
return load<T>(addr, {forced_virt, hlvx, lr});
}
template<typename T>
T guest_load(reg_t addr) {
return load<T>(addr, RISCV_XLATE_FORCED_VIRT);
bool forced_virt = true;
bool hlvx = false;
bool lr = false;
return load<T>(addr, {forced_virt, hlvx, lr});
}
template<typename T>
T guest_load_x(reg_t addr) {
return load<T>(addr, RISCV_XLATE_FORCED_VIRT|RISCV_XLATE_VIRT_HLVX);
bool forced_virt = true;
bool hlvx = true;
bool lr = false;
return load<T>(addr, {forced_virt, hlvx, lr});
}
template<typename T>
void ALWAYS_INLINE store(reg_t addr, T val, uint32_t xlate_flags = 0) {
void ALWAYS_INLINE store(reg_t addr, T val, xlate_flags_t xlate_flags = {false, false, false}) {
reg_t vpn = addr >> PGSHIFT;
bool aligned = (addr & (sizeof(T) - 1)) == 0;
bool tlb_hit = tlb_store_tag[vpn % TLB_ENTRIES] == vpn;
if (xlate_flags == 0 && likely(aligned && tlb_hit)) {
if (!(xlate_flags.hlvx || xlate_flags.forced_virt || xlate_flags.lr) && likely(aligned && tlb_hit)) {
*(target_endian<T>*)(tlb_data[vpn % TLB_ENTRIES].host_offset + addr) = to_target(val);
} else {
target_endian<T> target_val = to_target(val);
@@ -108,7 +119,10 @@ public:
template<typename T>
void guest_store(reg_t addr, T val) {
store(addr, val, RISCV_XLATE_FORCED_VIRT);
bool forced_virt = true;
bool hlvx = false;
bool lr = false;
store(addr, val, {forced_virt, hlvx, lr});
}
// AMO/Zicbom faults should be reported as store faults
@@ -130,7 +144,7 @@ public:
template<typename T, typename op>
T amo(reg_t addr, op f) {
convert_load_traps_to_store_traps({
store_slow_path(addr, sizeof(T), nullptr, 0, false, true);
store_slow_path(addr, sizeof(T), nullptr, {false, false, false}, false, true);
auto lhs = load<T>(addr);
store<T>(addr, f(lhs));
return lhs;
@@ -164,7 +178,7 @@ public:
void clean_inval(reg_t addr, bool clean, bool inval) {
convert_load_traps_to_store_traps({
const reg_t paddr = translate(addr, blocksz, LOAD, 0) & ~(blocksz - 1);
const reg_t paddr = translate(addr, blocksz, LOAD, {false, false, false}) & ~(blocksz - 1);
if (sim->reservable(paddr)) {
if (tracer.interested_in_range(paddr, paddr + PGSIZE, LOAD))
tracer.clean_invalidate(paddr, blocksz, clean, inval);
@@ -183,10 +197,10 @@ public:
{
if (vaddr & (size-1)) {
// Raise either access fault or misaligned exception
store_slow_path(vaddr, size, nullptr, 0, false, true);
store_slow_path(vaddr, size, nullptr, {false, false, false}, false, true);
}
reg_t paddr = translate(vaddr, 1, STORE, 0);
reg_t paddr = translate(vaddr, 1, STORE, {false, false, false});
if (sim->reservable(paddr))
return load_reservation_address == paddr;
else
@@ -332,17 +346,17 @@ private:
// handle uncommon cases: TLB misses, page faults, MMIO
tlb_entry_t fetch_slow_path(reg_t addr);
void load_slow_path(reg_t addr, reg_t len, uint8_t* bytes, uint32_t xlate_flags);
void load_slow_path_intrapage(reg_t addr, reg_t len, uint8_t* bytes, uint32_t xlate_flags);
void store_slow_path(reg_t addr, reg_t len, const uint8_t* bytes, uint32_t xlate_flags, bool actually_store, bool require_alignment);
void store_slow_path_intrapage(reg_t addr, reg_t len, const uint8_t* bytes, uint32_t xlate_flags, bool actually_store);
void load_slow_path(reg_t addr, reg_t len, uint8_t* bytes, xlate_flags_t xlate_flags);
void load_slow_path_intrapage(reg_t addr, reg_t len, uint8_t* bytes, xlate_flags_t xlate_flags);
void store_slow_path(reg_t addr, reg_t len, const uint8_t* bytes, xlate_flags_t xlate_flags, bool actually_store, bool require_alignment);
void store_slow_path_intrapage(reg_t addr, reg_t len, const uint8_t* bytes, xlate_flags_t xlate_flags, bool actually_store);
bool mmio_fetch(reg_t paddr, size_t len, uint8_t* bytes);
bool mmio_load(reg_t paddr, size_t len, uint8_t* bytes);
bool mmio_store(reg_t paddr, size_t len, const uint8_t* bytes);
bool mmio(reg_t paddr, size_t len, uint8_t* bytes, access_type type);
bool mmio_ok(reg_t paddr, access_type type);
void check_triggers(triggers::operation_t operation, reg_t address, std::optional<reg_t> data = std::nullopt);
reg_t translate(reg_t addr, reg_t len, access_type type, uint32_t xlate_flags);
reg_t translate(reg_t addr, reg_t len, access_type type, xlate_flags_t xlate_flags);
reg_t pte_load(reg_t pte_paddr, reg_t addr, bool virt, access_type trap_type, size_t ptesize) {
if (ptesize == 4)

Loading…
Cancel
Save