|
|
|
@ -147,28 +147,32 @@ public: |
|
|
|
|
|
|
|
// template for functions that store an aligned value to memory
//
// The generated helper takes an extra `actually_store` flag (defaulted to
// true, so all existing call sites are unaffected).  When false, the full
// address-translation / trigger / permission checking for the store is
// still performed, but the data write (and the commit-log WRITE_MEM hook)
// is suppressed.  AMO implementations use this to probe store permission
// up front, before performing the load half of the read-modify-write.
//
// NOTE: no comments inside the macro body — a `\` at the end of a `//`
// comment line would splice the following line into the comment.
#define store_func(type, prefix, xlate_flags) \
  void prefix##_##type(reg_t addr, type##_t val, bool actually_store=true) { \
    if (unlikely(addr & (sizeof(type##_t)-1))) \
      return misaligned_store(addr, val, sizeof(type##_t), xlate_flags); \
    reg_t vpn = addr >> PGSHIFT; \
    size_t size = sizeof(type##_t); \
    if ((xlate_flags) == 0 && likely(tlb_store_tag[vpn % TLB_ENTRIES] == vpn)) { \
      if (actually_store) { \
        if (proc) WRITE_MEM(addr, val, size); \
        *(target_endian<type##_t>*)(tlb_data[vpn % TLB_ENTRIES].host_offset + addr) = to_target(val); \
      } \
    } \
    else if ((xlate_flags) == 0 && unlikely(tlb_store_tag[vpn % TLB_ENTRIES] == (vpn | TLB_CHECK_TRIGGERS))) { \
      if (actually_store) { \
        if (!matched_trigger) { \
          matched_trigger = trigger_exception(triggers::OPERATION_STORE, addr, val); \
          if (matched_trigger) \
            throw *matched_trigger; \
        } \
        if (proc) WRITE_MEM(addr, val, size); \
        *(target_endian<type##_t>*)(tlb_data[vpn % TLB_ENTRIES].host_offset + addr) = to_target(val); \
      } \
    } \
    else { \
      target_endian<type##_t> target_val = to_target(val); \
      store_slow_path(addr, sizeof(type##_t), (const uint8_t*)&target_val, (xlate_flags), actually_store); \
      if (actually_store && proc) WRITE_MEM(addr, val, size); \
    } \
  }
|
|
|
|
|
|
|
@ -192,6 +196,7 @@ public: |
|
|
|
template<typename op> \ |
|
|
|
type##_t amo_##type(reg_t addr, op f) { \ |
|
|
|
convert_load_traps_to_store_traps({ \ |
|
|
|
store_##type(addr, 0, false); \ |
|
|
|
auto lhs = load_##type(addr, true); \ |
|
|
|
store_##type(addr, f(lhs)); \ |
|
|
|
return lhs; \ |
|
|
|
@ -438,7 +443,7 @@ private: |
|
|
|
// handle uncommon cases: TLB misses, page faults, MMIO
|
|
|
|
tlb_entry_t fetch_slow_path(reg_t addr); |
|
|
|
void load_slow_path(reg_t addr, reg_t len, uint8_t* bytes, uint32_t xlate_flags); |
|
|
|
void store_slow_path(reg_t addr, reg_t len, const uint8_t* bytes, uint32_t xlate_flags); |
|
|
|
void store_slow_path(reg_t addr, reg_t len, const uint8_t* bytes, uint32_t xlate_flags, bool actually_store); |
|
|
|
bool mmio_load(reg_t addr, size_t len, uint8_t* bytes); |
|
|
|
bool mmio_store(reg_t addr, size_t len, const uint8_t* bytes); |
|
|
|
bool mmio_ok(reg_t addr, access_type type); |
|
|
|
|