Browse Source

Update trigger behavior for memory accesses to match recommended debug specification behavior

According to the debug specification (select bit description):

0 (address): There is at least one compare value and it contains
the lowest virtual address of the access. In addition, it is recommended
that there are additional compare values so that the other accessed
virtual addresses also match. (E.g. on a 32-bit read from 0x4000,
the lowest address is 0x4000 and the other addresses
are 0x4001, 0x4002, and 0x4003.)

1 (data): There is exactly one compare value and it contains
the data value loaded or stored, or the instruction executed.
Any bits beyond the size of the data access will contain 0.

Previously, when the select bit was 0, Spike did not follow
this recommendation and provided only one compare value (the lowest
address of the access) to the trigger module.
This change modifies the behavior to match the recommended one.

The implementation follows the debug specification recommendation.

Signed-off-by: Farid Khaydari <f.khaydari@syntacore.com>
pull/2161/head
Farid Khaydari 3 months ago
committed by Farid Khaidari
parent
commit
06fd4d8526
  1. 103
      riscv/mmu.cc
  2. 35
      riscv/mmu.h
  3. 47
      riscv/triggers.cc
  4. 6
      riscv/triggers.h

103
riscv/mmu.cc

@ -3,10 +3,14 @@
#include "config.h"
#include "mmu.h"
#include "arith.h"
#include "memif.h"
#include "simif.h"
#include "processor.h"
#include "decode_macros.h"
#include "platform.h"
#include "triggers.h"
#include <cassert>
mmu_t::mmu_t(simif_t* sim, endianness_t endianness, processor_t* proc, reg_t cache_blocksz)
: sim(sim), proc(proc), blocksz(cache_blocksz),
@ -109,7 +113,8 @@ mmu_t::insn_parcel_t mmu_t::fetch_slow_path(reg_t vaddr)
auto access_info = generate_access_info(vaddr, FETCH, {});
if (check_triggers_fetch)
check_triggers(triggers::OPERATION_EXECUTE, vaddr, access_info.effective_virt);
check_triggers(triggers::OPERATION_EXECUTE, vaddr,
access_info.effective_virt, sizeof(insn_parcel_t));
if (!tlb_hit) {
paddr = translate(access_info, sizeof(insn_parcel_t));
@ -128,13 +133,16 @@ mmu_t::insn_parcel_t mmu_t::fetch_slow_path(reg_t vaddr)
auto res = perform_intrapage_fetch(vaddr, host_addr, paddr);
if (check_triggers_fetch)
check_triggers(triggers::OPERATION_EXECUTE, vaddr, access_info.effective_virt, from_le(res));
if (!check_triggers_fetch)
return res;
check_triggers(triggers::OPERATION_EXECUTE, vaddr,
access_info.effective_virt, sizeof(insn_parcel_t), from_le(res));
return res;
}
reg_t reg_from_bytes(size_t len, const uint8_t* bytes)
static reg_t reg_from_bytes(size_t len, const uint8_t* bytes)
{
switch (len) {
case 1:
@ -211,25 +219,41 @@ bool mmu_t::mmio(reg_t paddr, size_t len, uint8_t* bytes, access_type type)
return true;
}
void mmu_t::check_triggers(triggers::operation_t operation, reg_t address, bool virt, reg_t tval, std::optional<reg_t> data)
void mmu_t::check_triggers(triggers::operation_t operation,
reg_t addr, bool virt, std::size_t data_size, const std::uint8_t* bytes)
{
assert(data_size > 0);
assert(data_size <= sizeof(reg_t));
check_triggers(operation, addr, virt,
data_size, reg_from_bytes(data_size, bytes));
}
void mmu_t::check_triggers(triggers::operation_t operation,
reg_t addr, bool virt, size_t access_len)
{
check_triggers(operation, addr, virt, access_len, std::nullopt);
}
void mmu_t::check_triggers(triggers::operation_t operation, reg_t address, bool virt, std::size_t size, std::optional<reg_t> data)
{
if (matched_trigger || !proc)
return;
auto match = proc->TM.detect_memory_access_match(operation, address, data);
auto match = proc->TM.detect_memory_access_match(operation, address, size, data);
if (!match.has_value())
return;
if (match.has_value())
switch (match->timing) {
case triggers::TIMING_BEFORE:
throw triggers::matched_t(operation, tval, match->action, virt);
switch (match->timing) {
case triggers::TIMING_BEFORE:
throw triggers::matched_t(operation, address, match->action, virt);
case triggers::TIMING_AFTER:
// We want to take this exception on the next instruction. We check
// whether to do so in the I$ refill slow path, which we can force by
// flushing the TLB.
flush_tlb();
matched_trigger = triggers::matched_t(operation, tval, match->action, virt);
}
case triggers::TIMING_AFTER:
// We want to take this exception on the next instruction. We check
// whether to do so in the I$ refill slow path, which we can force by
// flushing the TLB.
flush_tlb();
matched_trigger = triggers::matched_t(operation, address, match->action, virt);
}
}
inline void mmu_t::perform_intrapage_load(reg_t vaddr, uintptr_t host_addr, reg_t paddr, reg_t len, uint8_t* bytes, xlate_flags_t xlate_flags)
@ -272,8 +296,11 @@ void mmu_t::load_slow_path_intrapage(reg_t len, uint8_t* bytes, mem_access_info_
}
}
void mmu_t::load_slow_path(reg_t original_addr, reg_t len, uint8_t* bytes, xlate_flags_t xlate_flags)
void mmu_t::load_slow_path(reg_t original_addr, std::size_t len,
std::uint8_t* bytes, xlate_flags_t xlate_flags)
{
assert(len > 0);
if (likely(!xlate_flags.is_special_access())) {
// Fast path for simple cases
auto [tlb_hit, host_addr, paddr] = access_tlb(tlb_load, original_addr, TLB_FLAGS & ~TLB_CHECK_TRIGGERS);
@ -289,7 +316,8 @@ void mmu_t::load_slow_path(reg_t original_addr, reg_t len, uint8_t* bytes, xlate
reg_t transformed_addr = access_info.transformed_vaddr;
if (check_triggers_load)
check_triggers(triggers::OPERATION_LOAD, transformed_addr, access_info.effective_virt);
check_triggers(triggers::OPERATION_LOAD,
transformed_addr, access_info.effective_virt, len);
if ((transformed_addr & (len - 1)) == 0) {
load_slow_path_intrapage(len, bytes, access_info);
@ -301,7 +329,7 @@ void mmu_t::load_slow_path(reg_t original_addr, reg_t len, uint8_t* bytes, xlate
if (access_info.flags.lr)
throw trap_load_access_fault(gva, transformed_addr, 0, 0);
reg_t len_page0 = std::min(len, PGSIZE - transformed_addr % PGSIZE);
reg_t len_page0 = std::min<reg_t>(len, PGSIZE - transformed_addr % PGSIZE);
load_slow_path_intrapage(len_page0, bytes, access_info);
if (len_page0 != len) {
auto tail_access_info = generate_access_info(original_addr + len_page0, LOAD, xlate_flags);
@ -309,16 +337,14 @@ void mmu_t::load_slow_path(reg_t original_addr, reg_t len, uint8_t* bytes, xlate
}
}
if (check_triggers_load) {
while (len > sizeof(reg_t)) {
check_triggers(triggers::OPERATION_LOAD, transformed_addr, access_info.effective_virt, reg_from_bytes(sizeof(reg_t), bytes));
len -= sizeof(reg_t);
bytes += sizeof(reg_t);
}
check_triggers(triggers::OPERATION_LOAD, transformed_addr, access_info.effective_virt, reg_from_bytes(len, bytes));
}
if (!proc)
return;
if (check_triggers_load)
check_triggers(triggers::OPERATION_LOAD,
transformed_addr, access_info.effective_virt, len, bytes);
if (proc && unlikely(proc->get_log_commits_enabled()))
if (unlikely(proc->get_log_commits_enabled()))
proc->state.log_mem_read.push_back(std::make_tuple(original_addr, 0, len));
}
@ -359,7 +385,9 @@ void mmu_t::store_slow_path_intrapage(reg_t len, const uint8_t* bytes, mem_acces
perform_intrapage_store(vaddr, host_addr, paddr, len, bytes, access_info.flags);
}
void mmu_t::store_slow_path(reg_t original_addr, reg_t len, const uint8_t* bytes, xlate_flags_t xlate_flags, bool actually_store, bool UNUSED require_alignment)
void mmu_t::store_slow_path(reg_t original_addr, std::size_t len,
const std::uint8_t* bytes, xlate_flags_t xlate_flags,
bool actually_store, bool UNUSED require_alignment)
{
if (likely(!xlate_flags.is_special_access())) {
// Fast path for simple cases
@ -377,16 +405,9 @@ void mmu_t::store_slow_path(reg_t original_addr, reg_t len, const uint8_t* bytes
auto access_info = generate_access_info(original_addr, STORE, xlate_flags);
reg_t transformed_addr = access_info.transformed_vaddr;
if (actually_store && check_triggers_store) {
reg_t trig_len = len;
const uint8_t* trig_bytes = bytes;
while (trig_len > sizeof(reg_t)) {
check_triggers(triggers::OPERATION_STORE, transformed_addr, access_info.effective_virt, reg_from_bytes(sizeof(reg_t), trig_bytes));
trig_len -= sizeof(reg_t);
trig_bytes += sizeof(reg_t);
}
check_triggers(triggers::OPERATION_STORE, transformed_addr, access_info.effective_virt, reg_from_bytes(trig_len, trig_bytes));
}
if (actually_store && check_triggers_store)
check_triggers(triggers::OPERATION_STORE,
transformed_addr, access_info.effective_virt, len, bytes);
if (transformed_addr & (len - 1)) {
bool gva = access_info.effective_virt;
@ -396,7 +417,7 @@ void mmu_t::store_slow_path(reg_t original_addr, reg_t len, const uint8_t* bytes
if (require_alignment)
throw trap_store_access_fault(gva, transformed_addr, 0, 0);
reg_t len_page0 = std::min(len, PGSIZE - transformed_addr % PGSIZE);
reg_t len_page0 = std::min<reg_t>(len, PGSIZE - transformed_addr % PGSIZE);
store_slow_path_intrapage(len_page0, bytes, access_info, actually_store);
if (len_page0 != len) {
auto tail_access_info = generate_access_info(original_addr + len_page0, STORE, xlate_flags);

35
riscv/mmu.h

@ -13,8 +13,11 @@
#include "../fesvr/byteorder.h"
#include "triggers.h"
#include "cfg.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <stdlib.h>
#include <vector>
// virtual memory configuration
#define PGSHIFT 12
@ -216,10 +219,9 @@ public:
reg_t transformed_addr = access_info.transformed_vaddr;
auto base = transformed_addr & ~(blocksz - 1);
for (size_t offset = 0; offset < blocksz; offset += 1) {
check_triggers(triggers::OPERATION_STORE, base + offset, false, transformed_addr, std::nullopt);
check_triggers(triggers::OPERATION_STORE, base, false, blocksz);
for (size_t offset = 0; offset < blocksz; offset += 1)
store<uint8_t>(base + offset, 0);
}
}
void clean_inval(reg_t addr, bool clean, bool inval) {
@ -227,8 +229,7 @@ public:
reg_t transformed_addr = access_info.transformed_vaddr;
auto base = transformed_addr & ~(blocksz - 1);
for (size_t offset = 0; offset < blocksz; offset += 1)
check_triggers(triggers::OPERATION_STORE, base + offset, false, transformed_addr, std::nullopt);
check_triggers(triggers::OPERATION_STORE, base, false, blocksz);
convert_load_traps_to_store_traps({
const reg_t paddr = translate(access_info, 1);
if (sim->reservable(paddr)) {
@ -414,23 +415,33 @@ private:
// handle uncommon cases: TLB misses, page faults, MMIO
typedef uint16_t insn_parcel_t;
insn_parcel_t fetch_slow_path(reg_t addr);
insn_parcel_t perform_intrapage_fetch(reg_t vaddr, uintptr_t host_addr, reg_t paddr);
void load_slow_path(reg_t original_addr, reg_t len, uint8_t* bytes, xlate_flags_t xlate_flags);
void load_slow_path(reg_t original_addr, std::size_t len,
std::uint8_t* bytes, xlate_flags_t xlate_flags);
void load_slow_path_intrapage(reg_t len, uint8_t* bytes, mem_access_info_t access_info);
void perform_intrapage_load(reg_t vaddr, uintptr_t host_addr, reg_t paddr, reg_t len, uint8_t* bytes, xlate_flags_t xlate_flags);
void store_slow_path(reg_t original_addr, reg_t len, const uint8_t* bytes, xlate_flags_t xlate_flags, bool actually_store, bool require_alignment);
void store_slow_path(reg_t original_addr, std::size_t len, const std::uint8_t* bytes,
xlate_flags_t xlate_flags, bool actually_store, bool require_alignment);
void store_slow_path_intrapage(reg_t len, const uint8_t* bytes, mem_access_info_t access_info, bool actually_store);
void perform_intrapage_store(reg_t vaddr, uintptr_t host_addr, reg_t paddr, reg_t len, const uint8_t* bytes, xlate_flags_t xlate_flags);
bool mmio_fetch(reg_t paddr, size_t len, uint8_t* bytes);
bool mmio_load(reg_t paddr, size_t len, uint8_t* bytes);
bool mmio_store(reg_t paddr, size_t len, const uint8_t* bytes);
bool mmio(reg_t paddr, size_t len, uint8_t* bytes, access_type type);
bool mmio_ok(reg_t paddr, access_type type);
void check_triggers(triggers::operation_t operation, reg_t address, bool virt, std::optional<reg_t> data = std::nullopt) {
check_triggers(operation, address, virt, address, data);
}
void check_triggers(triggers::operation_t operation, reg_t address, bool virt, reg_t tval, std::optional<reg_t> data);
void check_triggers(triggers::operation_t operation,
reg_t addr, bool virt, std::size_t data_size, const std::uint8_t* bytes);
void check_triggers(triggers::operation_t operation,
reg_t addr, bool virt, std::size_t access_len);
void check_triggers(triggers::operation_t operation, reg_t address,
bool virt, std::size_t size, std::optional<reg_t> data);
bool svukte_qualified(mem_access_info_t access_info);
bool svukte_fault(reg_t addr, mem_access_info_t access_info);
reg_t translate(mem_access_info_t access_info, reg_t len);

47
riscv/triggers.cc

@ -208,6 +208,8 @@ void mcontrol_t::tdata1_write(processor_t * const proc, const reg_t val, const b
}
bool mcontrol_common_t::simple_match(unsigned xlen, reg_t value) const {
value &= make_mask64(0, xlen);
switch (match) {
case MATCH_EQUAL:
return value == tdata2;
@ -238,7 +240,10 @@ bool mcontrol_common_t::simple_match(unsigned xlen, reg_t value) const {
assert(0);
}
std::optional<match_result_t> mcontrol_common_t::detect_memory_access_match(processor_t * const proc, operation_t operation, reg_t address, std::optional<reg_t> data) noexcept {
std::optional<match_result_t> mcontrol_common_t::detect_memory_access_match(
processor_t * const proc, operation_t operation, reg_t address,
std::size_t len, std::optional<reg_t> data) noexcept
{
if ((operation == triggers::OPERATION_EXECUTE && !execute) ||
(operation == triggers::OPERATION_STORE && !store) ||
(operation == triggers::OPERATION_LOAD && !load) ||
@ -246,28 +251,30 @@ std::optional<match_result_t> mcontrol_common_t::detect_memory_access_match(proc
return std::nullopt;
}
reg_t value;
if (select) {
if (!data.has_value())
return std::nullopt;
value = *data;
} else {
value = address;
}
// We need this because in 32-bit mode sometimes the PC bits get sign
// extended.
auto xlen = proc->get_xlen();
if (xlen == 32) {
value &= 0xffffffff;
if (!select) {
for (std::size_t i = 0; i < len; ++i)
if (simple_match(xlen, address + i)) {
/* This is OK because this function is only called if the trigger was
* not inhibited by the previous trigger in the chain. */
set_hit(timing ? HIT_IMMEDIATELY_AFTER : HIT_BEFORE);
return match_result_t(timing_t(timing), action);
}
return std::nullopt;
}
if (simple_match(xlen, value)) {
/* This is OK because this function is only called if the trigger was not
* inhibited by the previous trigger in the chain. */
if (len * 8 > proc->get_xlen())
return std::nullopt;
if (data.has_value() && simple_match(xlen, data.value())) {
/* This is OK because this function is only called if the trigger was
* not inhibited by the previous trigger in the chain. */
set_hit(timing ? HIT_IMMEDIATELY_AFTER : HIT_BEFORE);
return match_result_t(timing_t(timing), action);
}
return std::nullopt;
}
@ -600,7 +607,9 @@ bool module_t::tdata3_write(unsigned index, const reg_t val) noexcept
return true;
}
std::optional<match_result_t> module_t::detect_memory_access_match(operation_t operation, reg_t address, std::optional<reg_t> data) noexcept
std::optional<match_result_t> module_t::detect_memory_access_match(
operation_t operation, reg_t address, std::size_t len,
std::optional<reg_t> data) noexcept
{
state_t * const state = proc->get_state();
if (state->debug_mode)
@ -621,7 +630,7 @@ std::optional<match_result_t> module_t::detect_memory_access_match(operation_t o
* entire chain did not match. This is allowed by the spec, because the final
* trigger in the chain will never get `hit` set unless the entire chain
* matches. */
auto result = trigger->detect_memory_access_match(proc, operation, address, data);
auto result = trigger->detect_memory_access_match(proc, operation, address, len, data);
if (result.has_value() && !trigger->get_chain() && (!ret.has_value() || ret->action < result->action))
ret = result;

6
riscv/triggers.h

@ -91,7 +91,7 @@ public:
virtual void stash_read_values() {}
virtual std::optional<match_result_t> detect_memory_access_match(processor_t UNUSED * const proc,
operation_t UNUSED operation, reg_t UNUSED address, std::optional<reg_t> UNUSED data) noexcept { return std::nullopt; }
operation_t UNUSED operation, reg_t UNUSED address, std::size_t UNUSED len, std::optional<reg_t> UNUSED data) noexcept { return std::nullopt; }
virtual std::optional<match_result_t> detect_icount_fire(processor_t UNUSED * const proc) { return std::nullopt; }
virtual void detect_icount_decrement(processor_t UNUSED * const proc) {}
virtual std::optional<match_result_t> detect_trap_match(processor_t UNUSED * const proc, const trap_t UNUSED & t) noexcept { return std::nullopt; }
@ -214,7 +214,7 @@ public:
virtual void set_hit(hit_t val) = 0;
virtual std::optional<match_result_t> detect_memory_access_match(processor_t * const proc,
operation_t operation, reg_t address, std::optional<reg_t> data) noexcept override;
operation_t operation, reg_t address, std::size_t len, std::optional<reg_t> data) noexcept override;
private:
bool simple_match(unsigned xlen, reg_t value) const;
@ -292,7 +292,7 @@ public:
unsigned count() const { return triggers.size(); }
std::optional<match_result_t> detect_memory_access_match(operation_t operation, reg_t address, std::optional<reg_t> data) noexcept;
std::optional<match_result_t> detect_memory_access_match(operation_t operation, reg_t address, std::size_t len, std::optional<reg_t> data) noexcept;
std::optional<match_result_t> detect_icount_match() noexcept;
std::optional<match_result_t> detect_trap_match(const trap_t& t) noexcept;

Loading…
Cancel
Save