Browse Source

Implement support for big-endian targets

pull/575/head
Marcus Comstedt 6 years ago
parent
commit
8d09d845a7
  1. 15
      fesvr/elfloader.cc
  2. 4
      fesvr/htif.cc
  3. 26
      fesvr/htif.h
  4. 19
      fesvr/memif.h
  5. 10
      fesvr/syscall.cc
  6. 11
      riscv/mmu.cc
  7. 57
      riscv/mmu.h
  8. 5
      riscv/riscv.ac
  9. 44
      riscv/sim.cc
  10. 2
      riscv/sim.h

15
fesvr/elfloader.cc

@@ -31,7 +31,7 @@ std::map<std::string, uint64_t> load_elf(const char* fn, memif_t* memif, reg_t*
assert(size >= sizeof(Elf64_Ehdr));
const Elf64_Ehdr* eh64 = (const Elf64_Ehdr*)buf;
assert(IS_ELF32(*eh64) || IS_ELF64(*eh64));
assert(IS_ELFLE(*eh64));
assert(IS_ELFLE(*eh64) || IS_ELFBE(*eh64));
assert(IS_ELF_EXEC(*eh64));
assert(IS_ELF_RISCV(*eh64) || IS_ELF_EM_NONE(*eh64));
assert(IS_ELF_VCURRENT(*eh64));
@@ -83,10 +83,23 @@ std::map<std::string, uint64_t> load_elf(const char* fn, memif_t* memif, reg_t*
} \
} while(0)
if (IS_ELFLE(*eh64)) {
memif->set_target_endianness(memif_endianness_little);
if (IS_ELF32(*eh64))
LOAD_ELF(Elf32_Ehdr, Elf32_Phdr, Elf32_Shdr, Elf32_Sym, from_le);
else
LOAD_ELF(Elf64_Ehdr, Elf64_Phdr, Elf64_Shdr, Elf64_Sym, from_le);
} else {
#ifndef RISCV_ENABLE_DUAL_ENDIAN
throw std::invalid_argument("Specified ELF is big endian. Configure with --enable-dual-endian to enable support");
#else
memif->set_target_endianness(memif_endianness_big);
if (IS_ELF32(*eh64))
LOAD_ELF(Elf32_Ehdr, Elf32_Phdr, Elf32_Shdr, Elf32_Sym, from_be);
else
LOAD_ELF(Elf64_Ehdr, Elf64_Phdr, Elf64_Shdr, Elf64_Sym, from_be);
#endif
}
munmap(buf, size);

4
fesvr/htif.cc

@@ -216,7 +216,7 @@ int htif_t::run()
while (!signal_exit && exitcode == 0)
{
if (auto tohost = from_le(mem.read_uint64(tohost_addr))) {
if (auto tohost = from_target(mem.read_uint64(tohost_addr))) {
mem.write_uint64(tohost_addr, 0);
command_t cmd(mem, tohost, fromhost_callback);
device_list.handle_command(cmd);
@@ -227,7 +227,7 @@ int htif_t::run()
device_list.tick();
if (!fromhost_queue.empty() && mem.read_uint64(fromhost_addr) == 0) {
mem.write_uint64(fromhost_addr, to_le(fromhost_queue.front()));
mem.write_uint64(fromhost_addr, to_target(fromhost_queue.front()));
fromhost_queue.pop();
}
}

26
fesvr/htif.h

@@ -6,9 +6,11 @@
#include "memif.h"
#include "syscall.h"
#include "device.h"
#include "byteorder.h"
#include <string.h>
#include <map>
#include <vector>
#include <assert.h>
class htif_t : public chunked_memif_t
{
@@ -27,6 +29,30 @@ class htif_t : public chunked_memif_t
virtual memif_t& memif() { return mem; }
template<typename T> inline T from_target(T n) const
{
#ifdef RISCV_ENABLE_DUAL_ENDIAN
memif_endianness_t endianness = get_target_endianness();
assert(endianness == memif_endianness_little || endianness == memif_endianness_big);
return endianness == memif_endianness_big? from_be(n) : from_le(n);
#else
return from_le(n);
#endif
}
template<typename T> inline T to_target(T n) const
{
#ifdef RISCV_ENABLE_DUAL_ENDIAN
memif_endianness_t endianness = get_target_endianness();
assert(endianness == memif_endianness_little || endianness == memif_endianness_big);
return endianness == memif_endianness_big? to_be(n) : to_le(n);
#else
return to_le(n);
#endif
}
protected:
virtual void reset() = 0;

19
fesvr/memif.h

@@ -10,6 +10,12 @@ typedef uint64_t reg_t;
typedef int64_t sreg_t;
typedef reg_t addr_t;
typedef enum {
memif_endianness_undecided,
memif_endianness_little,
memif_endianness_big
} memif_endianness_t;
class chunked_memif_t
{
public:
@@ -19,6 +25,11 @@
virtual size_t chunk_align() = 0;
virtual size_t chunk_max_size() = 0;
virtual void set_target_endianness(memif_endianness_t endianness) {}
virtual memif_endianness_t get_target_endianness() const {
return memif_endianness_undecided;
}
};
class memif_t
@@ -55,6 +66,14 @@ public:
virtual void write_uint64(addr_t addr, uint64_t val);
virtual void write_int64(addr_t addr, int64_t val);
// endianness
virtual void set_target_endianness(memif_endianness_t endianness) {
cmemif->set_target_endianness(endianness);
}
virtual memif_endianness_t get_target_endianness() const {
return cmemif->get_target_endianness();
}
protected:
chunked_memif_t* cmemif;
};

10
fesvr/syscall.cc

@@ -300,21 +300,21 @@ reg_t syscall_t::sys_getmainvars(reg_t pbuf, reg_t limit, reg_t a2, reg_t a3, re
{
std::vector<std::string> args = htif->target_args();
std::vector<uint64_t> words(args.size() + 3);
words[0] = to_le(args.size());
words[0] = htif->to_target<uint64_t>(args.size());
words[args.size()+1] = 0; // argv[argc] = NULL
words[args.size()+2] = 0; // envp[0] = NULL
size_t sz = (args.size() + 3) * sizeof(words[0]);
for (size_t i = 0; i < args.size(); i++)
{
words[i+1] = to_le(sz + pbuf);
words[i+1] = htif->to_target<uint64_t>(sz + pbuf);
sz += args[i].length() + 1;
}
std::vector<char> bytes(sz);
memcpy(&bytes[0], &words[0], sizeof(words[0]) * words.size());
for (size_t i = 0; i < args.size(); i++)
strcpy(&bytes[from_le(words[i+1]) - pbuf], args[i].c_str());
strcpy(&bytes[htif->from_target(words[i+1]) - pbuf], args[i].c_str());
if (bytes.size() > limit)
return -ENOMEM;
@@ -343,11 +343,11 @@ void syscall_t::dispatch(reg_t mm)
reg_t magicmem[8];
memif->read(mm, sizeof(magicmem), magicmem);
reg_t n = from_le(magicmem[0]);
reg_t n = htif->from_target(magicmem[0]);
if (n >= table.size() || !table[n])
throw std::runtime_error("bad syscall #" + std::to_string(n));
magicmem[0] = to_le((this->*table[n])(from_le(magicmem[1]), from_le(magicmem[2]), from_le(magicmem[3]), from_le(magicmem[4]), from_le(magicmem[5]), from_le(magicmem[6]), from_le(magicmem[7])));
magicmem[0] = htif->to_target((this->*table[n])(htif->from_target(magicmem[1]), htif->from_target(magicmem[2]), htif->from_target(magicmem[3]), htif->from_target(magicmem[4]), htif->from_target(magicmem[5]), htif->from_target(magicmem[6]), htif->from_target(magicmem[7])));
memif->write(mm, sizeof(magicmem), magicmem);
}

11
riscv/mmu.cc

@@ -6,6 +6,9 @@
mmu_t::mmu_t(simif_t* sim, processor_t* proc)
: sim(sim), proc(proc),
#ifdef RISCV_ENABLE_DUAL_ENDIAN
target_big_endian(false),
#endif
check_triggers_fetch(false),
check_triggers_load(false),
check_triggers_store(false),
@@ -319,7 +322,7 @@ reg_t mmu_t::s2xlate(reg_t gva, reg_t gpa, access_type type, access_type trap_ty
throw_access_exception(gva, trap_type);
}
reg_t pte = vm.ptesize == 4 ? from_le(*(uint32_t*)ppte) : from_le(*(uint64_t*)ppte);
reg_t pte = vm.ptesize == 4 ? from_target(*(uint32_t*)ppte) : from_target(*(uint64_t*)ppte);
reg_t ppn = pte >> PTE_PPN_SHIFT;
if (PTE_TABLE(pte)) { // next level of page table
@@ -341,7 +344,7 @@ reg_t mmu_t::s2xlate(reg_t gva, reg_t gpa, access_type type, access_type trap_ty
if ((pte & ad) != ad) {
if (!pmp_ok(pte_paddr, vm.ptesize, STORE, PRV_S))
throw_access_exception(gva, trap_type);
*(uint32_t*)ppte |= to_le((uint32_t)ad);
*(uint32_t*)ppte |= to_target((uint32_t)ad);
}
#else
// take exception if access or possibly dirty bit is not set.
@@ -392,7 +395,7 @@ reg_t mmu_t::walk(reg_t addr, access_type type, reg_t mode, bool virt, bool mxr)
if (!ppte || !pmp_ok(pte_paddr, vm.ptesize, LOAD, PRV_S))
throw_access_exception(addr, type);
reg_t pte = vm.ptesize == 4 ? from_le(*(uint32_t*)ppte) : from_le(*(uint64_t*)ppte);
reg_t pte = vm.ptesize == 4 ? from_target(*(uint32_t*)ppte) : from_target(*(uint64_t*)ppte);
reg_t ppn = pte >> PTE_PPN_SHIFT;
if (PTE_TABLE(pte)) { // next level of page table
@@ -414,7 +417,7 @@ reg_t mmu_t::walk(reg_t addr, access_type type, reg_t mode, bool virt, bool mxr)
if ((pte & ad) != ad) {
if (!pmp_ok(pte_paddr, vm.ptesize, STORE, PRV_S))
throw_access_exception(addr, type);
*(uint32_t*)ppte |= to_le((uint32_t)ad);
*(uint32_t*)ppte |= to_target((uint32_t)ad);
}
#else
// take exception if access or possibly dirty bit is not set.

57
riscv/mmu.h

@@ -101,10 +101,10 @@ public:
size_t size = sizeof(type##_t); \
if (likely(tlb_load_tag[vpn % TLB_ENTRIES] == vpn)) { \
if (proc) READ_MEM(addr, size); \
return from_le(*(type##_t*)(tlb_data[vpn % TLB_ENTRIES].host_offset + addr)); \
return from_target(*(type##_t*)(tlb_data[vpn % TLB_ENTRIES].host_offset + addr)); \
} \
if (unlikely(tlb_load_tag[vpn % TLB_ENTRIES] == (vpn | TLB_CHECK_TRIGGERS))) { \
type##_t data = from_le(*(type##_t*)(tlb_data[vpn % TLB_ENTRIES].host_offset + addr)); \
type##_t data = from_target(*(type##_t*)(tlb_data[vpn % TLB_ENTRIES].host_offset + addr)); \
if (!matched_trigger) { \
matched_trigger = trigger_exception(OPERATION_LOAD, addr, data); \
if (matched_trigger) \
@@ -118,7 +118,7 @@ public:
if (proc) READ_MEM(addr, size); \
if (xlate_flags) \
flush_tlb(); \
return from_le(res); \
return from_target(res); \
}
// load value from memory at aligned address; zero extend to register width
@@ -165,7 +165,7 @@ public:
size_t size = sizeof(type##_t); \
if (likely(tlb_store_tag[vpn % TLB_ENTRIES] == vpn)) { \
if (proc) WRITE_MEM(addr, val, size); \
*(type##_t*)(tlb_data[vpn % TLB_ENTRIES].host_offset + addr) = to_le(val); \
*(type##_t*)(tlb_data[vpn % TLB_ENTRIES].host_offset + addr) = to_target(val); \
} \
else if (unlikely(tlb_store_tag[vpn % TLB_ENTRIES] == (vpn | TLB_CHECK_TRIGGERS))) { \
if (!matched_trigger) { \
@@ -174,11 +174,11 @@ public:
throw *matched_trigger; \
} \
if (proc) WRITE_MEM(addr, val, size); \
*(type##_t*)(tlb_data[vpn % TLB_ENTRIES].host_offset + addr) = to_le(val); \
*(type##_t*)(tlb_data[vpn % TLB_ENTRIES].host_offset + addr) = to_target(val); \
} \
else { \
type##_t le_val = to_le(val); \
store_slow_path(addr, sizeof(type##_t), (const uint8_t*)&le_val, (xlate_flags)); \
type##_t target_val = to_target(val); \
store_slow_path(addr, sizeof(type##_t), (const uint8_t*)&target_val, (xlate_flags)); \
if (proc) WRITE_MEM(addr, val, size); \
} \
if (xlate_flags) \
@@ -342,6 +342,42 @@ public:
#endif
}
void set_target_big_endian(bool enable)
{
#ifdef RISCV_ENABLE_DUAL_ENDIAN
target_big_endian = enable;
#else
assert(enable == false);
#endif
}
bool is_target_big_endian()
{
#ifdef RISCV_ENABLE_DUAL_ENDIAN
return target_big_endian;
#else
return false;
#endif
}
template<typename T> inline T from_target(T n) const
{
#ifdef RISCV_ENABLE_DUAL_ENDIAN
return target_big_endian? from_be(n) : from_le(n);
#else
return from_le(n);
#endif
}
template<typename T> inline T to_target(T n) const
{
#ifdef RISCV_ENABLE_DUAL_ENDIAN
return target_big_endian? to_be(n) : to_le(n);
#else
return to_le(n);
#endif
}
private:
simif_t* sim;
processor_t* proc;
@@ -394,9 +430,9 @@ private:
}
if (unlikely(tlb_insn_tag[vpn % TLB_ENTRIES] == (vpn | TLB_CHECK_TRIGGERS))) {
uint16_t* ptr = (uint16_t*)(tlb_data[vpn % TLB_ENTRIES].host_offset + addr);
int match = proc->trigger_match(OPERATION_EXECUTE, addr, from_le(*ptr));
int match = proc->trigger_match(OPERATION_EXECUTE, addr, from_target(*ptr));
if (match >= 0) {
throw trigger_matched_t(match, OPERATION_EXECUTE, addr, from_le(*ptr));
throw trigger_matched_t(match, OPERATION_EXECUTE, addr, from_target(*ptr));
}
}
return result;
@@ -424,6 +460,9 @@ private:
reg_t pmp_homogeneous(reg_t addr, reg_t len);
reg_t pmp_ok(reg_t addr, reg_t len, access_type type, reg_t mode);
#ifdef RISCV_ENABLE_DUAL_ENDIAN
bool target_big_endian;
#endif
bool check_triggers_fetch;
bool check_triggers_load;
bool check_triggers_store;

5
riscv/riscv.ac

@@ -45,3 +45,8 @@ AC_ARG_ENABLE([misaligned], AS_HELP_STRING([--enable-misaligned], [Enable hardwa
AS_IF([test "x$enable_misaligned" = "xyes"], [
AC_DEFINE([RISCV_ENABLE_MISALIGNED],,[Enable hardware support for misaligned loads and stores])
])
AC_ARG_ENABLE([dual-endian], AS_HELP_STRING([--enable-dual-endian], [Enable support for running target in either endianness]))
AS_IF([test "x$enable_dual_endian" = "xyes"], [
AC_DEFINE([RISCV_ENABLE_DUAL_ENDIAN],,[Enable support for running target in either endianness])
])

44
riscv/sim.cc

@@ -254,8 +254,22 @@ void sim_t::set_rom()
(uint32_t) (start_pc & 0xffffffff),
(uint32_t) (start_pc >> 32)
};
for(int i = 0; i < reset_vec_size; i++)
if (get_target_endianness() == memif_endianness_big) {
int i;
// Instructions are little endian
for (i = 0; reset_vec[i] != 0; i++)
reset_vec[i] = to_le(reset_vec[i]);
// Data is big endian
for (; i < reset_vec_size; i++)
reset_vec[i] = to_be(reset_vec[i]);
// Correct the high/low order of 64-bit start PC
if (get_core(0)->get_xlen() != 32)
std::swap(reset_vec[reset_vec_size-2], reset_vec[reset_vec_size-1]);
} else {
for (int i = 0; i < reset_vec_size; i++)
reset_vec[i] = to_le(reset_vec[i]);
}
std::vector<char> rom((char*)reset_vec, (char*)reset_vec + sizeof(reset_vec));
@@ -315,7 +329,7 @@ void sim_t::idle()
void sim_t::read_chunk(addr_t taddr, size_t len, void* dst)
{
assert(len == 8);
auto data = to_le(debug_mmu->load_uint64(taddr));
auto data = debug_mmu->to_target(debug_mmu->load_uint64(taddr));
memcpy(dst, &data, sizeof data);
}
@@ -324,7 +338,31 @@ void sim_t::write_chunk(addr_t taddr, size_t len, const void* src)
assert(len == 8);
uint64_t data;
memcpy(&data, src, sizeof data);
debug_mmu->store_uint64(taddr, from_le(data));
debug_mmu->store_uint64(taddr, debug_mmu->from_target(data));
}
void sim_t::set_target_endianness(memif_endianness_t endianness)
{
#ifdef RISCV_ENABLE_DUAL_ENDIAN
assert(endianness == memif_endianness_little || endianness == memif_endianness_big);
bool enable = endianness == memif_endianness_big;
debug_mmu->set_target_big_endian(enable);
for (size_t i = 0; i < procs.size(); i++) {
procs[i]->get_mmu()->set_target_big_endian(enable);
}
#else
assert(endianness == memif_endianness_little);
#endif
}
memif_endianness_t sim_t::get_target_endianness() const
{
#ifdef RISCV_ENABLE_DUAL_ENDIAN
return debug_mmu->is_target_big_endian()? memif_endianness_big : memif_endianness_little;
#else
return memif_endianness_little;
#endif
}
void sim_t::proc_reset(unsigned id)

2
riscv/sim.h

@@ -138,6 +138,8 @@ private:
void write_chunk(addr_t taddr, size_t len, const void* src);
size_t chunk_align() { return 8; }
size_t chunk_max_size() { return 8; }
void set_target_endianness(memif_endianness_t endianness);
memif_endianness_t get_target_endianness() const;
public:
// Initialize this after procs, because in debug_module_t::reset() we

Loading…
Cancel
Save