Browse Source

New virtual memory implementation (Sv39)

pull/18/head
Andrew Waterman 11 years ago
parent
commit
41fa048e93
  1. 55
      riscv/encoding.h
  2. 80
      riscv/mmu.cc
  3. 7
      riscv/mmu.h

55
riscv/encoding.h

@@ -63,18 +63,39 @@
#define IMPL_ROCKET 2
// page table entry (PTE) fields
#define PTE_V 0x001 // Entry is a page Table descriptor
#define PTE_T 0x002 // Entry is a page Table, not a terminal node
#define PTE_G 0x004 // Global
#define PTE_UR 0x008 // User Read permission
#define PTE_UW 0x010 // User Write permission
#define PTE_UX 0x020 // User eXecute permission
#define PTE_SR 0x040 // Supervisor Read permission
#define PTE_SW 0x080 // Supervisor Write permission
#define PTE_SX 0x100 // Supervisor eXecute permission
#define PTE_R 0x200 // Referenced
#define PTE_D 0x400 // Dirty
#define PTE_PERM (PTE_SR | PTE_SW | PTE_SX | PTE_UR | PTE_UW | PTE_UX)
#define PTE_TYPE 0x007
#define PTE_PERM 0x018
#define PTE_G 0x020 // Global
#define PTE_R 0x040 // Referenced
#define PTE_D 0x080 // Dirty
#define PTE_SOFT 0x300 // Reserved for Software
#define PTE_PPN_SHIFT 10
#define PTE_TYPE_INVALID 0
#define PTE_TYPE_TABLE 1
#define PTE_TYPE_U 2
#define PTE_TYPE_S 3
#define PTE_TYPE_US 4
#define PTE_TYPE_US_SR 4
#define PTE_TYPE_US_SRW 5
#define PTE_TYPE_US_SRX 6
#define PTE_TYPE_US_SRWX 7
#define PROT_TO_PERM(PROT) ((((PROT) & PROT_EXEC) ? 2 : 0) | (((PROT) & PROT_WRITE) ? 1 : 0))
#define PTE_CREATE(PPN, PERM_U, PERM_S) \
(((PPN) << PTE_PPN_SHIFT) | (PROT_TO_PERM(PERM_U) << 3) | \
((PERM_U) && (PERM_S) ? (PTE_TYPE_US | PROT_TO_PERM(PERM_S)) : \
(PERM_S) ? (PTE_TYPE_S | (PROT_TO_PERM(PERM_S) << 3)) : \
(PERM_U) ? PTE_TYPE_U : 0))
#define PTE_UR(PTE) ((0xF4F4F4F4U >> ((PTE) & 0x1f)) & 1)
#define PTE_UW(PTE) ((0xF400F400U >> ((PTE) & 0x1f)) & 1)
#define PTE_UX(PTE) ((0xF4F40000U >> ((PTE) & 0x1f)) & 1)
#define PTE_SR(PTE) ((0xF8F8F8F8U >> ((PTE) & 0x1f)) & 1)
#define PTE_SW(PTE) ((0xA8A0A8A0U >> ((PTE) & 0x1f)) & 1)
#define PTE_SX(PTE) ((0xC8C8C0C0U >> ((PTE) & 0x1f)) & 1)
#define PTE_CHECK_PERM(PTE, SUPERVISOR, WRITE, EXEC) \
((SUPERVISOR) ? ((WRITE) ? PTE_SW(PTE) : (EXEC) ? PTE_SX(PTE) : PTE_SR(PTE)) \
: ((WRITE) ? PTE_UW(PTE) : (EXEC) ? PTE_UX(PTE) : PTE_UR(PTE)))
#ifdef __riscv
@@ -84,15 +105,15 @@
# define MSTATUS_HA MSTATUS64_HA
# define MSTATUS_SD MSTATUS64_SD
# define SSTATUS_SD SSTATUS64_SD
# define RISCV_PGLEVELS 3
# define RISCV_PGSHIFT 13
# define RISCV_PGLEVELS 3 /* Sv39 */
# define RISCV_PGLEVEL_BITS 9
#else
# define MSTATUS_SD MSTATUS32_SD
# define SSTATUS_SD SSTATUS32_SD
# define RISCV_PGLEVELS 2
# define RISCV_PGSHIFT 12
# define RISCV_PGLEVELS 2 /* Sv32 */
# define RISCV_PGLEVEL_BITS 10
#endif
#define RISCV_PGLEVEL_BITS 10
#define RISCV_PGSHIFT 12
#define RISCV_PGSIZE (1 << RISCV_PGSHIFT)
#ifndef __ASSEMBLER__

80
riscv/mmu.cc

@@ -34,7 +34,6 @@ void* mmu_t::refill_tlb(reg_t addr, reg_t bytes, bool store, bool fetch)
reg_t idx = (addr >> PGSHIFT) % TLB_ENTRIES;
reg_t expected_tag = addr >> PGSHIFT;
reg_t pte = 0;
reg_t mstatus = proc ? proc->state.mstatus : 0;
bool vm_disabled = get_field(mstatus, MSTATUS_VM) == VM_MBARE;
@@ -43,52 +42,48 @@ void* mmu_t::refill_tlb(reg_t addr, reg_t bytes, bool store, bool fetch)
bool mprv_m = get_field(mstatus, MSTATUS_MPRV) == PRV_M;
bool mprv_s = get_field(mstatus, MSTATUS_MPRV) == PRV_S;
reg_t want_perm = store ? (mode_s || (mode_m && mprv_s) ? PTE_SW : PTE_UW) :
!fetch ? (mode_s || (mode_m && mprv_s) ? PTE_SR : PTE_UR) :
(mode_s ? PTE_SX : PTE_UX);
reg_t pgbase;
if (vm_disabled || (mode_m && (mprv_m || fetch))) {
pgbase = addr & -PGSIZE;
// virtual memory is disabled. merely check legality of physical address.
if (addr < memsz) {
// produce a fake PTE for the TLB's benefit.
pte = PTE_V | PTE_UX | PTE_SX | ((addr >> PGSHIFT) << PGSHIFT);
if (vm_disabled || !(mode_m && !mprv_m))
pte |= PTE_UR | PTE_SR | PTE_UW | PTE_SW;
}
if (addr >= memsz)
pgbase = -1;
} else {
pte = walk(addr, want_perm);
}
if (!(pte & PTE_V) || !(pte & want_perm)) {
if (fetch)
throw trap_instruction_access_fault(addr);
if (store)
throw trap_store_access_fault(addr);
throw trap_load_access_fault(addr);
pgbase = walk(addr, mode_s || (mode_m && mprv_s), store, fetch);
}
reg_t pgoff = addr & (PGSIZE-1);
reg_t pgbase = pte >> PGSHIFT << PGSHIFT;
reg_t paddr = pgbase + pgoff;
if (pgbase == reg_t(-1)) {
if (fetch) throw trap_instruction_access_fault(addr);
else if (store) throw trap_store_access_fault(addr);
else throw trap_load_access_fault(addr);
}
if (unlikely(tracer.interested_in_range(pgbase, pgbase + PGSIZE, store, fetch)))
tracer.trace(paddr, bytes, store, fetch);
else
{
tlb_load_tag[idx] = (pte & (PTE_UR|PTE_SR)) ? expected_tag : -1;
tlb_store_tag[idx] = (pte & (PTE_UW|PTE_SW)) && store ? expected_tag : -1;
tlb_insn_tag[idx] = (pte & (PTE_UX|PTE_SX)) ? expected_tag : -1;
if (tlb_load_tag[idx] != expected_tag) tlb_load_tag[idx] = -1;
if (tlb_store_tag[idx] != expected_tag) tlb_store_tag[idx] = -1;
if (tlb_insn_tag[idx] != expected_tag) tlb_insn_tag[idx] = -1;
if (fetch) tlb_insn_tag[idx] = expected_tag;
else if (store) tlb_store_tag[idx] = expected_tag;
else tlb_load_tag[idx] = expected_tag;
tlb_data[idx] = mem + pgbase - (addr & ~(PGSIZE-1));
}
return mem + paddr;
}
pte_t mmu_t::walk(reg_t addr, reg_t perm)
pte_t mmu_t::walk(reg_t addr, bool supervisor, bool store, bool fetch)
{
reg_t msb_mask = -(reg_t(1) << (VA_BITS-1));
if ((addr & msb_mask) != 0 && (addr & msb_mask) != msb_mask)
return 0; // address isn't properly sign-extended
return -1; // address isn't properly sign-extended
reg_t base = proc->get_state()->sptbr;
@@ -99,33 +94,34 @@ pte_t mmu_t::walk(reg_t addr, reg_t perm)
// check that physical address of PTE is legal
reg_t pte_addr = base + idx*sizeof(pte_t);
if (pte_addr >= memsz)
return 0;
return -1;
pte_t* ppte = (pte_t*)(mem+pte_addr);
reg_t ppn = *ppte >> PTE_PPN_SHIFT;
if (!(*ppte & PTE_V)) { // invalid mapping
return 0;
} else if (*ppte & PTE_T) { // next level of page table
base = (*ppte >> PGSHIFT) << PGSHIFT;
if ((*ppte & PTE_TYPE) == PTE_TYPE_TABLE) { // next level of page table
base = ppn << PGSHIFT;
} else {
// we've found the PTE. set referenced and possibly dirty bits.
if (*ppte & perm) {
*ppte |= PTE_R;
if (perm & (PTE_SW | PTE_UW))
*ppte |= PTE_D;
}
// we've found the PTE. check the permissions.
if (!PTE_CHECK_PERM(*ppte, supervisor, store, fetch))
return -1;
// set referenced and possibly dirty bits.
*ppte |= PTE_R;
if (store)
*ppte |= PTE_D;
// for superpage mappings, make a fake leaf PTE for the TLB's benefit.
reg_t vpn = addr >> PGSHIFT;
reg_t pte = *ppte | ((vpn & ((1<<(ptshift))-1)) << PGSHIFT);
reg_t addr = (ppn | (vpn & ((reg_t(1) << ptshift) - 1))) << PGSHIFT;
// check that physical address is legal
if (((pte >> PGSHIFT) << PGSHIFT) >= memsz)
return 0;
if (addr >= memsz)
return -1;
return pte;
return addr;
}
}
return 0;
return -1;
}
void mmu_t::register_memtracer(memtracer_t* t)

7
riscv/mmu.h

@@ -14,11 +14,10 @@
// virtual memory configuration
typedef reg_t pte_t;
const reg_t LEVELS = sizeof(pte_t) == 8 ? 3 : 2;
const reg_t PTIDXBITS = 10;
const reg_t PGSHIFT = PTIDXBITS + (sizeof(pte_t) == 8 ? 3 : 2);
const reg_t PGSHIFT = 12;
const reg_t PTIDXBITS = PGSHIFT - (sizeof(pte_t) == 8 ? 3 : 2);
const reg_t PGSIZE = 1 << PGSHIFT;
const reg_t VPN_BITS = PTIDXBITS * LEVELS;
const reg_t PPN_BITS = 8*sizeof(reg_t) - PGSHIFT;
const reg_t VA_BITS = VPN_BITS + PGSHIFT;
struct insn_fetch_t
@@ -155,7 +154,7 @@ private:
void* refill_tlb(reg_t addr, reg_t bytes, bool store, bool fetch);
// perform a page table walk for a given VA; set referenced/dirty bits
pte_t walk(reg_t addr, reg_t perm);
pte_t walk(reg_t addr, bool supervisor, bool store, bool fetch);
// translate a virtual address to a physical address
void* translate(reg_t addr, reg_t bytes, bool store, bool fetch)

Loading…
Cancel
Save