Browse Source

avoid non-standard predefined macros

pull/49/head
Andrew Waterman 9 years ago
parent
commit
f6b2274af4
  1. 2
      bbl/bbl.c
  2. 2
      machine/bits.h
  3. 4
      machine/emulation.c
  4. 2
      machine/encoding.h
  5. 2
      machine/fp_asm.S
  6. 6
      machine/fp_emulation.c
  7. 4
      machine/fp_ldst.c
  8. 2
      machine/htif.h
  9. 4
      machine/misaligned_ldst.c
  10. 2
      machine/mtrap.c
  11. 4
      machine/muldiv_emulation.c
  12. 2
      machine/unprivileged_memory.h
  13. 2
      machine/vm.h
  14. 2
      pk/elf.c
  15. 2
      pk/elf.h
  16. 2
      pk/mmap.c
  17. 2
      pk/syscall.c

2
bbl/bbl.c

@@ -17,7 +17,7 @@ static void supervisor_vm_init()
pte_t* sbi_pt = (pte_t*)(info.first_vaddr_after_user + info.load_offset);
memset(sbi_pt, 0, RISCV_PGSIZE);
pte_t* middle_pt = (void*)sbi_pt + RISCV_PGSIZE;
#ifndef __riscv64
#if __riscv_xlen == 32
size_t num_middle_pts = 1;
pte_t* root_pt = middle_pt;
memset(root_pt, 0, RISCV_PGSIZE);

2
machine/bits.h

@@ -17,7 +17,7 @@
#define STR(x) XSTR(x)
#define XSTR(x) #x
#ifdef __riscv64
#if __riscv_xlen == 64
# define SLL32 sllw
# define STORE sd
# define LOAD ld

4
machine/emulation.c

@@ -35,7 +35,7 @@ void illegal_insn_trap(uintptr_t* regs, uintptr_t mcause, uintptr_t mepc)
" .word truly_illegal_insn\n"
#endif
" .word truly_illegal_insn\n"
#if !defined(__riscv_muldiv) && defined(__riscv64)
#if !defined(__riscv_muldiv) && __riscv_xlen >= 64
" .word emulate_mul_div32\n"
#else
" .word truly_illegal_insn\n"
@@ -99,7 +99,7 @@ static inline int emulate_read_csr(int num, uintptr_t mstatus, uintptr_t* result
return -1;
*result = *mtime;
return 0;
#ifdef __riscv32
#if __riscv_xlen == 32
case CSR_TIMEH:
if (!((counteren >> (CSR_TIME - CSR_CYCLE)) & 1))
return -1;

2
machine/encoding.h

@@ -150,7 +150,7 @@
#ifdef __riscv
#ifdef __riscv64
#if __riscv_xlen == 64
# define MSTATUS_SD MSTATUS64_SD
# define SSTATUS_SD SSTATUS64_SD
# define RISCV_PGLEVEL_BITS 9

2
machine/fp_asm.S

@@ -8,7 +8,7 @@
#define get_f32(which) fmv.x.s a0, which; jr t0
#define put_f32(which) fmv.s.x which, a0; jr t0
#ifdef __riscv64
#if __riscv_xlen == 64
# define get_f64(which) fmv.x.d a0, which; jr t0
# define put_f64(which) fmv.d.x which, a0; jr t0
#else

6
machine/fp_emulation.c

@@ -206,7 +206,7 @@ DECLARE_EMULATION_FUNC(emulate_fcvt_fi)
case 1: // uint32
uint_val = (uint32_t)uint_val;
break;
#ifdef __riscv64
#if __riscv_xlen == 64
case 2: // int64
negative = (int64_t)uint_val < 0;
uint_val = negative ? -uint_val : uint_val;
@@ -230,7 +230,7 @@ DECLARE_EMULATION_FUNC(emulate_fcvt_fi)
DECLARE_EMULATION_FUNC(emulate_fcvt_if)
{
int rs2_num = (insn >> 20) & 0x1f;
#ifdef __riscv64
#if __riscv_xlen == 64
if (rs2_num >= 4)
return truly_illegal_insn(regs, mcause, mepc, mstatus, insn);
#else
@@ -367,7 +367,7 @@ DECLARE_EMULATION_FUNC(emulate_fmv_fi)
if ((insn & MASK_FMV_S_X) == MATCH_FMV_S_X)
SET_F32_RD(insn, regs, rs1);
#ifdef __riscv64
#if __riscv_xlen == 64
else if ((insn & MASK_FMV_D_X) == MATCH_FMV_D_X)
SET_F64_RD(insn, regs, rs1);
#endif

4
machine/fp_ldst.c

@@ -19,7 +19,7 @@ DECLARE_EMULATION_FUNC(emulate_float_load)
if (addr % sizeof(uintptr_t) != 0)
return misaligned_load_trap(regs, mcause, mepc);
#ifdef __riscv64
#if __riscv_xlen == 64
val = load_uint64_t((void *)addr, mepc);
#else
val = load_uint32_t((void *)addr, mepc);
@@ -52,7 +52,7 @@ DECLARE_EMULATION_FUNC(emulate_float_store)
return misaligned_store_trap(regs, mcause, mepc);
val = GET_F64_RS2(insn, regs);
#ifdef __riscv64
#if __riscv_xlen == 64
store_uint64_t((void *)addr, val, mepc);
#else
store_uint32_t((void *)addr, val, mepc);

2
machine/htif.h

@@ -3,7 +3,7 @@
#include <stdint.h>
#ifdef __riscv64
#if __riscv_xlen == 64
# define TOHOST_CMD(dev, cmd, payload) \
(((uint64_t)(dev) << 56) | ((uint64_t)(cmd) << 48) | (uint64_t)(payload))
#else

4
machine/misaligned_ldst.c

@@ -19,7 +19,7 @@ void misaligned_load_trap(uintptr_t* regs, uintptr_t mcause, uintptr_t mepc)
int shift = 0, fp = 0, len;
if ((insn & MASK_LW) == MATCH_LW)
len = 4, shift = 8*(sizeof(uintptr_t) - len);
#ifdef __riscv64
#if __riscv_xlen == 64
else if ((insn & MASK_LD) == MATCH_LD)
len = 8, shift = 8*(sizeof(uintptr_t) - len);
else if ((insn & MASK_LWU) == MATCH_LWU)
@@ -62,7 +62,7 @@ void misaligned_store_trap(uintptr_t* regs, uintptr_t mcause, uintptr_t mepc)
val.intx = GET_RS2(insn, regs);
if ((insn & MASK_SW) == MATCH_SW)
len = 4;
#ifdef __riscv64
#if __riscv_xlen == 64
else if ((insn & MASK_SD) == MATCH_SD)
len = 8;
#endif

2
machine/mtrap.c

@@ -250,7 +250,7 @@ void mcall_trap(uintptr_t* regs, uintptr_t mcause, uintptr_t mepc)
retval = mcall_shutdown();
break;
case MCALL_SET_TIMER:
#ifdef __riscv32
#if __riscv_xlen == 32
retval = mcall_set_timer(arg0 + ((uint64_t)arg1 << 32));
#else
retval = mcall_set_timer(arg0);

4
machine/muldiv_emulation.c

@@ -2,7 +2,7 @@
#ifndef __riscv_muldiv
#ifdef __riscv64
#if __riscv_xlen == 64
typedef __int128 double_int;
#else
typedef int64_t double_int;
@@ -37,7 +37,7 @@ DECLARE_EMULATION_FUNC(emulate_mul_div)
SET_RD(insn, regs, val);
}
#ifdef __riscv64
#if __riscv_xlen == 64
DECLARE_EMULATION_FUNC(emulate_mul_div32)
{

2
machine/unprivileged_memory.h

@@ -39,7 +39,7 @@ DECLARE_UNPRIVILEGED_LOAD_FUNCTION(int32_t, lw)
DECLARE_UNPRIVILEGED_STORE_FUNCTION(uint8_t, sb)
DECLARE_UNPRIVILEGED_STORE_FUNCTION(uint16_t, sh)
DECLARE_UNPRIVILEGED_STORE_FUNCTION(uint32_t, sw)
#ifdef __riscv64
#if __riscv_xlen == 64
DECLARE_UNPRIVILEGED_LOAD_FUNCTION(uint32_t, lwu)
DECLARE_UNPRIVILEGED_LOAD_FUNCTION(uint64_t, ld)
DECLARE_UNPRIVILEGED_STORE_FUNCTION(uint64_t, sd)

2
machine/vm.h

@@ -5,7 +5,7 @@
#include <stdint.h>
#define MEGAPAGE_SIZE ((uintptr_t)(RISCV_PGSIZE << RISCV_PGLEVEL_BITS))
#ifdef __riscv64
#if __riscv_xlen == 64
# define VM_CHOICE VM_SV39
# define VA_BITS 39
# define GIGAPAGE_SIZE (MEGAPAGE_SIZE << RISCV_PGLEVEL_BITS)

2
pk/elf.c

@@ -36,7 +36,7 @@ void load_elf(const char* fn, elf_info* info)
eh.e_ident[2] == 'L' && eh.e_ident[3] == 'F'))
goto fail;
#ifdef __riscv64
#if __riscv_xlen == 64
assert(IS_ELF64(eh));
#else
assert(IS_ELF32(eh));

2
pk/elf.h

@@ -12,7 +12,7 @@
#define IS_ELF32(hdr) (IS_ELF(hdr) && (hdr).e_ident[4] == 1)
#define IS_ELF64(hdr) (IS_ELF(hdr) && (hdr).e_ident[4] == 2)
#ifdef __riscv64
#if __riscv_xlen == 64
# define Elf_Ehdr Elf64_Ehdr
# define Elf_Phdr Elf64_Phdr
#else

2
pk/mmap.c

@@ -385,7 +385,7 @@ void populate_mapping(const void* start, size_t size, int prot)
uintptr_t pk_vm_init()
{
#ifdef __riscv32
#if __riscv_xlen == 32
// We can't support more than 2 GiB of memory in RV32
mem_size = MIN(mem_size, 1U << 31);
#endif

2
pk/syscall.c

@@ -320,7 +320,7 @@ int sys_getuid()
uintptr_t sys_mmap(uintptr_t addr, size_t length, int prot, int flags, int fd, off_t offset)
{
#ifdef __riscv32
#if __riscv_xlen == 32
if (offset != (offset << 12 >> 12))
return -ENXIO;
offset <<= 12;

Loading…
Cancel
Save