Browse Source

Use HAVE_INT128 instead of __SIZEOF_INT128__

Make sure that the configure decision on 128-bit is consistent during
compilation.

Also move uint128_t definition.
pull/1368/head
Gianluca Guida 3 years ago
parent
commit
3010cb4175
  1. 5
      riscv/decode.h
  2. 5
      riscv/decode_macros.h
  3. 2
      riscv/processor.cc

5
riscv/decode.h

@@ -19,11 +19,6 @@ typedef int64_t sreg_t;
typedef uint64_t reg_t;
typedef float128_t freg_t;
#ifdef __SIZEOF_INT128__
typedef __int128 int128_t;
typedef unsigned __int128 uint128_t;
#endif
const int NXPR = 32;
const int NFPR = 32;
const int NVPR = 32;

5
riscv/decode_macros.h

@@ -10,6 +10,11 @@
#include "softfloat_types.h"
#include "specialize.h"
#ifdef HAVE_INT128
typedef __int128 int128_t;
typedef unsigned __int128 uint128_t;
#endif
// helpful macros, etc
#define MMU (*p->get_mmu())
#define STATE (*p->get_state())

2
riscv/processor.cc

@@ -42,7 +42,7 @@ processor_t::processor_t(const isa_parser_t *isa, const cfg_t *cfg,
VU.p = this;
TM.proc = this;
#ifndef __SIZEOF_INT128__
#ifndef HAVE_INT128
if (extension_enabled('V')) {
fprintf(stderr, "V extension is not supported on platforms without __int128 type\n");
abort();

Loading…
Cancel
Save