Browse Source

*: Remove ppc host support

Move the files from host/include/ppc to host/include/ppc64,
replacing the stub headers that redirected to ppc.

Remove linux-user/include/host/ppc.
Remove common-user/host/ppc.
Remove cpu == ppc tests from meson.

Reviewed-by: Thomas Huth <thuth@redhat.com>
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
pull/316/head
Richard Henderson 4 months ago
parent
commit
e23d76d896
  1. 107
      common-user/host/ppc/safe-syscall.inc.S
  2. 30
      host/include/ppc/host/cpuinfo.h
  3. 182
      host/include/ppc/host/crypto/aes-round.h
  4. 31
      host/include/ppc64/host/cpuinfo.h
  5. 183
      host/include/ppc64/host/crypto/aes-round.h
  6. 39
      linux-user/include/host/ppc/host-signal.h
  7. 4
      meson.build

107
common-user/host/ppc/safe-syscall.inc.S

@@ -1,107 +0,0 @@
/*
 * safe-syscall.inc.S : host-specific assembly fragment
 * to handle signals occurring at the same time as system calls.
 * This is intended to be included by common-user/safe-syscall.S
 *
 * Copyright (C) 2022 Linaro, Ltd.
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

/*
 * Standardize on the _CALL_FOO symbols used by GCC:
 * Apple XCode does not define _CALL_DARWIN.
 * Clang defines _CALL_ELF (64-bit) but not _CALL_SYSV (32-bit).
 */
#if !defined(_CALL_SYSV) && \
    !defined(_CALL_DARWIN) && \
    !defined(_CALL_AIX) && \
    !defined(_CALL_ELF)
# if defined(__APPLE__)
# define _CALL_DARWIN
# elif defined(__ELF__) && TCG_TARGET_REG_BITS == 32
# define _CALL_SYSV
# else
# error "Unknown ABI"
# endif
#endif

/* Only the 32-bit SysV ELF ABI is implemented below. */
#ifndef _CALL_SYSV
# error "Unsupported ABI"
#endif

        .global safe_syscall_base
        .global safe_syscall_start
        .global safe_syscall_end
        .type safe_syscall_base, @function

        .text

/*
 * This is the entry point for making a system call. The calling
 * convention here is that of a C varargs function with the
 * first argument an 'int *' to the signal_pending flag, the
 * second one the system call number (as a 'long'), and all further
 * arguments being syscall arguments (also 'long').
 */
safe_syscall_base:
        .cfi_startproc
        /*
         * Allocate an 8-byte stack frame and save r30, the only
         * call-saved register used below, with matching CFI notes
         * so the unwinder can locate both.
         */
        stwu 1, -8(1)
        .cfi_def_cfa_offset 8
        stw 30, 4(1)
        .cfi_offset 30, -4
        /*
         * We enter with r3 == &signal_pending
         *               r4 == syscall number
         *               r5 ... r10 == syscall arguments
         *               and return the result in r3
         * and the syscall instruction needs
         *               r0 == syscall number
         *               r3 ... r8 == syscall arguments
         *               and returns the result in r3
         * Shuffle everything around appropriately.
         */
        mr 30, 3                /* signal_pending */
        mr 0, 4                 /* syscall number */
        mr 3, 5                 /* syscall arguments */
        mr 4, 6
        mr 5, 7
        mr 6, 8
        mr 7, 9
        mr 8, 10
        /*
         * This next sequence of code works in conjunction with the
         * rewind_if_safe_syscall_function(). If a signal is taken
         * and the interrupted PC is anywhere between 'safe_syscall_start'
         * and 'safe_syscall_end' then we rewind it to 'safe_syscall_start'.
         * The code sequence must therefore be able to cope with this, and
         * the syscall instruction must be the final one in the sequence.
         */
safe_syscall_start:
        /* if signal_pending is non-zero, don't do the call */
        lwz 12, 0(30)
        cmpwi 0, 12, 0
        bne- 2f
        sc
safe_syscall_end:
        /* code path when we did execute the syscall */
        lwz 30, 4(1)            /* restore r30 */
        addi 1, 1, 8            /* restore stack */
        .cfi_restore 30
        .cfi_def_cfa_offset 0
        /*
         * The ppc Linux syscall convention reports failure via the
         * condition-register SO bit, so "branch if not SO" returns
         * directly on success; otherwise fall through to set errno.
         */
        bnslr+                  /* return on success */
        b safe_syscall_set_errno_tail

        /* code path when we didn't execute the syscall */
2:      lwz 30, 4(1)
        addi 1, 1, 8
        addi 3, 0, QEMU_ERESTARTSYS
        b safe_syscall_set_errno_tail
        .cfi_endproc
        .size safe_syscall_base, .-safe_syscall_base

30
host/include/ppc/host/cpuinfo.h

@@ -1,30 +0,0 @@
/*
 * SPDX-License-Identifier: GPL-2.0-or-later
 * Host specific cpu identification for ppc.
 */

#ifndef HOST_CPUINFO_H
#define HOST_CPUINFO_H

/* Digested version of <cpuid.h> */

#define CPUINFO_ALWAYS (1u << 0) /* so cpuinfo is nonzero */
#define CPUINFO_V2_06 (1u << 1) /* Power ISA v2.06 (POWER7 generation) */
#define CPUINFO_V2_07 (1u << 2) /* Power ISA v2.07 (POWER8 generation) */
#define CPUINFO_V3_0 (1u << 3) /* Power ISA v3.0 (POWER9 generation) */
#define CPUINFO_V3_1 (1u << 4) /* Power ISA v3.1 (POWER10 generation) */
#define CPUINFO_ISEL (1u << 5) /* isel (integer select) available */
#define CPUINFO_ALTIVEC (1u << 6) /* AltiVec/VMX vector unit available */
#define CPUINFO_VSX (1u << 7) /* VSX vector-scalar extension available */
#define CPUINFO_CRYPTO (1u << 8) /* v2.07 vector crypto (vcipher etc.) */

/* Initialized with a constructor. */
extern unsigned cpuinfo;

/*
 * We cannot rely on constructor ordering, so other constructors must
 * use the function interface rather than the variable above.
 */
unsigned cpuinfo_init(void);

#endif /* HOST_CPUINFO_H */

182
host/include/ppc/host/crypto/aes-round.h

@@ -1,182 +0,0 @@
/*
 * Power v2.07 specific aes acceleration.
 * SPDX-License-Identifier: GPL-2.0-or-later
 */

#ifndef PPC_HOST_CRYPTO_AES_ROUND_H
#define PPC_HOST_CRYPTO_AES_ROUND_H

#ifdef __ALTIVEC__
#include "host/cpuinfo.h"

#ifdef __CRYPTO__
/* The compiler guarantees the v2.07 crypto instructions are present. */
# define HAVE_AES_ACCEL true
#else
/* Otherwise, probe at runtime via the cpuinfo constructor. */
# define HAVE_AES_ACCEL likely(cpuinfo & CPUINFO_CRYPTO)
#endif
#define ATTR_AES_ACCEL

/*
 * While there is <altivec.h>, both gcc and clang "aid" with the
 * endianness issues in different ways. Just use inline asm instead.
 */

/*
 * Load 16 bytes from *p.
 * Bytes in memory are host-endian; bytes in register are @be.
 */
static inline AESStateVec aes_accel_ld(const AESState *p, bool be)
{
    AESStateVec r;

    if (be) {
        asm("lvx %0, 0, %1" : "=v"(r) : "r"(p), "m"(*p));
    } else if (HOST_BIG_ENDIAN) {
        /* Big-endian host, little-endian data: byte-reverse via vperm. */
        AESStateVec rev = {
            15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0,
        };
        asm("lvx %0, 0, %1\n\t"
            "vperm %0, %0, %0, %2"
            : "=v"(r) : "r"(p), "v"(rev), "m"(*p));
    } else {
#ifdef __POWER9_VECTOR__
        asm("lxvb16x %x0, 0, %1" : "=v"(r) : "r"(p), "m"(*p));
#else
        asm("lxvd2x %x0, 0, %1\n\t"
            "xxpermdi %x0, %x0, %x0, 2"
            : "=v"(r) : "r"(p), "m"(*p));
#endif
    }
    return r;
}

/*
 * Store 16 bytes to *p; the counterpart of aes_accel_ld.
 * Marked 'inline' to match the sibling helpers: a plain 'static'
 * function defined in a header is emitted per translation unit and
 * draws -Wunused-function warnings wherever it is not called.
 */
static inline void aes_accel_st(AESState *p, AESStateVec r, bool be)
{
    if (be) {
        asm("stvx %1, 0, %2" : "=m"(*p) : "v"(r), "r"(p));
    } else if (HOST_BIG_ENDIAN) {
        AESStateVec rev = {
            15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0,
        };
        asm("vperm %1, %1, %1, %2\n\t"
            "stvx %1, 0, %3"
            : "=m"(*p), "+v"(r) : "v"(rev), "r"(p));
    } else {
#ifdef __POWER9_VECTOR__
        asm("stxvb16x %x1, 0, %2" : "=m"(*p) : "v"(r), "r"(p));
#else
        asm("xxpermdi %x1, %x1, %x1, 2\n\t"
            "stxvd2x %x1, 0, %2"
            : "=m"(*p), "+v"(r) : "r"(p));
#endif
    }
}

/* Thin wrappers around the four v2.07 AES round instructions. */
static inline AESStateVec aes_accel_vcipher(AESStateVec d, AESStateVec k)
{
    asm("vcipher %0, %0, %1" : "+v"(d) : "v"(k));
    return d;
}

static inline AESStateVec aes_accel_vncipher(AESStateVec d, AESStateVec k)
{
    asm("vncipher %0, %0, %1" : "+v"(d) : "v"(k));
    return d;
}

static inline AESStateVec aes_accel_vcipherlast(AESStateVec d, AESStateVec k)
{
    asm("vcipherlast %0, %0, %1" : "+v"(d) : "v"(k));
    return d;
}

static inline AESStateVec aes_accel_vncipherlast(AESStateVec d, AESStateVec k)
{
    asm("vncipherlast %0, %0, %1" : "+v"(d) : "v"(k));
    return d;
}

/*
 * MixColumns alone: undo a last round with a zero key, then apply a
 * full round with a zero key, leaving only MixColumns.
 */
static inline void
aesenc_MC_accel(AESState *ret, const AESState *st, bool be)
{
    AESStateVec t, z = { };

    t = aes_accel_ld(st, be);
    t = aes_accel_vncipherlast(t, z);
    t = aes_accel_vcipher(t, z);
    aes_accel_st(ret, t, be);
}

/* SubBytes + ShiftRows + AddRoundKey: exactly vcipherlast. */
static inline void
aesenc_SB_SR_AK_accel(AESState *ret, const AESState *st,
                      const AESState *rk, bool be)
{
    AESStateVec t, k;

    t = aes_accel_ld(st, be);
    k = aes_accel_ld(rk, be);
    t = aes_accel_vcipherlast(t, k);
    aes_accel_st(ret, t, be);
}

/* Full encrypt round: exactly vcipher. */
static inline void
aesenc_SB_SR_MC_AK_accel(AESState *ret, const AESState *st,
                         const AESState *rk, bool be)
{
    AESStateVec t, k;

    t = aes_accel_ld(st, be);
    k = aes_accel_ld(rk, be);
    t = aes_accel_vcipher(t, k);
    aes_accel_st(ret, t, be);
}

/*
 * InvMixColumns alone: undo an encrypt last round with a zero key,
 * then apply a decrypt round with a zero key, leaving only IMC.
 */
static inline void
aesdec_IMC_accel(AESState *ret, const AESState *st, bool be)
{
    AESStateVec t, z = { };

    t = aes_accel_ld(st, be);
    t = aes_accel_vcipherlast(t, z);
    t = aes_accel_vncipher(t, z);
    aes_accel_st(ret, t, be);
}

/* InvSubBytes + InvShiftRows + AddRoundKey: exactly vncipherlast. */
static inline void
aesdec_ISB_ISR_AK_accel(AESState *ret, const AESState *st,
                        const AESState *rk, bool be)
{
    AESStateVec t, k;

    t = aes_accel_ld(st, be);
    k = aes_accel_ld(rk, be);
    t = aes_accel_vncipherlast(t, k);
    aes_accel_st(ret, t, be);
}

/* Decrypt round with InvMixColumns applied after the key add. */
static inline void
aesdec_ISB_ISR_AK_IMC_accel(AESState *ret, const AESState *st,
                            const AESState *rk, bool be)
{
    AESStateVec t, k;

    t = aes_accel_ld(st, be);
    k = aes_accel_ld(rk, be);
    t = aes_accel_vncipher(t, k);
    aes_accel_st(ret, t, be);
}

/*
 * Decrypt round with the key added last: vncipher with a zero key
 * yields ISB + ISR + IMC, then xor in the untransformed round key.
 */
static inline void
aesdec_ISB_ISR_IMC_AK_accel(AESState *ret, const AESState *st,
                            const AESState *rk, bool be)
{
    AESStateVec t, k, z = { };

    t = aes_accel_ld(st, be);
    k = aes_accel_ld(rk, be);
    t = aes_accel_vncipher(t, z);
    aes_accel_st(ret, t ^ k, be);
}
#else
/* Without ALTIVEC, we can't even write inline assembly. */
#include "host/include/generic/host/crypto/aes-round.h"
#endif /* __ALTIVEC__ */

#endif /* PPC_HOST_CRYPTO_AES_ROUND_H */

31
host/include/ppc64/host/cpuinfo.h

@@ -1 +1,30 @@
#include "host/include/ppc/host/cpuinfo.h"
/*
 * SPDX-License-Identifier: GPL-2.0-or-later
 * Host specific cpu identification for ppc.
 */

#ifndef HOST_CPUINFO_H
#define HOST_CPUINFO_H

/* Digested version of <cpuid.h> */

#define CPUINFO_ALWAYS (1u << 0) /* so cpuinfo is nonzero */
#define CPUINFO_V2_06 (1u << 1) /* Power ISA v2.06 (POWER7 generation) */
#define CPUINFO_V2_07 (1u << 2) /* Power ISA v2.07 (POWER8 generation) */
#define CPUINFO_V3_0 (1u << 3) /* Power ISA v3.0 (POWER9 generation) */
#define CPUINFO_V3_1 (1u << 4) /* Power ISA v3.1 (POWER10 generation) */
#define CPUINFO_ISEL (1u << 5) /* isel (integer select) available */
#define CPUINFO_ALTIVEC (1u << 6) /* AltiVec/VMX vector unit available */
#define CPUINFO_VSX (1u << 7) /* VSX vector-scalar extension available */
#define CPUINFO_CRYPTO (1u << 8) /* v2.07 vector crypto (vcipher etc.) */

/* Initialized with a constructor. */
extern unsigned cpuinfo;

/*
 * We cannot rely on constructor ordering, so other constructors must
 * use the function interface rather than the variable above.
 */
unsigned cpuinfo_init(void);

#endif /* HOST_CPUINFO_H */

183
host/include/ppc64/host/crypto/aes-round.h

@@ -1 +1,182 @@
#include "host/include/ppc/host/crypto/aes-round.h"
/*
 * Power v2.07 specific aes acceleration.
 * SPDX-License-Identifier: GPL-2.0-or-later
 */

#ifndef PPC_HOST_CRYPTO_AES_ROUND_H
#define PPC_HOST_CRYPTO_AES_ROUND_H

#ifdef __ALTIVEC__
#include "host/cpuinfo.h"

#ifdef __CRYPTO__
/* The compiler guarantees the v2.07 crypto instructions are present. */
# define HAVE_AES_ACCEL true
#else
/* Otherwise, probe at runtime via the cpuinfo constructor. */
# define HAVE_AES_ACCEL likely(cpuinfo & CPUINFO_CRYPTO)
#endif
#define ATTR_AES_ACCEL

/*
 * While there is <altivec.h>, both gcc and clang "aid" with the
 * endianness issues in different ways. Just use inline asm instead.
 */

/*
 * Load 16 bytes from *p.
 * Bytes in memory are host-endian; bytes in register are @be.
 */
static inline AESStateVec aes_accel_ld(const AESState *p, bool be)
{
    AESStateVec r;

    if (be) {
        asm("lvx %0, 0, %1" : "=v"(r) : "r"(p), "m"(*p));
    } else if (HOST_BIG_ENDIAN) {
        /* Big-endian host, little-endian data: byte-reverse via vperm. */
        AESStateVec rev = {
            15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0,
        };
        asm("lvx %0, 0, %1\n\t"
            "vperm %0, %0, %0, %2"
            : "=v"(r) : "r"(p), "v"(rev), "m"(*p));
    } else {
#ifdef __POWER9_VECTOR__
        asm("lxvb16x %x0, 0, %1" : "=v"(r) : "r"(p), "m"(*p));
#else
        asm("lxvd2x %x0, 0, %1\n\t"
            "xxpermdi %x0, %x0, %x0, 2"
            : "=v"(r) : "r"(p), "m"(*p));
#endif
    }
    return r;
}

/*
 * Store 16 bytes to *p; the counterpart of aes_accel_ld.
 * Marked 'inline' to match the sibling helpers: a plain 'static'
 * function defined in a header is emitted per translation unit and
 * draws -Wunused-function warnings wherever it is not called.
 */
static inline void aes_accel_st(AESState *p, AESStateVec r, bool be)
{
    if (be) {
        asm("stvx %1, 0, %2" : "=m"(*p) : "v"(r), "r"(p));
    } else if (HOST_BIG_ENDIAN) {
        AESStateVec rev = {
            15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0,
        };
        asm("vperm %1, %1, %1, %2\n\t"
            "stvx %1, 0, %3"
            : "=m"(*p), "+v"(r) : "v"(rev), "r"(p));
    } else {
#ifdef __POWER9_VECTOR__
        asm("stxvb16x %x1, 0, %2" : "=m"(*p) : "v"(r), "r"(p));
#else
        asm("xxpermdi %x1, %x1, %x1, 2\n\t"
            "stxvd2x %x1, 0, %2"
            : "=m"(*p), "+v"(r) : "r"(p));
#endif
    }
}

/* Thin wrappers around the four v2.07 AES round instructions. */
static inline AESStateVec aes_accel_vcipher(AESStateVec d, AESStateVec k)
{
    asm("vcipher %0, %0, %1" : "+v"(d) : "v"(k));
    return d;
}

static inline AESStateVec aes_accel_vncipher(AESStateVec d, AESStateVec k)
{
    asm("vncipher %0, %0, %1" : "+v"(d) : "v"(k));
    return d;
}

static inline AESStateVec aes_accel_vcipherlast(AESStateVec d, AESStateVec k)
{
    asm("vcipherlast %0, %0, %1" : "+v"(d) : "v"(k));
    return d;
}

static inline AESStateVec aes_accel_vncipherlast(AESStateVec d, AESStateVec k)
{
    asm("vncipherlast %0, %0, %1" : "+v"(d) : "v"(k));
    return d;
}

/*
 * MixColumns alone: undo a last round with a zero key, then apply a
 * full round with a zero key, leaving only MixColumns.
 */
static inline void
aesenc_MC_accel(AESState *ret, const AESState *st, bool be)
{
    AESStateVec t, z = { };

    t = aes_accel_ld(st, be);
    t = aes_accel_vncipherlast(t, z);
    t = aes_accel_vcipher(t, z);
    aes_accel_st(ret, t, be);
}

/* SubBytes + ShiftRows + AddRoundKey: exactly vcipherlast. */
static inline void
aesenc_SB_SR_AK_accel(AESState *ret, const AESState *st,
                      const AESState *rk, bool be)
{
    AESStateVec t, k;

    t = aes_accel_ld(st, be);
    k = aes_accel_ld(rk, be);
    t = aes_accel_vcipherlast(t, k);
    aes_accel_st(ret, t, be);
}

/* Full encrypt round: exactly vcipher. */
static inline void
aesenc_SB_SR_MC_AK_accel(AESState *ret, const AESState *st,
                         const AESState *rk, bool be)
{
    AESStateVec t, k;

    t = aes_accel_ld(st, be);
    k = aes_accel_ld(rk, be);
    t = aes_accel_vcipher(t, k);
    aes_accel_st(ret, t, be);
}

/*
 * InvMixColumns alone: undo an encrypt last round with a zero key,
 * then apply a decrypt round with a zero key, leaving only IMC.
 */
static inline void
aesdec_IMC_accel(AESState *ret, const AESState *st, bool be)
{
    AESStateVec t, z = { };

    t = aes_accel_ld(st, be);
    t = aes_accel_vcipherlast(t, z);
    t = aes_accel_vncipher(t, z);
    aes_accel_st(ret, t, be);
}

/* InvSubBytes + InvShiftRows + AddRoundKey: exactly vncipherlast. */
static inline void
aesdec_ISB_ISR_AK_accel(AESState *ret, const AESState *st,
                        const AESState *rk, bool be)
{
    AESStateVec t, k;

    t = aes_accel_ld(st, be);
    k = aes_accel_ld(rk, be);
    t = aes_accel_vncipherlast(t, k);
    aes_accel_st(ret, t, be);
}

/* Decrypt round with InvMixColumns applied after the key add. */
static inline void
aesdec_ISB_ISR_AK_IMC_accel(AESState *ret, const AESState *st,
                            const AESState *rk, bool be)
{
    AESStateVec t, k;

    t = aes_accel_ld(st, be);
    k = aes_accel_ld(rk, be);
    t = aes_accel_vncipher(t, k);
    aes_accel_st(ret, t, be);
}

/*
 * Decrypt round with the key added last: vncipher with a zero key
 * yields ISB + ISR + IMC, then xor in the untransformed round key.
 */
static inline void
aesdec_ISB_ISR_IMC_AK_accel(AESState *ret, const AESState *st,
                            const AESState *rk, bool be)
{
    AESStateVec t, k, z = { };

    t = aes_accel_ld(st, be);
    k = aes_accel_ld(rk, be);
    t = aes_accel_vncipher(t, z);
    aes_accel_st(ret, t ^ k, be);
}
#else
/* Without ALTIVEC, we can't even write inline assembly. */
#include "host/include/generic/host/crypto/aes-round.h"
#endif /* __ALTIVEC__ */

#endif /* PPC_HOST_CRYPTO_AES_ROUND_H */

39
linux-user/include/host/ppc/host-signal.h

@@ -1,39 +0,0 @@
/*
 * host-signal.h: signal info dependent on the host architecture
 *
 * Copyright (c) 2022 Linaro Ltd.
 *
 * This work is licensed under the terms of the GNU LGPL, version 2.1 or later.
 * See the COPYING file in the top-level directory.
 */

#ifndef PPC_HOST_SIGNAL_H
#define PPC_HOST_SIGNAL_H

/* For the pt_regs layout reached through uc_mcontext.regs. */
#include <asm/ptrace.h>

/* The third argument to a SA_SIGINFO handler is ucontext_t. */
typedef ucontext_t host_sigcontext;

/* Return the PC at which the signal was raised (ppc NIP register). */
static inline uintptr_t host_signal_pc(host_sigcontext *uc)
{
    return uc->uc_mcontext.regs->nip;
}

/* Rewrite the PC so execution resumes at @pc after sigreturn. */
static inline void host_signal_set_pc(host_sigcontext *uc, uintptr_t pc)
{
    uc->uc_mcontext.regs->nip = pc;
}

/* Return a pointer to the saved signal mask within the context. */
static inline void *host_signal_mask(host_sigcontext *uc)
{
    return &uc->uc_sigmask;
}

/*
 * Report whether the fault was caused by a write access: exclude
 * trap 0x400 (instruction-fetch fault, where DSISR is not relevant)
 * and then test the DSISR store bit.
 * NOTE(review): mask 0x02000000 presumed to be the DSISR "store
 * operation" bit — confirm against the Power ISA interrupt chapter.
 */
static inline bool host_signal_write(siginfo_t *info, host_sigcontext *uc)
{
    return uc->uc_mcontext.regs->trap != 0x400
        && (uc->uc_mcontext.regs->dsisr & 0x02000000);
}

#endif

4
meson.build

@@ -50,7 +50,7 @@ qapi_trace_events = []
bsd_oses = ['gnu/kfreebsd', 'freebsd', 'netbsd', 'openbsd', 'dragonfly', 'darwin']
supported_oses = ['windows', 'freebsd', 'netbsd', 'openbsd', 'darwin', 'sunos', 'linux', 'emscripten']
supported_cpus = ['ppc', 'ppc64', 's390x', 'riscv32', 'riscv64', 'x86_64',
supported_cpus = ['ppc64', 's390x', 'riscv32', 'riscv64', 'x86_64',
'aarch64', 'loongarch64', 'mips64', 'sparc64', 'wasm64']
cpu = host_machine.cpu_family()
@@ -279,8 +279,6 @@ elif cpu == 'aarch64'
kvm_targets = ['aarch64-softmmu']
elif cpu == 's390x'
kvm_targets = ['s390x-softmmu']
elif cpu == 'ppc'
kvm_targets = ['ppc-softmmu']
elif cpu == 'ppc64'
kvm_targets = ['ppc-softmmu', 'ppc64-softmmu']
elif cpu == 'mips64'

Loading…
Cancel
Save