Browse Source

target-arm queue:

* target/arm: set the correct TI bits for WFIT traps
  * target/arm: Refactorings preparatory to KVM SME support
  * target/arm/kvm: Don't free migration-blocker reason on failure
  * target/arm/kvm: add kvm-psci-version vcpu property
  * Revert "target/arm/hvf: Sync CNTV_CTL_EL0 & CNTV_CVAL_EL0"
  * hw/arm/virt: Add virtio-mmio-transports property
  * hw/arm/virt: Update error message for bad gic-version option
  * hw/cpu: Build a15mpcore.c once as common object
  * hw/misc/allwinner-cpucfg: Remove use of ARM_CPU() cast macro
  * hw/ssi/xilinx_spips: Reset TX FIFO in reset
  * hw/char/pl011: Only log "data written to disabled UART" once
  * tests/functional: Make sure test case .py files are executable
 -----BEGIN PGP SIGNATURE-----
 
 iQJNBAABCAA3FiEE4aXFk81BneKOgxXPPCUl7RQ2DN4FAmmgbNoZHHBldGVyLm1h
 eWRlbGxAbGluYXJvLm9yZwAKCRA8JSXtFDYM3hzREACaWhS6mbqwKA0ao5GJFIex
 NFF3PqUf5MFx5J2wt6wNYBRvE2/laqJM84v5c2fVqQmxUPC0K7kiBu8JRliFMDvK
 zeykVX30e0a59M3dxeqs5A3D398g9v4STIosj8HydKcVYavVyJaFoi5Uw01h6GAp
 BRlRAcXMX1RnICyltuueKXYkuIqCZ4lOlpDZbSU/c97b01B2Um4m3MOuKXM0RBGo
 RroTPkNInuIX+SSNbKiSb53CNzvZFn6cP/NQnCtarx9UONxxESPvXSRufG1YjqMw
 YJnvJsR8ZkEZfQMhN3305N5yaQdM8T2ZIcd8JT/mCiwTP/31xquCS+M2SjriD54p
 TU8lZm4r4r3K6DKWxpfa/A6BGUjStAkXVytfHKWZHBAKN59GhKIl6uX4I8W402H3
 OEzNs10K0arrCYkW4hUGnayW0DSzqYJKk8Ejc0vOJkDm3RP6NjiZmYom/iR1Tpbk
 1AKDJ+DuewuCSp9iFcXLbhzfZJl2in4lbJaFy8xRJn+M1CLbAsdKU5o7FJYjbKdS
 cBL/9STS+Puko+A9+WyLHh00drkFndjKruCPFTmXHnOyujIkegaBDrKN1OKebtgL
 XisfuivE6ipZqAMYZwEcnSZluKsLFDUo3rQXeQK4CQFuKvsSM9ilmBWQ9V5avxO3
 /goJymB/HYlVJhlEh33Bcg==
 =8ERD
 -----END PGP SIGNATURE-----

Merge tag 'pull-target-arm-20260226' of https://gitlab.com/pm215/qemu into staging

target-arm queue:
 * target/arm: set the correct TI bits for WFIT traps
 * target/arm: Refactorings preparatory to KVM SME support
 * target/arm/kvm: Don't free migration-blocker reason on failure
 * target/arm/kvm: add kvm-psci-version vcpu property
 * Revert "target/arm/hvf: Sync CNTV_CTL_EL0 & CNTV_CVAL_EL0"
 * hw/arm/virt: Add virtio-mmio-transports property
 * hw/arm/virt: Update error message for bad gic-version option
 * hw/cpu: Build a15mpcore.c once as common object
 * hw/misc/allwinner-cpucfg: Remove use of ARM_CPU() cast macro
 * hw/ssi/xilinx_spips: Reset TX FIFO in reset
 * hw/char/pl011: Only log "data written to disabled UART" once
 * tests/functional: Make sure test case .py files are executable

# -----BEGIN PGP SIGNATURE-----
#
# iQJNBAABCAA3FiEE4aXFk81BneKOgxXPPCUl7RQ2DN4FAmmgbNoZHHBldGVyLm1h
# eWRlbGxAbGluYXJvLm9yZwAKCRA8JSXtFDYM3hzREACaWhS6mbqwKA0ao5GJFIex
# NFF3PqUf5MFx5J2wt6wNYBRvE2/laqJM84v5c2fVqQmxUPC0K7kiBu8JRliFMDvK
# zeykVX30e0a59M3dxeqs5A3D398g9v4STIosj8HydKcVYavVyJaFoi5Uw01h6GAp
# BRlRAcXMX1RnICyltuueKXYkuIqCZ4lOlpDZbSU/c97b01B2Um4m3MOuKXM0RBGo
# RroTPkNInuIX+SSNbKiSb53CNzvZFn6cP/NQnCtarx9UONxxESPvXSRufG1YjqMw
# YJnvJsR8ZkEZfQMhN3305N5yaQdM8T2ZIcd8JT/mCiwTP/31xquCS+M2SjriD54p
# TU8lZm4r4r3K6DKWxpfa/A6BGUjStAkXVytfHKWZHBAKN59GhKIl6uX4I8W402H3
# OEzNs10K0arrCYkW4hUGnayW0DSzqYJKk8Ejc0vOJkDm3RP6NjiZmYom/iR1Tpbk
# 1AKDJ+DuewuCSp9iFcXLbhzfZJl2in4lbJaFy8xRJn+M1CLbAsdKU5o7FJYjbKdS
# cBL/9STS+Puko+A9+WyLHh00drkFndjKruCPFTmXHnOyujIkegaBDrKN1OKebtgL
# XisfuivE6ipZqAMYZwEcnSZluKsLFDUo3rQXeQK4CQFuKvsSM9ilmBWQ9V5avxO3
# /goJymB/HYlVJhlEh33Bcg==
# =8ERD
# -----END PGP SIGNATURE-----
# gpg: Signature made Thu Feb 26 15:55:06 2026 GMT
# gpg:                using RSA key E1A5C593CD419DE28E8315CF3C2525ED14360CDE
# gpg:                issuer "peter.maydell@linaro.org"
# gpg: Good signature from "Peter Maydell <peter.maydell@linaro.org>" [ultimate]
# gpg:                 aka "Peter Maydell <pmaydell@gmail.com>" [ultimate]
# gpg:                 aka "Peter Maydell <pmaydell@chiark.greenend.org.uk>" [ultimate]
# gpg:                 aka "Peter Maydell <peter@archaic.org.uk>" [ultimate]
# Primary key fingerprint: E1A5 C593 CD41 9DE2 8E83  15CF 3C25 25ED 1436 0CDE

* tag 'pull-target-arm-20260226' of https://gitlab.com/pm215/qemu:
  hw/char/pl011: Only log "data written to disabled UART" once
  target/arm/kvm: Don't free migration-blocker reason on failure
  target/arm: Add have_ffr argument to kvm_arch_{get, put}_sve
  target/arm: Add vq argument to kvm_arch_{get, put}_sve
  target/arm: Drop kvm_arm_pmu_supported
  target/arm: Remove kvm test in arm_set_pmu
  target/arm: Drop kvm_arm_sve_supported
  target/arm: Move kvm test out of cpu_arm_set_sve
  target/arm: Init sve_vq in kvm_arm_set_cpu_features_from_host
  target/arm: Move kvm_arm_sve_get_vls within kvm.c
  target/arm: Remove aarch64 test for kvm
  hw/misc/allwinner-cpucfg: Remove use of ARM_CPU() cast macro
  hw/cpu: Build a15mpcore.c once as common object
  Revert "target/arm/hvf: Sync CNTV_CTL_EL0 & CNTV_CVAL_EL0"
  target/arm/kvm: add kvm-psci-version vcpu property
  tests/functional: Make sure test case .py files are executable
  hw/arm/virt: Update error message for bad gic-version option
  hw/arm/virt: Add virtio-mmio-transports property
  target/arm: set the correct TI bits for WFIT traps
  hw/ssi/xilinx_spips: Reset TX FIFO in reset

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
master
Peter Maydell 1 month ago
parent
commit
d8a9d97317
  1. 11
      docs/system/arm/cpu-features.rst
  2. 5
      docs/system/arm/virt.rst
  3. 2
      hw/arm/virt-acpi-build.c
  4. 45
      hw/arm/virt.c
  5. 24
      hw/char/pl011.c
  6. 2
      hw/cpu/a15mpcore.c
  7. 2
      hw/cpu/meson.build
  8. 4
      hw/misc/allwinner-cpucfg.c
  9. 4
      hw/ssi/xilinx_spips.c
  10. 1
      include/hw/arm/virt.h
  11. 1
      include/hw/char/pl011.h
  12. 12
      target/arm/cpu.c
  13. 72
      target/arm/cpu64.c
  14. 25
      target/arm/hvf/hvf.c
  15. 15
      target/arm/kvm-stub.c
  16. 197
      target/arm/kvm.c
  17. 35
      target/arm/kvm_arm.h
  18. 2
      target/arm/tcg/cpu64.c
  19. 2
      target/arm/tcg/op_helper.c
  20. 0
      tests/functional/ppc/test_ppe42.py

11
docs/system/arm/cpu-features.rst

@ -204,6 +204,17 @@ the list of KVM VCPU features and their descriptions.
the guest scheduler behavior and/or be exposed to the guest
userspace.
``kvm-psci-version``
Set the Power State Coordination Interface (PSCI) firmware ABI version
that KVM provides to the guest. By default KVM will use the newest
version that it knows about (which is PSCI v1.3 in Linux v6.13).
You only need to set this if you want to be able to migrate this
VM to a host machine running an older kernel that does not
recognize the PSCI version that this host's kernel defaults to.
Current valid values are: 0.1, 0.2, 1.0, 1.1, 1.2, and 1.3.
TCG VCPU Features
=================

5
docs/system/arm/virt.rst

@ -226,6 +226,11 @@ dtb-randomness
dtb-kaslr-seed
A deprecated synonym for dtb-randomness.
virtio-mmio-transports
Set the number of virtio-mmio transports to create (between 0 and 32;
the default is 32). Unused transports are harmless, but you can
use this property to avoid exposing them to the guest if you wish.
x-oem-id
Set string (up to 6 bytes) to override the default value of field OEMID in ACPI
table header.

2
hw/arm/virt-acpi-build.c

@ -1154,7 +1154,7 @@ build_dsdt(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms)
fw_cfg_acpi_dsdt_add(scope, &memmap[VIRT_FW_CFG]);
virtio_acpi_dsdt_add(scope, memmap[VIRT_MMIO].base, memmap[VIRT_MMIO].size,
(irqmap[VIRT_MMIO] + ARM_SPI_BASE),
0, NUM_VIRTIO_TRANSPORTS);
0, vms->virtio_transports);
acpi_dsdt_add_pci(scope, memmap, irqmap[VIRT_PCIE] + ARM_SPI_BASE, vms);
if (vms->acpi_dev) {
build_ged_aml(scope, "\\_SB."GED_DEVICE,

45
hw/arm/virt.c

@ -1208,7 +1208,7 @@ static void create_virtio_devices(const VirtMachineState *vms)
* between kernel versions). For reliable and stable identification
* of disks users must use UUIDs or similar mechanisms.
*/
for (i = 0; i < NUM_VIRTIO_TRANSPORTS; i++) {
for (i = 0; i < vms->virtio_transports; i++) {
int irq = vms->irqmap[VIRT_MMIO] + i;
hwaddr base = vms->memmap[VIRT_MMIO].base + i * size;
@ -1223,7 +1223,7 @@ static void create_virtio_devices(const VirtMachineState *vms)
* loop influences virtio device to virtio transport assignment, whereas
* this loop controls how virtio transports are laid out in the dtb.
*/
for (i = NUM_VIRTIO_TRANSPORTS - 1; i >= 0; i--) {
for (i = vms->virtio_transports - 1; i >= 0; i--) {
char *nodename;
int irq = vms->irqmap[VIRT_MMIO] + i;
hwaddr base = vms->memmap[VIRT_MMIO].base + i * size;
@ -2826,6 +2826,36 @@ static void virt_set_its(Object *obj, bool value, Error **errp)
}
}
static void virt_get_virtio_transports(Object *obj, Visitor *v,
const char *name, void *opaque,
Error **errp)
{
VirtMachineState *vms = VIRT_MACHINE(obj);
uint8_t transports = vms->virtio_transports;
visit_type_uint8(v, name, &transports, errp);
}
static void virt_set_virtio_transports(Object *obj, Visitor *v,
const char *name, void *opaque,
Error **errp)
{
VirtMachineState *vms = VIRT_MACHINE(obj);
uint8_t transports;
if (!visit_type_uint8(v, name, &transports, errp)) {
return;
}
if (transports > NUM_VIRTIO_TRANSPORTS) {
error_setg(errp, "virtio-mmio-transports must not exceed %d",
NUM_VIRTIO_TRANSPORTS);
return;
}
vms->virtio_transports = transports;
}
static bool virt_get_dtb_randomness(Object *obj, Error **errp)
{
VirtMachineState *vms = VIRT_MACHINE(obj);
@ -2971,7 +3001,7 @@ static void virt_set_gic_version(Object *obj, const char *value, Error **errp)
vms->gic_version = VIRT_GIC_VERSION_MAX; /* Will probe later */
} else {
error_setg(errp, "Invalid gic-version value");
error_append_hint(errp, "Valid values are 3, 2, host, max.\n");
error_append_hint(errp, "Valid values are 2, 3, 4, host, and max.\n");
}
}
@ -3535,6 +3565,13 @@ static void virt_machine_class_init(ObjectClass *oc, const void *data)
"Set the high memory region size "
"for PCI MMIO");
object_class_property_add(oc, "virtio-mmio-transports", "uint8",
virt_get_virtio_transports,
virt_set_virtio_transports,
NULL, NULL);
object_class_property_set_description(oc, "virtio-mmio-transports",
"Set the number of virtio-mmio transports to instantiate");
object_class_property_add_str(oc, "gic-version", virt_get_gic_version,
virt_set_gic_version);
object_class_property_set_description(oc, "gic-version",
@ -3654,6 +3691,8 @@ static void virt_instance_init(Object *obj)
vms->irqmap = a15irqmap;
vms->virtio_transports = NUM_VIRTIO_TRANSPORTS;
virt_flash_create(vms);
vms->oem_id = g_strndup(ACPI_BUILD_APPNAME6, 6);

24
hw/char/pl011.c

@ -227,10 +227,25 @@ static void pl011_loopback_tx(PL011State *s, uint32_t value)
static void pl011_write_txdata(PL011State *s, uint8_t data)
{
if (!(s->cr & CR_UARTEN)) {
qemu_log_mask(LOG_GUEST_ERROR,
"PL011 data written to disabled UART\n");
/*
* Only log this message once, not every time the guest outputs:
* otherwise we would flood the logs with this message, making
* it harder to debug guests. (Some very popular guests like Linux
* don't actively enable the UART.)
*/
if (!s->logged_disabled_uart) {
qemu_log_mask(LOG_GUEST_ERROR,
"PL011 data written to disabled UART\n");
s->logged_disabled_uart = true;
}
}
if (!(s->cr & CR_TXE)) {
/*
* We don't bother with the only-log-once machinery for this check
* because TXE is enabled by default from PL011 reset, so there
* isn't likely to be existing in-the-wild guest code that trips
* over this one.
*/
qemu_log_mask(LOG_GUEST_ERROR,
"PL011 data written to disabled TX UART\n");
}
@ -457,6 +472,10 @@ static void pl011_write(void *opaque, hwaddr offset,
break;
case 12: /* UARTCR */
/* ??? Need to implement the enable bit. */
if ((s->cr ^ value) & CR_UARTEN) {
/* Re-arm the log warning when the guest toggles UARTEN */
s->logged_disabled_uart = false;
}
s->cr = value;
pl011_loopback_mdmctrl(s);
break;
@ -665,6 +684,7 @@ static void pl011_reset(DeviceState *dev)
s->ifl = 0x12;
s->cr = 0x300;
s->flags = 0;
s->logged_disabled_uart = false;
pl011_reset_rx_fifo(s);
pl011_reset_tx_fifo(s);
}

2
hw/cpu/a15mpcore.c

@ -22,10 +22,10 @@
#include "qapi/error.h"
#include "qemu/module.h"
#include "hw/cpu/a15mpcore.h"
#include "hw/core/cpu.h"
#include "hw/core/irq.h"
#include "hw/core/qdev-properties.h"
#include "system/kvm.h"
#include "kvm_arm.h"
#include "target/arm/gtimer.h"
static void a15mp_priv_set_irq(void *opaque, int irq, int level)

2
hw/cpu/meson.build

@ -4,4 +4,4 @@ system_ss.add(when: 'CONFIG_CPU_CLUSTER', if_true: files('cluster.c'))
system_ss.add(when: 'CONFIG_ARM11MPCORE', if_true: files('arm11mpcore.c'))
system_ss.add(when: 'CONFIG_REALVIEW', if_true: files('realview_mpcore.c'))
system_ss.add(when: 'CONFIG_A9MPCORE', if_true: files('a9mpcore.c'))
specific_ss.add(when: 'CONFIG_A15MPCORE', if_true: files('a15mpcore.c'))
system_ss.add(when: 'CONFIG_A15MPCORE', if_true: files('a15mpcore.c'))

4
hw/misc/allwinner-cpucfg.c

@ -84,7 +84,7 @@ static void allwinner_cpucfg_cpu_reset(AwCpuCfgState *s, uint8_t cpu_id)
trace_allwinner_cpucfg_cpu_reset(cpu_id, s->entry_addr);
ARMCPU *target_cpu = ARM_CPU(arm_get_cpu_by_id(cpu_id));
CPUState *target_cpu = arm_get_cpu_by_id(cpu_id);
if (!target_cpu) {
/*
* Called with a bogus value for cpu_id. Guest error will
@ -92,7 +92,7 @@ static void allwinner_cpucfg_cpu_reset(AwCpuCfgState *s, uint8_t cpu_id)
*/
return;
}
bool target_aa64 = arm_feature(&target_cpu->env, ARM_FEATURE_AARCH64);
bool target_aa64 = arm_feature(cpu_env(target_cpu), ARM_FEATURE_AARCH64);
ret = arm_set_cpu_on(cpu_id, s->entry_addr, 0,
CPU_EXCEPTION_LEVEL_ON_RESET, target_aa64);

4
hw/ssi/xilinx_spips.c

@ -369,7 +369,7 @@ static void xilinx_spips_reset(DeviceState *d)
memset(s->regs, 0, sizeof(s->regs));
fifo8_reset(&s->rx_fifo);
fifo8_reset(&s->rx_fifo);
fifo8_reset(&s->tx_fifo);
/* non zero resets */
s->regs[R_CONFIG] |= MODEFAIL_GEN_EN;
s->regs[R_SLAVE_IDLE_COUNT] = 0xFF;
@ -397,7 +397,7 @@ static void xlnx_zynqmp_qspips_reset(DeviceState *d)
memset(s->regs, 0, sizeof(s->regs));
fifo8_reset(&s->rx_fifo_g);
fifo8_reset(&s->rx_fifo_g);
fifo8_reset(&s->tx_fifo_g);
fifo32_reset(&s->fifo_g);
s->regs[R_INTR_STATUS] = R_INTR_STATUS_RESET;
s->regs[R_GPIO] = 1;

1
include/hw/arm/virt.h

@ -172,6 +172,7 @@ struct VirtMachineState {
uint32_t msi_phandle;
uint32_t iommu_phandle;
int psci_conduit;
uint8_t virtio_transports;
hwaddr highest_gpa;
DeviceState *gic;
DeviceState *acpi_dev;

1
include/hw/char/pl011.h

@ -51,6 +51,7 @@ struct PL011State {
qemu_irq irq[6];
Clock *clk;
bool migrate_clk;
bool logged_disabled_uart;
const unsigned char *id;
/*
* Since some users embed this struct directly, we must

12
target/arm/cpu.c

@ -1144,7 +1144,13 @@ static void arm_cpu_initfn(Object *obj)
* picky DTB consumer will also provide a helpful error message.
*/
cpu->dtb_compatible = "qemu,unknown";
cpu->psci_version = QEMU_PSCI_VERSION_0_1; /* By default assume PSCI v0.1 */
if (!kvm_enabled()) {
/* By default KVM will use the newest PSCI version that it knows about.
* This can be changed using the kvm-psci-version property.
* For others assume PSCI v0.1 by default.
*/
cpu->psci_version = QEMU_PSCI_VERSION_0_1;
}
cpu->kvm_target = QEMU_KVM_ARM_TARGET_NONE;
if (tcg_enabled() || hvf_enabled()) {
@ -1218,10 +1224,6 @@ static void arm_set_pmu(Object *obj, bool value, Error **errp)
ARMCPU *cpu = ARM_CPU(obj);
if (value) {
if (kvm_enabled() && !kvm_arm_pmu_supported()) {
error_setg(errp, "'pmu' feature not supported by KVM on this host");
return;
}
set_feature(&cpu->env, ARM_FEATURE_PMU);
} else {
unset_feature(&cpu->env, ARM_FEATURE_PMU);

72
target/arm/cpu64.c

@ -79,28 +79,10 @@ void arm_cpu_sve_finalize(ARMCPU *cpu, Error **errp)
*/
uint32_t vq_map = cpu->sve_vq.map;
uint32_t vq_init = cpu->sve_vq.init;
uint32_t vq_supported;
uint32_t vq_supported = cpu->sve_vq.supported;
uint32_t vq_mask = 0;
uint32_t tmp, vq, max_vq = 0;
/*
* CPU models specify a set of supported vector lengths which are
* enabled by default. Attempting to enable any vector length not set
* in the supported bitmap results in an error. When KVM is enabled we
* fetch the supported bitmap from the host.
*/
if (kvm_enabled()) {
if (kvm_arm_sve_supported()) {
cpu->sve_vq.supported = kvm_arm_sve_get_vls(cpu);
vq_supported = cpu->sve_vq.supported;
} else {
assert(!cpu_isar_feature(aa64_sve, cpu));
vq_supported = 0;
}
} else {
vq_supported = cpu->sve_vq.supported;
}
/*
* Process explicit sve<N> properties.
* From the properties, sve_vq_map<N> implies sve_vq_init<N>.
@ -318,6 +300,30 @@ static void cpu_arm_set_vq(Object *obj, Visitor *v, const char *name,
vq_map->init |= 1 << (vq - 1);
}
static void prop_bool_get_false(Object *obj, Visitor *v, const char *name,
void *opaque, Error **errp)
{
bool value = false;
visit_type_bool(v, name, &value, errp);
}
static void prop_bool_set_false(Object *obj, Visitor *v, const char *name,
void *opaque, Error **errp)
{
bool value;
if (visit_type_bool(v, name, &value, errp) && value) {
error_setg(errp, "'%s' feature not supported by %s on this host",
name, current_accel_name());
}
}
static void prop_add_stub_bool(Object *obj, const char *name)
{
object_property_add(obj, name, "bool", prop_bool_get_false,
prop_bool_set_false, NULL, NULL);
}
static bool cpu_arm_get_sve(Object *obj, Error **errp)
{
ARMCPU *cpu = ARM_CPU(obj);
@ -327,12 +333,6 @@ static bool cpu_arm_get_sve(Object *obj, Error **errp)
static void cpu_arm_set_sve(Object *obj, bool value, Error **errp)
{
ARMCPU *cpu = ARM_CPU(obj);
if (value && kvm_enabled() && !kvm_arm_sve_supported()) {
error_setg(errp, "'sve' feature not supported by KVM on this host");
return;
}
FIELD_DP64_IDREG(&cpu->isar, ID_AA64PFR0, SVE, value);
}
@ -489,7 +489,23 @@ void aarch64_add_sve_properties(Object *obj)
ARMCPU *cpu = ARM_CPU(obj);
uint32_t vq;
object_property_add_bool(obj, "sve", cpu_arm_get_sve, cpu_arm_set_sve);
/*
* For hw virtualization, we have already probed the set of vector
* lengths supported. If there are none, the host doesn't support
* SVE at all. In which case we register a stub property, to allow
* -cpu max,sve=off
* to always be valid.
*
* For TCG, this function is only called for cpu models which
* support SVE. The error message in the stub is written
* assuming host virtualization is being used.
*/
if (cpu->sve_vq.supported) {
object_property_add_bool(obj, "sve", cpu_arm_get_sve, cpu_arm_set_sve);
} else {
assert(!tcg_enabled());
prop_add_stub_bool(obj, "sve");
}
for (vq = 1; vq <= ARM_MAX_VQ; ++vq) {
char name[8];
@ -799,9 +815,7 @@ static void aarch64_host_initfn(Object *obj)
ARMCPU *cpu = ARM_CPU(obj);
#if defined(CONFIG_KVM)
kvm_arm_set_cpu_features_from_host(cpu);
if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
aarch64_add_sve_properties(obj);
}
aarch64_add_sve_properties(obj);
#elif defined(CONFIG_HVF)
hvf_arm_set_cpu_features_from_host(cpu);
#elif defined(CONFIG_WHPX)

25
target/arm/hvf/hvf.c

@ -200,9 +200,6 @@ void hvf_arm_init_debug(void)
#define SYSREG_PMCEID0_EL0 SYSREG(3, 3, 9, 12, 6)
#define SYSREG_PMCEID1_EL0 SYSREG(3, 3, 9, 12, 7)
#define SYSREG_PMCCNTR_EL0 SYSREG(3, 3, 9, 13, 0)
#define SYSREG_CNTV_CTL_EL0 SYSREG(3, 3, 14, 3, 1)
#define SYSREG_CNTV_CVAL_EL0 SYSREG(3, 3, 14, 3, 2)
#define SYSREG_PMCCFILTR_EL0 SYSREG(3, 3, 14, 15, 7)
#define SYSREG_ICC_AP0R0_EL1 SYSREG(3, 0, 12, 8, 4)
@ -505,7 +502,6 @@ int hvf_arch_get_registers(CPUState *cpu)
uint64_t val;
hv_simd_fp_uchar16_t fpval;
int i, n;
bool b;
for (i = 0; i < ARRAY_SIZE(hvf_reg_match); i++) {
ret = hv_vcpu_get_reg(cpu->accel->fd, hvf_reg_match[i].reg, &val);
@ -635,16 +631,6 @@ int hvf_arch_get_registers(CPUState *cpu)
aarch64_restore_sp(env, arm_current_el(env));
ret = hv_vcpu_get_sys_reg(cpu->accel->fd, HV_SYS_REG_CNTV_CVAL_EL0, &val);
assert_hvf_ok(ret);
b = hvf_sysreg_write_cp(cpu, "VTimer", SYSREG_CNTV_CVAL_EL0, val);
assert(b);
ret = hv_vcpu_get_sys_reg(cpu->accel->fd, HV_SYS_REG_CNTV_CTL_EL0, &val);
assert_hvf_ok(ret);
b = hvf_sysreg_write_cp(cpu, "VTimer", SYSREG_CNTV_CTL_EL0, val);
assert(b);
return 0;
}
@ -656,7 +642,6 @@ int hvf_arch_put_registers(CPUState *cpu)
uint64_t val;
hv_simd_fp_uchar16_t fpval;
int i, n;
bool b;
for (i = 0; i < ARRAY_SIZE(hvf_reg_match); i++) {
val = *(uint64_t *)((void *)env + hvf_reg_match[i].offset);
@ -771,16 +756,6 @@ int hvf_arch_put_registers(CPUState *cpu)
ret = hv_vcpu_set_vtimer_offset(cpu->accel->fd, hvf_state->vtimer_offset);
assert_hvf_ok(ret);
b = hvf_sysreg_read_cp(cpu, "VTimer", SYSREG_CNTV_CVAL_EL0, &val);
assert(b);
ret = hv_vcpu_set_sys_reg(cpu->accel->fd, HV_SYS_REG_CNTV_CVAL_EL0, val);
assert_hvf_ok(ret);
b = hvf_sysreg_read_cp(cpu, "VTimer", SYSREG_CNTV_CTL_EL0, &val);
assert(b);
ret = hv_vcpu_set_sys_reg(cpu->accel->fd, HV_SYS_REG_CNTV_CTL_EL0, val);
assert_hvf_ok(ret);
return 0;
}

15
target/arm/kvm-stub.c

@ -32,16 +32,6 @@ bool kvm_arm_aarch32_supported(void)
return false;
}
bool kvm_arm_pmu_supported(void)
{
return false;
}
bool kvm_arm_sve_supported(void)
{
return false;
}
bool kvm_arm_mte_supported(void)
{
return false;
@ -95,11 +85,6 @@ void kvm_arm_steal_time_finalize(ARMCPU *cpu, Error **errp)
g_assert_not_reached();
}
uint32_t kvm_arm_sve_get_vls(ARMCPU *cpu)
{
g_assert_not_reached();
}
void kvm_arm_enable_mte(Object *cpuobj, Error **errp)
{
g_assert_not_reached();

197
target/arm/kvm.c

@ -60,6 +60,7 @@ typedef struct ARMHostCPUFeatures {
ARMISARegisters isar;
uint64_t features;
uint32_t target;
uint32_t sve_vq_supported;
const char *dtb_compatible;
} ARMHostCPUFeatures;
@ -243,6 +244,35 @@ static int get_host_cpu_reg(int fd, ARMHostCPUFeatures *ahcf,
return ret;
}
static uint32_t kvm_arm_sve_get_vls(int fd)
{
uint64_t vls[KVM_ARM64_SVE_VLS_WORDS];
struct kvm_one_reg reg = {
.id = KVM_REG_ARM64_SVE_VLS,
.addr = (uint64_t)&vls[0],
};
uint32_t vq = 0;
int ret;
ret = ioctl(fd, KVM_GET_ONE_REG, &reg);
if (ret) {
error_report("failed to get KVM_REG_ARM64_SVE_VLS: %s",
strerror(errno));
abort();
}
for (int i = KVM_ARM64_SVE_VLS_WORDS - 1; i >= 0; --i) {
if (vls[i]) {
vq = 64 - clz64(vls[i]) + i * 64;
break;
}
}
if (vq > ARM_MAX_VQ) {
warn_report("KVM supports vector lengths larger than QEMU can enable");
}
return vls[0] & MAKE_64BIT_MASK(0, ARM_MAX_VQ);
}
static bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf)
{
/* Identify the feature bits corresponding to the host CPU, and
@ -267,7 +297,7 @@ static bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf)
* Ask for SVE if supported, so that we can query ID_AA64ZFR0,
* which is otherwise RAZ.
*/
sve_supported = kvm_arm_sve_supported();
sve_supported = kvm_check_extension(kvm_state, KVM_CAP_ARM_SVE);
if (sve_supported) {
init.features[0] |= 1 << KVM_ARM_VCPU_SVE;
}
@ -289,7 +319,7 @@ static bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf)
1 << KVM_ARM_VCPU_PTRAUTH_GENERIC);
}
if (kvm_arm_pmu_supported()) {
if (kvm_check_extension(kvm_state, KVM_CAP_ARM_PMU_V3)) {
init.features[0] |= 1 << KVM_ARM_VCPU_PMU_V3;
pmu_supported = true;
features |= 1ULL << ARM_FEATURE_PMU;
@ -415,6 +445,9 @@ static bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf)
* So only read the register if we set KVM_ARM_VCPU_SVE above.
*/
err |= get_host_cpu_reg(fd, ahcf, ID_AA64ZFR0_EL1_IDX);
/* Read the set of supported vector lengths. */
arm_host_cpu_features.sve_vq_supported = kvm_arm_sve_get_vls(fd);
}
}
@ -462,6 +495,7 @@ void kvm_arm_set_cpu_features_from_host(ARMCPU *cpu)
cpu->kvm_target = arm_host_cpu_features.target;
cpu->dtb_compatible = arm_host_cpu_features.dtb_compatible;
cpu->isar = arm_host_cpu_features.isar;
cpu->sve_vq.supported = arm_host_cpu_features.sve_vq_supported;
env->features = arm_host_cpu_features.features;
}
@ -485,6 +519,28 @@ static void kvm_steal_time_set(Object *obj, bool value, Error **errp)
ARM_CPU(obj)->kvm_steal_time = value ? ON_OFF_AUTO_ON : ON_OFF_AUTO_OFF;
}
static char *kvm_get_psci_version(Object *obj, Error **errp)
{
ARMCPU *cpu = ARM_CPU(obj);
return g_strdup_printf("%d.%d",
(int) PSCI_VERSION_MAJOR(cpu->psci_version),
(int) PSCI_VERSION_MINOR(cpu->psci_version));
}
static void kvm_set_psci_version(Object *obj, const char *value, Error **errp)
{
ARMCPU *cpu = ARM_CPU(obj);
uint16_t maj, min;
if (sscanf(value, "%hu.%hu", &maj, &min) != 2) {
error_setg(errp, "Invalid PSCI version.");
return;
}
cpu->psci_version = PSCI_VERSION(maj, min);
}
/* KVM VCPU properties should be prefixed with "kvm-". */
void kvm_arm_add_vcpu_properties(ARMCPU *cpu)
{
@ -506,11 +562,12 @@ void kvm_arm_add_vcpu_properties(ARMCPU *cpu)
kvm_steal_time_set);
object_property_set_description(obj, "kvm-steal-time",
"Set off to disable KVM steal time.");
}
bool kvm_arm_pmu_supported(void)
{
return kvm_check_extension(kvm_state, KVM_CAP_ARM_PMU_V3);
object_property_add_str(obj, "kvm-psci-version", kvm_get_psci_version,
kvm_set_psci_version);
object_property_set_description(obj, "kvm-psci-version",
"Set PSCI version. "
"Valid values are 0.1, 0.2, 1.0, 1.1, 1.2, 1.3");
}
int kvm_arm_get_max_vm_ipa_size(MachineState *ms, bool *fixed_ipa)
@ -1874,11 +1931,6 @@ bool kvm_arm_el2_supported(void)
return kvm_check_extension(kvm_state, KVM_CAP_ARM_EL2);
}
bool kvm_arm_sve_supported(void)
{
return kvm_check_extension(kvm_state, KVM_CAP_ARM_SVE);
}
bool kvm_arm_mte_supported(void)
{
return kvm_check_extension(kvm_state, KVM_CAP_ARM_MTE);
@ -1886,60 +1938,6 @@ bool kvm_arm_mte_supported(void)
QEMU_BUILD_BUG_ON(KVM_ARM64_SVE_VQ_MIN != 1);
uint32_t kvm_arm_sve_get_vls(ARMCPU *cpu)
{
/* Only call this function if kvm_arm_sve_supported() returns true. */
static uint64_t vls[KVM_ARM64_SVE_VLS_WORDS];
static bool probed;
uint32_t vq = 0;
int i;
/*
* KVM ensures all host CPUs support the same set of vector lengths.
* So we only need to create the scratch VCPUs once and then cache
* the results.
*/
if (!probed) {
struct kvm_vcpu_init init = {
.target = -1,
.features[0] = (1 << KVM_ARM_VCPU_SVE),
};
struct kvm_one_reg reg = {
.id = KVM_REG_ARM64_SVE_VLS,
.addr = (uint64_t)&vls[0],
};
int fdarray[3], ret;
probed = true;
if (!kvm_arm_create_scratch_host_vcpu(fdarray, &init)) {
error_report("failed to create scratch VCPU with SVE enabled");
abort();
}
ret = ioctl(fdarray[2], KVM_GET_ONE_REG, &reg);
kvm_arm_destroy_scratch_host_vcpu(fdarray);
if (ret) {
error_report("failed to get KVM_REG_ARM64_SVE_VLS: %s",
strerror(errno));
abort();
}
for (i = KVM_ARM64_SVE_VLS_WORDS - 1; i >= 0; --i) {
if (vls[i]) {
vq = 64 - clz64(vls[i]) + i * 64;
break;
}
}
if (vq > ARM_MAX_VQ) {
warn_report("KVM supports vector lengths larger than "
"QEMU can enable");
vls[0] &= MAKE_64BIT_MASK(0, ARM_MAX_VQ);
}
}
return vls[0];
}
static int kvm_arm_sve_set_vls(ARMCPU *cpu)
{
uint64_t vls[KVM_ARM64_SVE_VLS_WORDS] = { cpu->sve_vq.map };
@ -1976,8 +1974,12 @@ int kvm_arch_init_vcpu(CPUState *cs)
if (cs->start_powered_off) {
cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_POWER_OFF;
}
if (kvm_check_extension(cs->kvm_state, KVM_CAP_ARM_PSCI_0_2)) {
cpu->psci_version = QEMU_PSCI_VERSION_0_2;
if (cpu->psci_version != QEMU_PSCI_VERSION_0_1 &&
kvm_check_extension(cs->kvm_state, KVM_CAP_ARM_PSCI_0_2)) {
/*
* Versions >= v0.2 are backward compatible with v0.2;
* omit the feature flag for v0.1.
*/
cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_PSCI_0_2;
}
if (!arm_feature(env, ARM_FEATURE_AARCH64)) {
@ -1987,7 +1989,6 @@ int kvm_arch_init_vcpu(CPUState *cs)
cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_PMU_V3;
}
if (cpu_isar_feature(aa64_sve, cpu)) {
assert(kvm_arm_sve_supported());
cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_SVE;
}
if (cpu_isar_feature(aa64_pauth, cpu)) {
@ -2015,6 +2016,18 @@ int kvm_arch_init_vcpu(CPUState *cs)
}
}
if (cpu->psci_version) {
psciver = cpu->psci_version;
ret = kvm_set_one_reg(cs, KVM_REG_ARM_PSCI_VERSION, &psciver);
if (ret) {
error_report("KVM in this kernel does not support PSCI version %d.%d",
(int) PSCI_VERSION_MAJOR(psciver),
(int) PSCI_VERSION_MINOR(psciver));
error_printf("Consider setting the kvm-psci-version property on the "
"migration source.\n");
return ret;
}
}
/*
* KVM reports the exact PSCI version it is implementing via a
* special sysreg. If it is present, use its contents to determine
@ -2105,16 +2118,15 @@ static int kvm_arch_put_fpsimd(CPUState *cs)
* code the slice index to zero for now as it's unlikely we'll need more than
* one slice for quite some time.
*/
static int kvm_arch_put_sve(CPUState *cs)
static int kvm_arch_put_sve(CPUState *cs, uint32_t vq, bool have_ffr)
{
ARMCPU *cpu = ARM_CPU(cs);
CPUARMState *env = &cpu->env;
CPUARMState *env = cpu_env(cs);
uint64_t tmp[ARM_MAX_VQ * 2];
uint64_t *r;
int n, ret;
for (n = 0; n < KVM_ARM64_SVE_NUM_ZREGS; ++n) {
r = sve_bswap64(tmp, &env->vfp.zregs[n].d[0], cpu->sve_max_vq * 2);
r = sve_bswap64(tmp, &env->vfp.zregs[n].d[0], vq * 2);
ret = kvm_set_one_reg(cs, KVM_REG_ARM64_SVE_ZREG(n, 0), r);
if (ret) {
return ret;
@ -2122,19 +2134,20 @@ static int kvm_arch_put_sve(CPUState *cs)
}
for (n = 0; n < KVM_ARM64_SVE_NUM_PREGS; ++n) {
r = sve_bswap64(tmp, r = &env->vfp.pregs[n].p[0],
DIV_ROUND_UP(cpu->sve_max_vq * 2, 8));
r = sve_bswap64(tmp, &env->vfp.pregs[n].p[0], DIV_ROUND_UP(vq * 2, 8));
ret = kvm_set_one_reg(cs, KVM_REG_ARM64_SVE_PREG(n, 0), r);
if (ret) {
return ret;
}
}
r = sve_bswap64(tmp, &env->vfp.pregs[FFR_PRED_NUM].p[0],
DIV_ROUND_UP(cpu->sve_max_vq * 2, 8));
ret = kvm_set_one_reg(cs, KVM_REG_ARM64_SVE_FFR(0), r);
if (ret) {
return ret;
if (have_ffr) {
r = sve_bswap64(tmp, &env->vfp.pregs[FFR_PRED_NUM].p[0],
DIV_ROUND_UP(vq * 2, 8));
ret = kvm_set_one_reg(cs, KVM_REG_ARM64_SVE_FFR(0), r);
if (ret) {
return ret;
}
}
return 0;
@ -2223,7 +2236,7 @@ int kvm_arch_put_registers(CPUState *cs, KvmPutState level, Error **errp)
}
if (cpu_isar_feature(aa64_sve, cpu)) {
ret = kvm_arch_put_sve(cs);
ret = kvm_arch_put_sve(cs, cpu->sve_max_vq, true);
} else {
ret = kvm_arch_put_fpsimd(cs);
}
@ -2289,10 +2302,9 @@ static int kvm_arch_get_fpsimd(CPUState *cs)
* code the slice index to zero for now as it's unlikely we'll need more than
* one slice for quite some time.
*/
static int kvm_arch_get_sve(CPUState *cs)
static int kvm_arch_get_sve(CPUState *cs, uint32_t vq, bool have_ffr)
{
ARMCPU *cpu = ARM_CPU(cs);
CPUARMState *env = &cpu->env;
CPUARMState *env = cpu_env(cs);
uint64_t *r;
int n, ret;
@ -2302,7 +2314,7 @@ static int kvm_arch_get_sve(CPUState *cs)
if (ret) {
return ret;
}
sve_bswap64(r, r, cpu->sve_max_vq * 2);
sve_bswap64(r, r, vq * 2);
}
for (n = 0; n < KVM_ARM64_SVE_NUM_PREGS; ++n) {
@ -2311,15 +2323,17 @@ static int kvm_arch_get_sve(CPUState *cs)
if (ret) {
return ret;
}
sve_bswap64(r, r, DIV_ROUND_UP(cpu->sve_max_vq * 2, 8));
sve_bswap64(r, r, DIV_ROUND_UP(vq * 2, 8));
}
r = &env->vfp.pregs[FFR_PRED_NUM].p[0];
ret = kvm_get_one_reg(cs, KVM_REG_ARM64_SVE_FFR(0), r);
if (ret) {
return ret;
if (have_ffr) {
r = &env->vfp.pregs[FFR_PRED_NUM].p[0];
ret = kvm_get_one_reg(cs, KVM_REG_ARM64_SVE_FFR(0), r);
if (ret) {
return ret;
}
sve_bswap64(r, r, DIV_ROUND_UP(vq * 2, 8));
}
sve_bswap64(r, r, DIV_ROUND_UP(cpu->sve_max_vq * 2, 8));
return 0;
}
@ -2407,7 +2421,7 @@ int kvm_arch_get_registers(CPUState *cs, Error **errp)
}
if (cpu_isar_feature(aa64_sve, cpu)) {
ret = kvm_arch_get_sve(cs);
ret = kvm_arch_get_sve(cs, cpu->sve_max_vq, true);
} else {
ret = kvm_arch_get_fpsimd(cs);
}
@ -2540,7 +2554,6 @@ void kvm_arm_enable_mte(Object *cpuobj, Error **errp)
error_setg(&mte_migration_blocker,
"Live migration disabled due to MTE enabled");
if (migrate_add_blocker(&mte_migration_blocker, errp)) {
error_free(mte_migration_blocker);
return;
}

35
target/arm/kvm_arm.h

@ -124,16 +124,6 @@ bool kvm_arm_create_scratch_host_vcpu(int *fdarray,
*/
void kvm_arm_destroy_scratch_host_vcpu(int *fdarray);
/**
* kvm_arm_sve_get_vls:
* @cpu: ARMCPU
*
* Get all the SVE vector lengths supported by the KVM host, setting
* the bits corresponding to their length in quadwords minus one
* (vq - 1) up to ARM_MAX_VQ. Return the resulting map.
*/
uint32_t kvm_arm_sve_get_vls(ARMCPU *cpu);
/**
* kvm_arm_set_cpu_features_from_host:
* @cpu: ARMCPU to set the features for
@ -178,21 +168,6 @@ void kvm_arm_steal_time_finalize(ARMCPU *cpu, Error **errp);
*/
bool kvm_arm_aarch32_supported(void);
/**
* kvm_arm_pmu_supported:
*
* Returns: true if KVM can enable the PMU
* and false otherwise.
*/
bool kvm_arm_pmu_supported(void);
/**
* kvm_arm_sve_supported:
*
* Returns true if KVM can enable SVE and false otherwise.
*/
bool kvm_arm_sve_supported(void);
/**
* kvm_arm_mte_supported:
*
@ -213,16 +188,6 @@ static inline bool kvm_arm_aarch32_supported(void)
return false;
}
static inline bool kvm_arm_pmu_supported(void)
{
return false;
}
static inline bool kvm_arm_sve_supported(void)
{
return false;
}
static inline bool kvm_arm_mte_supported(void)
{
return false;

2
target/arm/tcg/cpu64.c

@ -524,10 +524,10 @@ static void aarch64_a64fx_initfn(Object *obj)
cpu->gic_pribits = 5;
/* The A64FX supports only 128, 256 and 512 bit vector lengths */
aarch64_add_sve_properties(obj);
cpu->sve_vq.supported = (1 << 0) /* 128bit */
| (1 << 1) /* 256bit */
| (1 << 3); /* 512bit */
aarch64_add_sve_properties(obj);
cpu->isar.reset_pmcr_el0 = 0x46014040;

2
target/arm/tcg/op_helper.c

@ -448,7 +448,7 @@ void HELPER(wfit)(CPUARMState *env, uint64_t timeout)
if (target_el) {
env->pc -= 4;
raise_exception(env, excp, syn_wfx(1, 0xe, 0, false), target_el);
raise_exception(env, excp, syn_wfx(1, 0xe, 2, false), target_el);
}
if (uadd64_overflow(timeout, offset, &nexttick)) {

0
tests/functional/ppc/test_ppe42.py

Loading…
Cancel
Save