Browse Source

virtio,pci,pc: features, fixes

intel_iommu:
     SVM support
 vhost:
     support for indirect descriptors in shadow virtqueue
 vhost-user:
     vhost-user-spi support
     vhost-user-blk inflight migration support
 
 misc fixes in pci, vhost, virtio, acpi, cxl
 cleanups in acpi/ghes
 
 Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
 -----BEGIN PGP SIGNATURE-----
 
 iQFDBAABCgAtFiEEXQn9CHHI+FuUyooNKB8NuNKNVGkFAmmEa9APHG1zdEByZWRo
 YXQuY29tAAoJECgfDbjSjVRpqj8H/iBqAHZSTmAdBJgoLnmgoTLB01J9aUTrQU2H
 BHKyrd+G3m54pwjgUNN5ieZARtlXscigf6fr0Gq2wrc8/kV/O5G5jViw9+1Bo8nW
 OkLDW45nDzZGhap4oUedV+PJ3fCuW2fC8Jyb1n8OGlkadbhq0NU6GtqiEx6/7QIh
 hk5WUDE/3LH4cTp8qNtr0/nYfM4FZk2sjq7aRyg4cz/uC7rIAFRq7BCZ/dfRqMh/
 T+rLnizSSAg9PFMd8slWqoxOGF9NzT9LIoDSkAlso1L9lUekUSNoUblhlWDrRlLn
 DEEqqGCVounfBzA95WrTRmvWs6JodppjjAjI0M4isrMKGXXg8dg=
 =HdgY
 -----END PGP SIGNATURE-----

Merge tag 'for_upstream' of https://git.kernel.org/pub/scm/virt/kvm/mst/qemu into staging

virtio,pci,pc: features, fixes

intel_iommu:
    SVM support
vhost:
    support for indirect descriptors in shadow virtqueue
vhost-user:
    vhost-user-spi support
    vhost-user-blk inflight migration support

misc fixes in pci, vhost, virtio, acpi, cxl
cleanups in acpi/ghes

Signed-off-by: Michael S. Tsirkin <mst@redhat.com>

# -----BEGIN PGP SIGNATURE-----
#
# iQFDBAABCgAtFiEEXQn9CHHI+FuUyooNKB8NuNKNVGkFAmmEa9APHG1zdEByZWRo
# YXQuY29tAAoJECgfDbjSjVRpqj8H/iBqAHZSTmAdBJgoLnmgoTLB01J9aUTrQU2H
# BHKyrd+G3m54pwjgUNN5ieZARtlXscigf6fr0Gq2wrc8/kV/O5G5jViw9+1Bo8nW
# OkLDW45nDzZGhap4oUedV+PJ3fCuW2fC8Jyb1n8OGlkadbhq0NU6GtqiEx6/7QIh
# hk5WUDE/3LH4cTp8qNtr0/nYfM4FZk2sjq7aRyg4cz/uC7rIAFRq7BCZ/dfRqMh/
# T+rLnizSSAg9PFMd8slWqoxOGF9NzT9LIoDSkAlso1L9lUekUSNoUblhlWDrRlLn
# DEEqqGCVounfBzA95WrTRmvWs6JodppjjAjI0M4isrMKGXXg8dg=
# =HdgY
# -----END PGP SIGNATURE-----
# gpg: Signature made Thu Feb  5 10:07:12 2026 GMT
# gpg:                using RSA key 5D09FD0871C8F85B94CA8A0D281F0DB8D28D5469
# gpg:                issuer "mst@redhat.com"
# gpg: Good signature from "Michael S. Tsirkin <mst@kernel.org>" [full]
# gpg:                 aka "Michael S. Tsirkin <mst@redhat.com>" [full]
# Primary key fingerprint: 0270 606B 6F3C DF3D 0B17  0970 C350 3912 AFBE 8E67
#      Subkey fingerprint: 5D09 FD08 71C8 F85B 94CA  8A0D 281F 0DB8 D28D 5469

* tag 'for_upstream' of https://git.kernel.org/pub/scm/virt/kvm/mst/qemu: (38 commits)
  hw/cxl: Take into account how many media operations are requested for param check
  hw/cxl: Check for overflow on sanitize media as both base and offset 64bit.
  vhost-user-blk: support inter-host inflight migration
  vhost: add vmstate for inflight region with inner buffer
  vmstate: introduce VMSTATE_VBUFFER_UINT64
  vhost-user: introduce protocol feature for skip drain on GET_VRING_BASE
  vhost-user.rst: specify vhost-user back-end action on GET_VRING_BASE
  virtio-gpu: use consistent error checking for virtio_gpu_create_mapping_iov
  virtio-gpu: fix error handling in virgl_cmd_resource_create_blob
  virtio-pmem: ignore empty queue notifications
  virtio-gpu-virgl: correct parent for blob memory region
  MAINTAINERS: Update VIOT maintainer
  cryptodev-builtin: Limit the maximum size
  hw/virtio/virtio-crypto: verify asym request size
  virtio-spi: Add vhost-user-spi device support
  standard-headers: Update virtio_spi.h from Linux v6.18-rc3
  q35: Fix migration of SMRAM state
  pcie_sriov: Fix PCI_SRIOV_* accesses in pcie_sriov_pf_exit()
  virtio: Fix crash when sriov-pf is set for non-PCI-Express device
  virtio-dmabuf: Ensure UUID persistence for hash table insertion
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
pull/319/head
Peter Maydell 2 months ago
parent
commit
cd5a79dc98
  1. 8
      MAINTAINERS
  2. 9
      backends/cryptodev-builtin.c
  3. 60
      docs/interop/vhost-user.rst
  4. 3
      docs/system/devices/virtio/vhost-user.rst
  5. 6
      hw/acpi/ghes-stub.c
  6. 45
      hw/acpi/ghes.c
  7. 27
      hw/block/vhost-user-blk.c
  8. 11
      hw/cxl/cxl-mailbox-utils.c
  9. 6
      hw/display/virtio-dmabuf.c
  10. 8
      hw/display/virtio-gpu-virgl.c
  11. 4
      hw/display/virtio-gpu.c
  12. 204
      hw/i386/intel_iommu.c
  13. 14
      hw/i386/intel_iommu_internal.h
  14. 1
      hw/pci-host/articia.c
  15. 1
      hw/pci-host/aspeed_pcie.c
  16. 3
      hw/pci-host/designware.c
  17. 2
      hw/pci-host/gpex.c
  18. 1
      hw/pci-host/grackle.c
  19. 1
      hw/pci-host/gt64120.c
  20. 1
      hw/pci-host/mv64361.c
  21. 1
      hw/pci-host/pnv_phb.c
  22. 1
      hw/pci-host/ppce500.c
  23. 26
      hw/pci-host/q35.c
  24. 1
      hw/pci-host/raven.c
  25. 1
      hw/pci-host/remote.c
  26. 1
      hw/pci-host/sabre.c
  27. 4
      hw/pci-host/uninorth.c
  28. 2
      hw/pci-host/xilinx-pcie.c
  29. 4
      hw/pci/pci.c
  30. 1
      hw/pci/pci_host.c
  31. 21
      hw/pci/pcie.c
  32. 9
      hw/pci/pcie_sriov.c
  33. 1
      hw/pci/shpc.c
  34. 5
      hw/virtio/Kconfig
  35. 3
      hw/virtio/meson.build
  36. 1
      hw/virtio/vhost-shadow-virtqueue.c
  37. 69
      hw/virtio/vhost-user-spi-pci.c
  38. 65
      hw/virtio/vhost-user-spi.c
  39. 7
      hw/virtio/vhost-user.c
  40. 6
      hw/virtio/vhost-vdpa.c
  41. 66
      hw/virtio/vhost.c
  42. 7
      hw/virtio/virtio-crypto.c
  43. 20
      hw/virtio/virtio-pci.c
  44. 1
      hw/virtio/virtio-pmem.c
  45. 4
      hw/virtio/virtio.c
  46. 6
      include/hw/acpi/ghes.h
  47. 1
      include/hw/i386/intel_iommu.h
  48. 1
      include/hw/pci/pcie.h
  49. 1
      include/hw/virtio/vhost-user-blk.h
  50. 25
      include/hw/virtio/vhost-user-spi.h
  51. 2
      include/hw/virtio/vhost-user.h
  52. 8
      include/hw/virtio/vhost-vdpa.h
  53. 6
      include/hw/virtio/vhost.h
  54. 10
      include/migration/vmstate.h
  55. 181
      include/standard-headers/linux/virtio_spi.h
  56. 10
      target/arm/kvm.c
  57. 6
      tests/qtest/q35-test.c

8
MAINTAINERS

@ -2199,7 +2199,7 @@ S: Maintained
F: hw/riscv/virt-acpi-build.c
ACPI/VIOT
M: Jean-Philippe Brucker <jean-philippe@linaro.org>
M: Eric Auger <eric.auger@redhat.com>
S: Supported
F: hw/acpi/viot.c
F: hw/acpi/viot.h
@ -2558,6 +2558,12 @@ F: hw/virtio/vhost-user-scmi*
F: include/hw/virtio/vhost-user-scmi.h
F: tests/qtest/libqos/virtio-scmi.*
vhost-user-spi
M: Haixu Cui <quic_haixcui@quicinc.com>
S: Maintained
F: include/hw/virtio/vhost-user-spi.h
F: hw/virtio/vhost-user-spi*
virtio-crypto
M: Gonglei <arei.gonglei@huawei.com>
S: Supported

9
backends/cryptodev-builtin.c

@ -53,6 +53,8 @@ typedef struct CryptoDevBackendBuiltinSession {
#define CRYPTODEV_BUITLIN_MAX_AUTH_KEY_LEN 512
#define CRYPTODEV_BUITLIN_MAX_CIPHER_KEY_LEN 64
/* demonstration purposes only, use a limited size to avoid QEMU OOM */
#define CRYPTODEV_BUITLIN_MAX_REQUEST_SIZE (1024 * 1024)
struct CryptoDevBackendBuiltin {
CryptoDevBackend parent_obj;
@ -98,12 +100,7 @@ static void cryptodev_builtin_init(
1u << QCRYPTODEV_BACKEND_SERVICE_TYPE_MAC;
backend->conf.cipher_algo_l = 1u << VIRTIO_CRYPTO_CIPHER_AES_CBC;
backend->conf.hash_algo = 1u << VIRTIO_CRYPTO_HASH_SHA1;
/*
* Set the Maximum length of crypto request.
* Why this value? Just avoid to overflow when
* memory allocation for each crypto request.
*/
backend->conf.max_size = LONG_MAX - sizeof(CryptoDevBackendOpInfo);
backend->conf.max_size = CRYPTODEV_BUITLIN_MAX_REQUEST_SIZE;
backend->conf.max_cipher_key_len = CRYPTODEV_BUITLIN_MAX_CIPHER_KEY_LEN;
backend->conf.max_auth_key_len = CRYPTODEV_BUITLIN_MAX_AUTH_KEY_LEN;
cryptodev_builtin_init_akcipher(backend);

60
docs/interop/vhost-user.rst

@ -411,8 +411,8 @@ in the ancillary data:
* ``VHOST_USER_SET_INFLIGHT_FD`` (if ``VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD``)
* ``VHOST_USER_SET_DEVICE_STATE_FD``
When sending file descriptors in ancilliary data, *front-end* should
associate the ancilliary data with a ``sendmsg`` operation (or
When sending file descriptors in ancillary data, *front-end* should
associate the ancillary data with a ``sendmsg`` operation (or
equivalent) that sends bytes starting with the first byte of the
message header. *back-end* can therefore expect that file descriptors
will only be received in the first ``recvmsg`` operation for a message
@ -743,6 +743,8 @@ negotiated, back-end can send file descriptors (at most 8 descriptors in
each message) to front-end via ancillary data using this fd communication
channel.
.. _inflight_io_tracking:
Inflight I/O tracking
---------------------
@ -1040,26 +1042,27 @@ Protocol features
.. code:: c
#define VHOST_USER_PROTOCOL_F_MQ 0
#define VHOST_USER_PROTOCOL_F_LOG_SHMFD 1
#define VHOST_USER_PROTOCOL_F_RARP 2
#define VHOST_USER_PROTOCOL_F_REPLY_ACK 3
#define VHOST_USER_PROTOCOL_F_MTU 4
#define VHOST_USER_PROTOCOL_F_BACKEND_REQ 5
#define VHOST_USER_PROTOCOL_F_CROSS_ENDIAN 6
#define VHOST_USER_PROTOCOL_F_CRYPTO_SESSION 7
#define VHOST_USER_PROTOCOL_F_PAGEFAULT 8
#define VHOST_USER_PROTOCOL_F_CONFIG 9
#define VHOST_USER_PROTOCOL_F_BACKEND_SEND_FD 10
#define VHOST_USER_PROTOCOL_F_HOST_NOTIFIER 11
#define VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD 12
#define VHOST_USER_PROTOCOL_F_RESET_DEVICE 13
#define VHOST_USER_PROTOCOL_F_INBAND_NOTIFICATIONS 14
#define VHOST_USER_PROTOCOL_F_CONFIGURE_MEM_SLOTS 15
#define VHOST_USER_PROTOCOL_F_STATUS 16
#define VHOST_USER_PROTOCOL_F_XEN_MMAP 17
#define VHOST_USER_PROTOCOL_F_SHARED_OBJECT 18
#define VHOST_USER_PROTOCOL_F_DEVICE_STATE 19
#define VHOST_USER_PROTOCOL_F_MQ 0
#define VHOST_USER_PROTOCOL_F_LOG_SHMFD 1
#define VHOST_USER_PROTOCOL_F_RARP 2
#define VHOST_USER_PROTOCOL_F_REPLY_ACK 3
#define VHOST_USER_PROTOCOL_F_MTU 4
#define VHOST_USER_PROTOCOL_F_BACKEND_REQ 5
#define VHOST_USER_PROTOCOL_F_CROSS_ENDIAN 6
#define VHOST_USER_PROTOCOL_F_CRYPTO_SESSION 7
#define VHOST_USER_PROTOCOL_F_PAGEFAULT 8
#define VHOST_USER_PROTOCOL_F_CONFIG 9
#define VHOST_USER_PROTOCOL_F_BACKEND_SEND_FD 10
#define VHOST_USER_PROTOCOL_F_HOST_NOTIFIER 11
#define VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD 12
#define VHOST_USER_PROTOCOL_F_RESET_DEVICE 13
#define VHOST_USER_PROTOCOL_F_INBAND_NOTIFICATIONS 14
#define VHOST_USER_PROTOCOL_F_CONFIGURE_MEM_SLOTS 15
#define VHOST_USER_PROTOCOL_F_STATUS 16
#define VHOST_USER_PROTOCOL_F_XEN_MMAP 17
#define VHOST_USER_PROTOCOL_F_SHARED_OBJECT 18
#define VHOST_USER_PROTOCOL_F_DEVICE_STATE 19
#define VHOST_USER_PROTOCOL_F_GET_VRING_BASE_INFLIGHT 20
Front-end message types
-----------------------
@ -1255,6 +1258,19 @@ Front-end message types
The request payload's *num* field is currently reserved and must be
set to 0.
By default, the back-end must complete all inflight I/O requests for the
specified vring before stopping it.
If the ``VHOST_USER_PROTOCOL_F_GET_VRING_BASE_INFLIGHT`` protocol
feature has been negotiated, the back-end may suspend in-flight I/O
requests and record them as described in :ref:`Inflight I/O tracking
<inflight_io_tracking>` instead of completing them before stopping the vring.
How to suspend an in-flight request depends on the implementation of the back-end
but it typically can be done by aborting or cancelling the underlying I/O
request. The ``VHOST_USER_PROTOCOL_F_GET_VRING_BASE_INFLIGHT``
protocol feature must only be negotiated if
``VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD`` is also negotiated.
``VHOST_USER_SET_VRING_KICK``
:id: 12
:equivalent ioctl: ``VHOST_SET_VRING_KICK``

3
docs/system/devices/virtio/vhost-user.rst

@ -58,6 +58,9 @@ platform details for what sort of virtio bus to use.
* - vhost-user-vsock
- Socket based communication
- `vhost-device-vsock <https://github.com/rust-vmm/vhost-device/tree/main/vhost-device-vsock>`_
* - vhost-user-spi
- Proxy spi devices to host
- `vhost-device-spi <https://github.com/rust-vmm/vhost-device/tree/main/vhost-device-spi>`_
The referenced *daemons* are not exhaustive, any conforming backend
implementing the device and using the vhost-user protocol should work.

6
hw/acpi/ghes-stub.c

@ -11,10 +11,10 @@
#include "qemu/osdep.h"
#include "hw/acpi/ghes.h"
int acpi_ghes_memory_errors(AcpiGhesState *ags, uint16_t source_id,
uint64_t physical_address)
bool acpi_ghes_memory_errors(AcpiGhesState *ags, uint16_t source_id,
uint64_t physical_address, Error **errp)
{
return -1;
g_assert_not_reached();
}
AcpiGhesState *acpi_ghes_get_state(void)

45
hw/acpi/ghes.c

@ -444,7 +444,7 @@ static void get_hw_error_offsets(uint64_t ghes_addr,
*read_ack_register_addr = ghes_addr + sizeof(uint64_t);
}
static void get_ghes_source_offsets(uint16_t source_id,
static bool get_ghes_source_offsets(uint16_t source_id,
uint64_t hest_addr,
uint64_t *cper_addr,
uint64_t *read_ack_start_addr,
@ -475,7 +475,7 @@ static void get_ghes_source_offsets(uint16_t source_id,
/* For now, we only know the size of GHESv2 table */
if (type != ACPI_GHES_SOURCE_GENERIC_ERROR_V2) {
error_setg(errp, "HEST: type %d not supported.", type);
return;
return false;
}
/* Compare CPER source ID at the GHESv2 structure */
@ -489,7 +489,7 @@ static void get_ghes_source_offsets(uint16_t source_id,
}
if (i == num_sources) {
error_setg(errp, "HEST: Source %d not found.", source_id);
return;
return false;
}
/* Navigate through table address pointers */
@ -509,27 +509,30 @@ static void get_ghes_source_offsets(uint16_t source_id,
cpu_physical_memory_read(hest_read_ack_addr, read_ack_start_addr,
sizeof(*read_ack_start_addr));
*read_ack_start_addr = le64_to_cpu(*read_ack_start_addr);
return true;
}
NotifierList acpi_generic_error_notifiers =
NOTIFIER_LIST_INITIALIZER(acpi_generic_error_notifiers);
void ghes_record_cper_errors(AcpiGhesState *ags, const void *cper, size_t len,
bool ghes_record_cper_errors(AcpiGhesState *ags, const void *cper, size_t len,
uint16_t source_id, Error **errp)
{
uint64_t cper_addr = 0, read_ack_register_addr = 0, read_ack_register;
if (len > ACPI_GHES_MAX_RAW_DATA_LENGTH) {
error_setg(errp, "GHES CPER record is too big: %zd", len);
return;
return false;
}
if (!ags->use_hest_addr) {
get_hw_error_offsets(le64_to_cpu(ags->hw_error_le),
&cper_addr, &read_ack_register_addr);
} else {
get_ghes_source_offsets(source_id, le64_to_cpu(ags->hest_addr_le),
&cper_addr, &read_ack_register_addr, errp);
} else if (!get_ghes_source_offsets(source_id,
le64_to_cpu(ags->hest_addr_le),
&cper_addr, &read_ack_register_addr, errp)) {
return false;
}
cpu_physical_memory_read(read_ack_register_addr,
@ -540,7 +543,7 @@ void ghes_record_cper_errors(AcpiGhesState *ags, const void *cper, size_t len,
error_setg(errp,
"OSPM does not acknowledge previous error,"
" so can not record CPER for current error anymore");
return;
return false;
}
read_ack_register = cpu_to_le64(0);
@ -555,20 +558,19 @@ void ghes_record_cper_errors(AcpiGhesState *ags, const void *cper, size_t len,
cpu_physical_memory_write(cper_addr, cper, len);
notifier_list_notify(&acpi_generic_error_notifiers, &source_id);
return true;
}
int acpi_ghes_memory_errors(AcpiGhesState *ags, uint16_t source_id,
uint64_t physical_address)
bool acpi_ghes_memory_errors(AcpiGhesState *ags, uint16_t source_id,
uint64_t physical_address, Error **errp)
{
/* Memory Error Section Type */
const uint8_t guid[] =
UUID_LE(0xA5BC1114, 0x6F64, 0x4EDE, 0xB8, 0x63, 0x3E, 0x83, \
0xED, 0x7C, 0x83, 0xB1);
Error *err = NULL;
int data_length;
GArray *block;
block = g_array_new(false, true /* clear */, 1);
g_autoptr(GArray) block = g_array_new(false, true /* clear */, 1);
data_length = ACPI_GHES_DATA_LENGTH + ACPI_GHES_MEM_CPER_LENGTH;
/*
@ -583,17 +585,8 @@ int acpi_ghes_memory_errors(AcpiGhesState *ags, uint16_t source_id,
/* Build the memory section CPER for above new generic error data entry */
acpi_ghes_build_append_mem_cper(block, physical_address);
/* Report the error */
ghes_record_cper_errors(ags, block->data, block->len, source_id, &err);
g_array_free(block, true);
if (err) {
error_report_err(err);
return -1;
}
return 0;
return ghes_record_cper_errors(ags, block->data, block->len,
source_id, errp);
}
AcpiGhesState *acpi_ghes_get_state(void)

27
hw/block/vhost-user-blk.c

@ -353,6 +353,7 @@ static int vhost_user_blk_connect(DeviceState *dev, Error **errp)
vhost_dev_set_config_notifier(&s->dev, &blk_ops);
s->vhost_user.supports_config = true;
s->vhost_user.supports_inflight_migration = s->inflight_migration;
ret = vhost_dev_init(&s->dev, &s->vhost_user, VHOST_BACKEND_TYPE_USER, 0,
errp);
if (ret < 0) {
@ -568,6 +569,26 @@ static struct vhost_dev *vhost_user_blk_get_vhost(VirtIODevice *vdev)
return &s->dev;
}
static bool vhost_user_blk_inflight_needed(void *opaque)
{
struct VHostUserBlk *s = opaque;
bool inflight_migration = virtio_has_feature(s->dev.protocol_features,
VHOST_USER_PROTOCOL_F_GET_VRING_BASE_INFLIGHT);
return inflight_migration;
}
static const VMStateDescription vmstate_vhost_user_blk_inflight = {
.name = "vhost-user-blk/inflight",
.version_id = 1,
.needed = vhost_user_blk_inflight_needed,
.fields = (const VMStateField[]) {
VMSTATE_VHOST_INFLIGHT_REGION(inflight, VHostUserBlk),
VMSTATE_END_OF_LIST()
},
};
static const VMStateDescription vmstate_vhost_user_blk = {
.name = "vhost-user-blk",
.minimum_version_id = 1,
@ -576,6 +597,10 @@ static const VMStateDescription vmstate_vhost_user_blk = {
VMSTATE_VIRTIO_DEVICE,
VMSTATE_END_OF_LIST()
},
.subsections = (const VMStateDescription * const []) {
&vmstate_vhost_user_blk_inflight,
NULL
}
};
static const Property vhost_user_blk_properties[] = {
@ -591,6 +616,8 @@ static const Property vhost_user_blk_properties[] = {
VIRTIO_BLK_F_WRITE_ZEROES, true),
DEFINE_PROP_BOOL("skip-get-vring-base-on-force-shutdown", VHostUserBlk,
skip_get_vring_base_on_force_shutdown, false),
DEFINE_PROP_BOOL("inflight-migration", VHostUserBlk,
inflight_migration, false),
};
static void vhost_user_blk_class_init(ObjectClass *klass, const void *data)

11
hw/cxl/cxl-mailbox-utils.c

@ -1875,7 +1875,7 @@ static uint64_t get_dc_size(CXLType3Dev *ct3d, MemoryRegion **dc_mr)
static int validate_dpa_addr(CXLType3Dev *ct3d, uint64_t dpa_addr,
size_t length)
{
uint64_t vmr_size, pmr_size, dc_size;
uint64_t vmr_size, pmr_size, dc_size, dpa_end;
if ((dpa_addr % CXL_CACHE_LINE_SIZE) ||
(length % CXL_CACHE_LINE_SIZE) ||
@ -1887,7 +1887,12 @@ static int validate_dpa_addr(CXLType3Dev *ct3d, uint64_t dpa_addr,
pmr_size = get_pmr_size(ct3d, NULL);
dc_size = get_dc_size(ct3d, NULL);
if (dpa_addr + length > vmr_size + pmr_size + dc_size) {
/* sanitize 64 bit values coming from guest */
if (uadd64_overflow(dpa_addr, length, &dpa_end)) {
return -EINVAL;
}
if (dpa_end > vmr_size + pmr_size + dc_size) {
return -EINVAL;
}
@ -2006,7 +2011,7 @@ static CXLRetCode media_operations_discovery(uint8_t *payload_in,
* sub class command.
*/
if (media_op_in_disc_pl->dpa_range_count ||
start_index > ARRAY_SIZE(media_op_matrix)) {
start_index + num_ops > ARRAY_SIZE(media_op_matrix)) {
return CXL_MBOX_INVALID_INPUT;
}

6
hw/display/virtio-dmabuf.c

@ -35,11 +35,13 @@ static bool virtio_add_resource(QemuUUID *uuid, VirtioSharedObject *value)
if (resource_uuids == NULL) {
resource_uuids = g_hash_table_new_full(qemu_uuid_hash,
uuid_equal_func,
NULL,
g_free,
g_free);
}
if (g_hash_table_lookup(resource_uuids, uuid) == NULL) {
g_hash_table_insert(resource_uuids, uuid, value);
g_hash_table_insert(resource_uuids,
g_memdup2(uuid, sizeof(*uuid)),
value);
} else {
result = false;
}

8
hw/display/virtio-gpu-virgl.c

@ -120,7 +120,7 @@ virtio_gpu_virgl_map_resource_blob(VirtIOGPU *g,
vmr->g = g;
mr = &vmr->mr;
memory_region_init_ram_ptr(mr, OBJECT(mr), "blob", size, data);
memory_region_init_ram_ptr(mr, OBJECT(mr), NULL, size, data);
memory_region_add_subregion(&b->hostmem, offset, mr);
memory_region_set_enabled(mr, true);
@ -186,7 +186,7 @@ virtio_gpu_virgl_unmap_resource_blob(VirtIOGPU *g,
/* memory region owns self res->mr object and frees it by itself */
memory_region_set_enabled(mr, false);
memory_region_del_subregion(&b->hostmem, mr);
object_unparent(OBJECT(mr));
object_unref(OBJECT(mr));
}
return 0;
@ -561,7 +561,7 @@ static void virgl_resource_attach_backing(VirtIOGPU *g,
ret = virtio_gpu_create_mapping_iov(g, att_rb.nr_entries, sizeof(att_rb),
cmd, NULL, &res_iovs, &res_niov);
if (ret != 0) {
if (ret < 0) {
cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
return;
}
@ -705,7 +705,7 @@ static void virgl_cmd_resource_create_blob(VirtIOGPU *g,
ret = virtio_gpu_create_mapping_iov(g, cblob.nr_entries, sizeof(cblob),
cmd, &res->base.addrs,
&res->base.iov, &res->base.iov_cnt);
if (!ret) {
if (ret < 0) {
cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
return;
}

4
hw/display/virtio-gpu.c

@ -354,7 +354,7 @@ static void virtio_gpu_resource_create_blob(VirtIOGPU *g,
ret = virtio_gpu_create_mapping_iov(g, cblob.nr_entries, sizeof(cblob),
cmd, &res->addrs, &res->iov,
&res->iov_cnt);
if (ret != 0) {
if (ret < 0) {
cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
g_free(res);
return;
@ -933,7 +933,7 @@ virtio_gpu_resource_attach_backing(VirtIOGPU *g,
ret = virtio_gpu_create_mapping_iov(g, ab.nr_entries, sizeof(ab), cmd,
&res->addrs, &res->iov, &res->iov_cnt);
if (ret != 0) {
if (ret < 0) {
cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
return;
}

204
hw/i386/intel_iommu.c

@ -1998,7 +1998,7 @@ static int vtd_iova_to_fspte(IntelIOMMUState *s, VTDContextEntry *ce,
uint64_t iova, bool is_write,
uint64_t *fsptep, uint32_t *fspte_level,
bool *reads, bool *writes, uint8_t aw_bits,
uint32_t pasid)
uint32_t pasid, int iommu_idx)
{
dma_addr_t addr = vtd_get_iova_pgtbl_base(s, ce, pasid);
uint32_t offset;
@ -2039,7 +2039,8 @@ static int vtd_iova_to_fspte(IntelIOMMUState *s, VTDContextEntry *ce,
*reads = true;
*writes = (*writes) && (fspte & VTD_FS_RW);
if (is_write && !(fspte & VTD_FS_RW)) {
/* ATS should not fail when the write permission is not set */
if (is_write && !(fspte & VTD_FS_RW) && iommu_idx != VTD_IDX_ATS) {
return -VTD_FR_SM_WRITE;
}
if (vtd_fspte_nonzero_rsvd(fspte, *fspte_level)) {
@ -2098,7 +2099,7 @@ static void vtd_report_fault(IntelIOMMUState *s,
*/
static bool vtd_do_iommu_translate(VTDAddressSpace *vtd_as, PCIBus *bus,
uint8_t devfn, hwaddr addr, bool is_write,
IOMMUTLBEntry *entry)
IOMMUTLBEntry *entry, int iommu_idx)
{
IntelIOMMUState *s = vtd_as->iommu_state;
VTDContextEntry ce;
@ -2204,7 +2205,8 @@ static bool vtd_do_iommu_translate(VTDAddressSpace *vtd_as, PCIBus *bus,
if (s->fsts && s->root_scalable) {
ret_fr = vtd_iova_to_fspte(s, &ce, addr, is_write, &pte, &level,
&reads, &writes, s->aw_bits, pasid);
&reads, &writes, s->aw_bits, pasid,
iommu_idx);
pgtt = VTD_SM_PASID_ENTRY_FST;
} else {
ret_fr = vtd_iova_to_sspte(s, &ce, addr, is_write, &pte, &level,
@ -2860,8 +2862,10 @@ static bool vtd_inv_desc_reserved_check(IntelIOMMUState *s,
static bool vtd_process_wait_desc(IntelIOMMUState *s, VTDInvDesc *inv_desc)
{
uint64_t mask[4] = {VTD_INV_DESC_WAIT_RSVD_LO, VTD_INV_DESC_WAIT_RSVD_HI,
VTD_INV_DESC_ALL_ONE, VTD_INV_DESC_ALL_ONE};
uint64_t mask[4] = {
VTD_INV_DESC_WAIT_RSVD_LO(s->ecap), VTD_INV_DESC_WAIT_RSVD_HI,
VTD_INV_DESC_ALL_ONE, VTD_INV_DESC_ALL_ONE
};
bool ret = true;
if (!vtd_inv_desc_reserved_check(s, inv_desc, mask, false,
@ -3985,6 +3989,25 @@ static void vtd_mem_write(void *opaque, hwaddr addr,
}
}
static void vtd_prepare_identity_entry(hwaddr addr, IOMMUAccessFlags perm,
uint32_t pasid, IOMMUTLBEntry *iotlb)
{
iotlb->iova = addr & VTD_PAGE_MASK_4K;
iotlb->translated_addr = addr & VTD_PAGE_MASK_4K;
iotlb->addr_mask = ~VTD_PAGE_MASK_4K;
iotlb->perm = perm;
iotlb->pasid = pasid;
}
static inline void vtd_prepare_error_entry(IOMMUTLBEntry *entry)
{
entry->iova = 0;
entry->translated_addr = 0;
entry->addr_mask = ~VTD_PAGE_MASK_4K;
entry->perm = IOMMU_NONE;
entry->pasid = PCI_NO_PASID;
}
static IOMMUTLBEntry vtd_iommu_translate(IOMMUMemoryRegion *iommu, hwaddr addr,
IOMMUAccessFlags flag, int iommu_idx)
{
@ -3996,16 +4019,29 @@ static IOMMUTLBEntry vtd_iommu_translate(IOMMUMemoryRegion *iommu, hwaddr addr,
.pasid = vtd_as->pasid,
};
bool success;
bool is_write = flag & IOMMU_WO;
if (likely(s->dmar_enabled)) {
success = vtd_do_iommu_translate(vtd_as, vtd_as->bus, vtd_as->devfn,
addr, flag & IOMMU_WO, &iotlb);
/* Only support translated requests in scalable mode */
if (iommu_idx == VTD_IDX_TRANSLATED && s->root_scalable) {
if (vtd_as->pasid == PCI_NO_PASID) {
vtd_prepare_identity_entry(addr, IOMMU_RW, PCI_NO_PASID,
&iotlb);
success = true;
} else {
vtd_prepare_error_entry(&iotlb);
error_report_once("%s: translated request with PASID not "
"allowed (pasid=0x%" PRIx32 ")", __func__,
vtd_as->pasid);
success = false;
}
} else {
success = vtd_do_iommu_translate(vtd_as, vtd_as->bus, vtd_as->devfn,
addr, is_write, &iotlb, iommu_idx);
}
} else {
/* DMAR disabled, passthrough, use 4k-page*/
iotlb.iova = addr & VTD_PAGE_MASK_4K;
iotlb.translated_addr = addr & VTD_PAGE_MASK_4K;
iotlb.addr_mask = ~VTD_PAGE_MASK_4K;
iotlb.perm = IOMMU_RW;
vtd_prepare_identity_entry(addr, IOMMU_RW, vtd_as->pasid, &iotlb);
success = true;
}
@ -4152,6 +4188,7 @@ static const Property vtd_properties[] = {
DEFINE_PROP_BOOL("x-flts", IntelIOMMUState, fsts, FALSE),
DEFINE_PROP_BOOL("snoop-control", IntelIOMMUState, snoop_control, false),
DEFINE_PROP_BOOL("x-pasid-mode", IntelIOMMUState, pasid, false),
DEFINE_PROP_BOOL("svm", IntelIOMMUState, svm, false),
DEFINE_PROP_BOOL("dma-drain", IntelIOMMUState, dma_drain, true),
DEFINE_PROP_BOOL("stale-tm", IntelIOMMUState, stale_tm, false),
DEFINE_PROP_BOOL("fs1gp", IntelIOMMUState, fs1gp, true),
@ -4414,6 +4451,37 @@ static int vtd_int_remap(X86IOMMUState *iommu, MSIMessage *src,
src, dst, sid, false);
}
static void vtd_report_sid_ir_illegal_access(IntelIOMMUState *s, uint16_t sid,
uint32_t pasid, hwaddr addr,
bool is_write)
{
uint8_t bus_n = VTD_SID_TO_BUS(sid);
uint8_t devfn = VTD_SID_TO_DEVFN(sid);
bool is_fpd_set = false;
VTDContextEntry ce;
/* Try out best to fetch FPD, we can't do anything more */
if (vtd_dev_to_context_entry(s, bus_n, devfn, &ce) == 0) {
is_fpd_set = ce.lo & VTD_CONTEXT_ENTRY_FPD;
if (!is_fpd_set && s->root_scalable) {
vtd_ce_get_pasid_fpd(s, &ce, &is_fpd_set, pasid);
}
}
vtd_report_fault(s, VTD_FR_SM_INTERRUPT_ADDR, is_fpd_set, sid, addr,
is_write, pasid != PCI_NO_PASID, pasid);
}
static void vtd_report_ir_illegal_access(VTDAddressSpace *vtd_as,
hwaddr addr, bool is_write)
{
uint8_t bus_n = pci_bus_num(vtd_as->bus);
uint16_t sid = PCI_BUILD_BDF(bus_n, vtd_as->devfn);
vtd_report_sid_ir_illegal_access(vtd_as->iommu_state, sid, vtd_as->pasid,
addr, is_write);
}
static MemTxResult vtd_mem_ir_read(void *opaque, hwaddr addr,
uint64_t *data, unsigned size,
MemTxAttrs attrs)
@ -4425,9 +4493,11 @@ static MemTxResult vtd_mem_ir_write(void *opaque, hwaddr addr,
uint64_t value, unsigned size,
MemTxAttrs attrs)
{
IntelIOMMUState *s = opaque;
int ret = 0;
MSIMessage from = {}, to = {};
uint16_t sid = X86_IOMMU_SID_INVALID;
uint32_t pasid;
from.address = (uint64_t) addr + VTD_INTERRUPT_ADDR_FIRST;
from.data = (uint32_t) value;
@ -4435,9 +4505,16 @@ static MemTxResult vtd_mem_ir_write(void *opaque, hwaddr addr,
if (!attrs.unspecified) {
/* We have explicit Source ID */
sid = attrs.requester_id;
pasid = attrs.pid != 0 ? attrs.pid : PCI_NO_PASID;
if (attrs.address_type == PCI_AT_TRANSLATED &&
sid != X86_IOMMU_SID_INVALID) {
vtd_report_sid_ir_illegal_access(s, sid, pasid, from.address, true);
return MEMTX_ERROR;
}
}
ret = vtd_interrupt_remap_msi(opaque, &from, &to, sid, true);
ret = vtd_interrupt_remap_msi(s, &from, &to, sid, true);
if (ret) {
/* Drop this interrupt */
return MEMTX_ERROR;
@ -4462,30 +4539,6 @@ static const MemoryRegionOps vtd_mem_ir_ops = {
},
};
static void vtd_report_ir_illegal_access(VTDAddressSpace *vtd_as,
hwaddr addr, bool is_write)
{
IntelIOMMUState *s = vtd_as->iommu_state;
uint8_t bus_n = pci_bus_num(vtd_as->bus);
uint16_t sid = PCI_BUILD_BDF(bus_n, vtd_as->devfn);
bool is_fpd_set = false;
VTDContextEntry ce;
assert(vtd_as->pasid != PCI_NO_PASID);
/* Try out best to fetch FPD, we can't do anything more */
if (vtd_dev_to_context_entry(s, bus_n, vtd_as->devfn, &ce) == 0) {
is_fpd_set = ce.lo & VTD_CONTEXT_ENTRY_FPD;
if (!is_fpd_set && s->root_scalable) {
vtd_ce_get_pasid_fpd(s, &ce, &is_fpd_set, vtd_as->pasid);
}
}
vtd_report_fault(s, VTD_FR_SM_INTERRUPT_ADDR,
is_fpd_set, sid, addr, is_write,
true, vtd_as->pasid);
}
static MemTxResult vtd_mem_ir_fault_read(void *opaque, hwaddr addr,
uint64_t *data, unsigned size,
MemTxAttrs attrs)
@ -5046,6 +5099,10 @@ static void vtd_init(IntelIOMMUState *s)
vtd_spte_rsvd_large[3] &= ~VTD_SPTE_SNP;
}
if (s->svm) {
s->ecap |= VTD_ECAP_PRS | VTD_ECAP_PDS | VTD_ECAP_NWFS;
}
vtd_reset_caches(s);
/* Define registers with default values and bit semantics */
@ -5140,19 +5197,29 @@ static IOMMUTLBEntry vtd_iommu_ats_do_translate(IOMMUMemoryRegion *iommu,
hwaddr addr,
IOMMUAccessFlags flags)
{
IOMMUTLBEntry entry;
IOMMUTLBEntry entry = { .target_as = &address_space_memory };
VTDAddressSpace *vtd_as = container_of(iommu, VTDAddressSpace, iommu);
IntelIOMMUState *s = vtd_as->iommu_state;
/* Guard that makes sure we avoid weird behaviors */
if ((flags & IOMMU_PRIV) && (s->ecap & VTD_ECAP_SRS)) {
error_report_once("Privileged ATS not supported");
abort();
}
if (vtd_is_interrupt_addr(addr)) {
vtd_prepare_error_entry(&entry);
vtd_report_ir_illegal_access(vtd_as, addr, flags & IOMMU_WO);
entry.target_as = &address_space_memory;
entry.iova = 0;
entry.translated_addr = 0;
entry.addr_mask = ~VTD_PAGE_MASK_4K;
entry.perm = IOMMU_NONE;
entry.pasid = PCI_NO_PASID;
} else if ((flags & IOMMU_PRIV) && !(s->ecap & VTD_ECAP_SRS)) {
/*
* For translation-request-with-PASID with PR=1, remapping hardware
* not supporting supervisor requests (SRS=0 in the Extended
* Capability Register) forces R=W=E=0 in addition to setting PRIV=1.
*/
vtd_prepare_error_entry(&entry);
entry.perm = IOMMU_PRIV;
} else {
entry = vtd_iommu_translate(iommu, addr, flags, 0);
entry = vtd_iommu_translate(iommu, addr, flags, VTD_IDX_ATS);
}
return entry;
@ -5513,6 +5580,29 @@ static bool vtd_decide_config(IntelIOMMUState *s, Error **errp)
return false;
}
if (s->svm) {
if (!x86_iommu->dt_supported) {
error_setg(errp, "Need to set device IOTLB for svm");
return false;
}
if (!s->fsts) {
error_setg(errp, "Need to set flts for svm");
return false;
}
if (!x86_iommu->dma_translation) {
error_setg(errp, "Need to set dma-translation for svm");
return false;
}
if (!s->pasid) {
error_setg(errp, "Need to set PASID support for svm");
return false;
}
}
return true;
}
@ -5523,17 +5613,6 @@ static void vtd_realize(DeviceState *dev, Error **errp)
X86MachineState *x86ms = X86_MACHINE(ms);
PCIBus *bus = pcms->pcibus;
IntelIOMMUState *s = INTEL_IOMMU_DEVICE(dev);
X86IOMMUState *x86_iommu = X86_IOMMU_DEVICE(s);
if (s->pasid && x86_iommu->dt_supported) {
/*
* PASID-based-Device-TLB Invalidate Descriptor is not
* implemented and it requires support from vhost layer which
* needs to be implemented in the future.
*/
error_setg(errp, "PASID based device IOTLB is not supported");
return;
}
if (!vtd_decide_config(s, errp)) {
return;
@ -5601,6 +5680,17 @@ static const TypeInfo vtd_info = {
.class_init = vtd_class_init,
};
static int vtd_attrs_to_index(IOMMUMemoryRegion *iommu_mr, MemTxAttrs attrs)
{
return attrs.address_type == PCI_AT_TRANSLATED ?
VTD_IDX_TRANSLATED : VTD_IDX_UNTRANSLATED;
}
static int vtd_num_indexes(IOMMUMemoryRegion *iommu)
{
return VTD_IDX_COUNT;
}
static void vtd_iommu_memory_region_class_init(ObjectClass *klass,
const void *data)
{
@ -5609,6 +5699,8 @@ static void vtd_iommu_memory_region_class_init(ObjectClass *klass,
imrc->translate = vtd_iommu_translate;
imrc->notify_flag_changed = vtd_iommu_notify_flag_changed;
imrc->replay = vtd_iommu_replay;
imrc->attrs_to_index = vtd_attrs_to_index;
imrc->num_indexes = vtd_num_indexes;
}
static const TypeInfo vtd_iommu_memory_region_info = {

14
hw/i386/intel_iommu_internal.h

@ -194,8 +194,10 @@
#define VTD_ECAP_PRS (1ULL << 29)
#define VTD_ECAP_MHMV (15ULL << 20)
#define VTD_ECAP_SRS (1ULL << 31)
#define VTD_ECAP_NWFS (1ULL << 33)
#define VTD_ECAP_PSS (7ULL << 35) /* limit: MemTxAttrs::pid */
#define VTD_ECAP_PASID (1ULL << 40)
#define VTD_ECAP_PDS (1ULL << 42)
#define VTD_ECAP_SMTS (1ULL << 43)
#define VTD_ECAP_SSTS (1ULL << 46)
#define VTD_ECAP_FSTS (1ULL << 47)
@ -417,7 +419,9 @@ typedef union VTDPRDesc VTDPRDesc;
#define VTD_INV_DESC_WAIT_IF (1ULL << 4)
#define VTD_INV_DESC_WAIT_FN (1ULL << 6)
#define VTD_INV_DESC_WAIT_DATA_SHIFT 32
#define VTD_INV_DESC_WAIT_RSVD_LO 0Xfffff180ULL
#define VTD_INV_DESC_WAIT_RSVD_LO(ecap) (0Xfffff100ULL | \
(((ecap) & VTD_ECAP_PDS) \
? 0 : (1 << 7)))
#define VTD_INV_DESC_WAIT_RSVD_HI 3ULL
/* Masks for Context-cache Invalidation Descriptor */
@ -688,6 +692,14 @@ typedef struct VTDPIOTLBInvInfo {
/* Bits to decide the offset for each level */
#define VTD_LEVEL_BITS 9
/* IOMMU Index */
typedef enum VTDIOMMUIndex {
VTD_IDX_UNTRANSLATED = 0, /* Default */
VTD_IDX_TRANSLATED = 1,
VTD_IDX_ATS = 2,
VTD_IDX_COUNT = 3, /* Number of supported indexes */
} VTDIOMMUIndex;
typedef struct VTDHostIOMMUDevice {
IntelIOMMUState *iommu_state;
PCIBus *bus;

1
hw/pci-host/articia.c

@ -200,7 +200,6 @@ static void articia_class_init(ObjectClass *klass, const void *data)
DeviceClass *dc = DEVICE_CLASS(klass);
dc->realize = articia_realize;
set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories);
}
/* TYPE_ARTICIA_PCI_HOST */

1
hw/pci-host/aspeed_pcie.c

@ -298,7 +298,6 @@ static void aspeed_pcie_rc_class_init(ObjectClass *klass, const void *data)
dc->desc = "ASPEED PCIe RC";
dc->realize = aspeed_pcie_rc_realize;
dc->fw_name = "pci";
set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories);
hc->root_bus_path = aspeed_pcie_rc_root_bus_path;
device_class_set_props(dc, aspeed_pcie_rc_props);

3
hw/pci-host/designware.c

@ -593,8 +593,6 @@ static void designware_pcie_root_class_init(ObjectClass *klass,
PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
DeviceClass *dc = DEVICE_CLASS(klass);
set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories);
k->vendor_id = PCI_VENDOR_ID_SYNOPSYS;
k->device_id = 0xABCD;
k->revision = 0;
@ -736,7 +734,6 @@ static void designware_pcie_host_class_init(ObjectClass *klass,
hc->root_bus_path = designware_pcie_host_root_bus_path;
dc->realize = designware_pcie_host_realize;
set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories);
dc->fw_name = "pci";
dc->vmsd = &vmstate_designware_pcie_host;
}

2
hw/pci-host/gpex.c

@ -200,7 +200,6 @@ static void gpex_host_class_init(ObjectClass *klass, const void *data)
hc->root_bus_path = gpex_host_root_bus_path;
dc->realize = gpex_host_realize;
dc->unrealize = gpex_host_unrealize;
set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories);
dc->fw_name = "pci";
device_class_set_props(dc, gpex_host_properties);
}
@ -242,7 +241,6 @@ static void gpex_root_class_init(ObjectClass *klass, const void *data)
PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
DeviceClass *dc = DEVICE_CLASS(klass);
set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories);
dc->desc = "QEMU generic PCIe host bridge";
dc->vmsd = &vmstate_gpex_root;
k->vendor_id = PCI_VENDOR_ID_REDHAT;

1
hw/pci-host/grackle.c

@ -140,7 +140,6 @@ static void grackle_class_init(ObjectClass *klass, const void *data)
dc->realize = grackle_realize;
device_class_set_props(dc, grackle_properties);
set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories);
dc->fw_name = "pci";
sbc->explicit_ofw_unit_address = grackle_ofw_unit_address;
}

1
hw/pci-host/gt64120.c

@ -1298,7 +1298,6 @@ static void gt64120_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories);
device_class_set_props(dc, gt64120_properties);
dc->realize = gt64120_realize;
device_class_set_legacy_reset(dc, gt64120_reset);

1
hw/pci-host/mv64361.c

@ -108,7 +108,6 @@ static void mv64361_pcihost_class_init(ObjectClass *klass, const void *data)
dc->realize = mv64361_pcihost_realize;
device_class_set_props(dc, mv64361_pcihost_props);
set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories);
}
static const TypeInfo mv64361_pcihost_info = {

1
hw/pci-host/pnv_phb.c

@ -202,7 +202,6 @@ static void pnv_phb_class_init(ObjectClass *klass, const void *data)
hc->root_bus_path = pnv_phb_root_bus_path;
dc->realize = pnv_phb_realize;
device_class_set_props(dc, pnv_phb_properties);
set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories);
dc->user_creatable = true;
}

1
hw/pci-host/ppce500.c

@ -516,7 +516,6 @@ static void e500_pcihost_class_init(ObjectClass *klass, const void *data)
DeviceClass *dc = DEVICE_CLASS(klass);
dc->realize = e500_pcihost_realize;
set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories);
device_class_set_props(dc, pcihost_properties);
dc->vmsd = &vmstate_ppce500_pci;
}

26
hw/pci-host/q35.c

@ -194,7 +194,6 @@ static void q35_host_class_init(ObjectClass *klass, const void *data)
device_class_set_props(dc, q35_host_props);
/* Reason: needs to be wired up by pc_q35_init */
dc->user_creatable = false;
set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories);
dc->fw_name = "pci";
}
@ -432,30 +431,27 @@ static void mch_update_smbase_smram(MCHPCIState *mch)
}
if (*reg == MCH_HOST_BRIDGE_F_SMBASE_QUERY) {
pd->wmask[MCH_HOST_BRIDGE_F_SMBASE] =
MCH_HOST_BRIDGE_F_SMBASE_LCK;
pd->wmask[MCH_HOST_BRIDGE_F_SMBASE] = MCH_HOST_BRIDGE_F_SMBASE_LCK;
*reg = MCH_HOST_BRIDGE_F_SMBASE_IN_RAM;
return;
}
/*
* default/reset state, discard written value
* which will disable SMRAM balackhole at SMBASE
* reg value can come from register write/reset/migration source,
* update wmask to be in sync with it regardless of source
*/
if (pd->wmask[MCH_HOST_BRIDGE_F_SMBASE] == 0xff) {
*reg = 0x00;
if (*reg == MCH_HOST_BRIDGE_F_SMBASE_IN_RAM) {
pd->wmask[MCH_HOST_BRIDGE_F_SMBASE] = MCH_HOST_BRIDGE_F_SMBASE_LCK;
return;
}
memory_region_transaction_begin();
if (*reg & MCH_HOST_BRIDGE_F_SMBASE_LCK) {
/* disable all writes */
pd->wmask[MCH_HOST_BRIDGE_F_SMBASE] &=
~MCH_HOST_BRIDGE_F_SMBASE_LCK;
/* lock register at 0x2 and disable all writes */
pd->wmask[MCH_HOST_BRIDGE_F_SMBASE] = 0;
*reg = MCH_HOST_BRIDGE_F_SMBASE_LCK;
lck = true;
} else {
lck = false;
}
lck = *reg & MCH_HOST_BRIDGE_F_SMBASE_LCK;
memory_region_transaction_begin();
memory_region_set_enabled(&mch->smbase_blackhole, lck);
memory_region_set_enabled(&mch->smbase_window, lck);
memory_region_transaction_commit();

1
hw/pci-host/raven.c

@ -296,7 +296,6 @@ static void raven_pcihost_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories);
dc->realize = raven_pcihost_realizefn;
dc->fw_name = "pci";
}

1
hw/pci-host/remote.c

@ -55,7 +55,6 @@ static void remote_pcihost_class_init(ObjectClass *klass, const void *data)
dc->realize = remote_pcihost_realize;
dc->user_creatable = false;
set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories);
dc->fw_name = "pci";
}

1
hw/pci-host/sabre.c

@ -505,7 +505,6 @@ static void sabre_class_init(ObjectClass *klass, const void *data)
dc->realize = sabre_realize;
device_class_set_legacy_reset(dc, sabre_reset);
device_class_set_props(dc, sabre_properties);
set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories);
dc->fw_name = "pci";
sbc->explicit_ofw_unit_address = sabre_ofw_unit_address;
}

4
hw/pci-host/uninorth.c

@ -435,7 +435,6 @@ static void pci_unin_main_class_init(ObjectClass *klass, const void *data)
dc->realize = pci_unin_main_realize;
device_class_set_props(dc, pci_unin_main_pci_host_props);
set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories);
dc->fw_name = "pci";
sbc->explicit_ofw_unit_address = pci_unin_main_ofw_unit_address;
}
@ -453,7 +452,6 @@ static void pci_u3_agp_class_init(ObjectClass *klass, const void *data)
DeviceClass *dc = DEVICE_CLASS(klass);
dc->realize = pci_u3_agp_realize;
set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories);
}
static const TypeInfo pci_u3_agp_info = {
@ -469,7 +467,6 @@ static void pci_unin_agp_class_init(ObjectClass *klass, const void *data)
DeviceClass *dc = DEVICE_CLASS(klass);
dc->realize = pci_unin_agp_realize;
set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories);
}
static const TypeInfo pci_unin_agp_info = {
@ -485,7 +482,6 @@ static void pci_unin_internal_class_init(ObjectClass *klass, const void *data)
DeviceClass *dc = DEVICE_CLASS(klass);
dc->realize = pci_unin_internal_realize;
set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories);
}
static const TypeInfo pci_unin_internal_info = {

2
hw/pci-host/xilinx-pcie.c

@ -172,7 +172,6 @@ static void xilinx_pcie_host_class_init(ObjectClass *klass, const void *data)
hc->root_bus_path = xilinx_pcie_host_root_bus_path;
dc->realize = xilinx_pcie_host_realize;
set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories);
dc->fw_name = "pci";
device_class_set_props(dc, xilinx_pcie_host_props);
}
@ -291,7 +290,6 @@ static void xilinx_pcie_root_class_init(ObjectClass *klass, const void *data)
PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
DeviceClass *dc = DEVICE_CLASS(klass);
set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories);
dc->desc = "Xilinx AXI-PCIe Host Bridge";
k->vendor_id = PCI_VENDOR_ID_XILINX;
k->device_id = 0x7021;

4
hw/pci/pci.c

@ -3171,6 +3171,10 @@ ssize_t pci_ats_request_translation(PCIDevice *dev, uint32_t pasid,
return -EPERM;
}
if (priv_req && !pcie_pasid_priv_enabled(dev)) {
return -EPERM;
}
pci_device_get_iommu_bus_devfn(dev, &iommu_bus, &bus, &devfn);
if (iommu_bus && iommu_bus->iommu_ops->ats_request_translation) {
return iommu_bus->iommu_ops->ats_request_translation(bus,

1
hw/pci/pci_host.c

@ -245,6 +245,7 @@ static void pci_host_class_init(ObjectClass *klass, const void *data)
DeviceClass *dc = DEVICE_CLASS(klass);
device_class_set_props(dc, pci_host_properties_common);
dc->vmsd = &vmstate_pcihost;
set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories);
}
static const TypeInfo pci_host_type_info = {

21
hw/pci/pcie.c

@ -1340,6 +1340,16 @@ void pcie_pri_init(PCIDevice *dev, uint16_t offset, uint32_t outstanding_pr_cap,
dev->exp.pri_cap = offset;
}
static inline bool pcie_pasid_check_ctrl_bit_enabled(const PCIDevice *dev,
uint16_t mask)
{
if (!pci_is_express(dev) || !dev->exp.pasid_cap) {
return false;
}
return (pci_get_word(dev->config + dev->exp.pasid_cap + PCI_PASID_CTRL) &
mask) != 0;
}
uint32_t pcie_pri_get_req_alloc(const PCIDevice *dev)
{
if (!pcie_pri_enabled(dev)) {
@ -1359,11 +1369,12 @@ bool pcie_pri_enabled(const PCIDevice *dev)
bool pcie_pasid_enabled(const PCIDevice *dev)
{
if (!pci_is_express(dev) || !dev->exp.pasid_cap) {
return false;
}
return (pci_get_word(dev->config + dev->exp.pasid_cap + PCI_PASID_CTRL) &
PCI_PASID_CTRL_ENABLE) != 0;
return pcie_pasid_check_ctrl_bit_enabled(dev, PCI_PASID_CTRL_ENABLE);
}
bool pcie_pasid_priv_enabled(PCIDevice *dev)
{
return pcie_pasid_check_ctrl_bit_enabled(dev, PCI_PASID_CTRL_PRIV);
}
bool pcie_ats_enabled(const PCIDevice *dev)

9
hw/pci/pcie_sriov.c

@ -195,14 +195,17 @@ bool pcie_sriov_pf_init(PCIDevice *dev, uint16_t offset,
void pcie_sriov_pf_exit(PCIDevice *dev)
{
uint8_t *cfg;
if (dev->exp.sriov_cap == 0) {
return;
}
cfg = dev->config + dev->exp.sriov_cap;
if (dev->exp.sriov_pf.vf_user_created) {
uint16_t ven_id = pci_get_word(dev->config + PCI_VENDOR_ID);
uint16_t total_vfs = pci_get_word(dev->config + PCI_SRIOV_TOTAL_VF);
uint16_t vf_dev_id = pci_get_word(dev->config + PCI_SRIOV_VF_DID);
uint16_t total_vfs = pci_get_word(cfg + PCI_SRIOV_TOTAL_VF);
uint16_t vf_dev_id = pci_get_word(cfg + PCI_SRIOV_VF_DID);
unregister_vfs(dev);
@ -213,8 +216,6 @@ void pcie_sriov_pf_exit(PCIDevice *dev)
pci_config_set_device_id(dev->exp.sriov_pf.vf[i]->config, vf_dev_id);
}
} else {
uint8_t *cfg = dev->config + dev->exp.sriov_cap;
unparent_vfs(dev, pci_get_word(cfg + PCI_SRIOV_TOTAL_VF));
}
}

1
hw/pci/shpc.c

@ -735,7 +735,6 @@ void shpc_free(PCIDevice *d)
if (!shpc) {
return;
}
object_unparent(OBJECT(&shpc->mmio));
g_free(shpc->config);
g_free(shpc->cmask);
g_free(shpc->wmask);

5
hw/virtio/Kconfig

@ -127,6 +127,11 @@ config VHOST_USER_SCMI
default y
depends on VIRTIO && VHOST_USER && ARM
config VHOST_USER_SPI
bool
default y
depends on VIRTIO && VHOST_USER
config VHOST_USER_TEST
bool
default y

3
hw/virtio/meson.build

@ -28,6 +28,7 @@ if have_vhost
system_virtio_ss.add(when: 'CONFIG_VHOST_USER_RNG', if_true: files('vhost-user-rng.c'))
system_virtio_ss.add(when: 'CONFIG_VHOST_USER_SND', if_true: files('vhost-user-snd.c'))
system_virtio_ss.add(when: 'CONFIG_VHOST_USER_INPUT', if_true: files('vhost-user-input.c'))
system_virtio_ss.add(when: 'CONFIG_VHOST_USER_SPI', if_true: files('vhost-user-spi.c'))
# PCI Stubs
system_virtio_ss.add(when: ['CONFIG_VIRTIO_PCI', 'CONFIG_VHOST_USER_TEST'],
@ -42,6 +43,8 @@ if have_vhost
if_true: files('vhost-user-snd-pci.c'))
system_virtio_ss.add(when: ['CONFIG_VIRTIO_PCI', 'CONFIG_VHOST_USER_INPUT'],
if_true: files('vhost-user-input-pci.c'))
system_virtio_ss.add(when: ['CONFIG_VIRTIO_PCI', 'CONFIG_VHOST_USER_SPI'],
if_true: files('vhost-user-spi-pci.c'))
endif
if have_vhost_vdpa
system_virtio_ss.add(files('vhost-vdpa.c'))

1
hw/virtio/vhost-shadow-virtqueue.c

@ -34,6 +34,7 @@ bool vhost_svq_valid_features(uint64_t features, Error **errp)
switch (b) {
case VIRTIO_F_ANY_LAYOUT:
case VIRTIO_RING_F_EVENT_IDX:
case VIRTIO_RING_F_INDIRECT_DESC:
continue;
case VIRTIO_F_ACCESS_PLATFORM:

69
hw/virtio/vhost-user-spi-pci.c

@ -0,0 +1,69 @@
/*
* Vhost-user spi virtio device PCI glue
*
* Copyright (C) 2025 Qualcomm Innovation Center, Inc. All Rights Reserved.
*
* SPDX-License-Identifier: GPL-2.0-or-later
*/
#include "qemu/osdep.h"
#include "hw/core/qdev-properties.h"
#include "hw/virtio/vhost-user-spi.h"
#include "hw/virtio/virtio-pci.h"
struct VHostUserSPIPCI {
VirtIOPCIProxy parent_obj;
VHostUserSPI vdev;
};
typedef struct VHostUserSPIPCI VHostUserSPIPCI;
#define TYPE_VHOST_USER_SPI_PCI "vhost-user-spi-pci-base"
DECLARE_INSTANCE_CHECKER(VHostUserSPIPCI, VHOST_USER_SPI_PCI,
TYPE_VHOST_USER_SPI_PCI)
static void vhost_user_spi_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
{
VHostUserSPIPCI *dev = VHOST_USER_SPI_PCI(vpci_dev);
DeviceState *vdev = DEVICE(&dev->vdev);
vpci_dev->nvectors = 1;
qdev_realize(vdev, BUS(&vpci_dev->bus), errp);
}
static void vhost_user_spi_pci_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass);
PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass);
k->realize = vhost_user_spi_pci_realize;
set_bit(DEVICE_CATEGORY_INPUT, dc->categories);
pcidev_k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
pcidev_k->device_id = 0; /* Set by virtio-pci based on virtio id */
pcidev_k->revision = 0x00;
pcidev_k->class_id = PCI_CLASS_COMMUNICATION_OTHER;
}
static void vhost_user_spi_pci_instance_init(Object *obj)
{
VHostUserSPIPCI *dev = VHOST_USER_SPI_PCI(obj);
virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev),
TYPE_VHOST_USER_SPI);
}
static const VirtioPCIDeviceTypeInfo vhost_user_spi_pci_info = {
.base_name = TYPE_VHOST_USER_SPI_PCI,
.non_transitional_name = "vhost-user-spi-pci",
.instance_size = sizeof(VHostUserSPIPCI),
.instance_init = vhost_user_spi_pci_instance_init,
.class_init = vhost_user_spi_pci_class_init,
};
static void vhost_user_spi_pci_register(void)
{
virtio_pci_types_register(&vhost_user_spi_pci_info);
}
type_init(vhost_user_spi_pci_register);

65
hw/virtio/vhost-user-spi.c

@ -0,0 +1,65 @@
/*
* Vhost-user spi virtio device
*
* Copyright (C) 2025 Qualcomm Innovation Center, Inc. All Rights Reserved.
*
* SPDX-License-Identifier: GPL-2.0-or-later
*/
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "hw/core/qdev-properties.h"
#include "hw/virtio/virtio-bus.h"
#include "hw/virtio/vhost-user-spi.h"
#include "qemu/error-report.h"
#include "standard-headers/linux/virtio_ids.h"
#include "standard-headers/linux/virtio_spi.h"
static const Property vspi_properties[] = {
DEFINE_PROP_CHR("chardev", VHostUserBase, chardev),
};
static void vspi_realize(DeviceState *dev, Error **errp)
{
VHostUserBase *vub = VHOST_USER_BASE(dev);
VHostUserBaseClass *vubc = VHOST_USER_BASE_GET_CLASS(dev);
/* Fixed for SPI */
vub->virtio_id = VIRTIO_ID_SPI;
vub->num_vqs = 1;
vub->vq_size = 4;
vub->config_size = sizeof(struct virtio_spi_config);
vubc->parent_realize(dev, errp);
}
static const VMStateDescription vu_spi_vmstate = {
.name = "vhost-user-spi",
.unmigratable = 1,
};
static void vu_spi_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
VHostUserBaseClass *vubc = VHOST_USER_BASE_CLASS(klass);
dc->vmsd = &vu_spi_vmstate;
device_class_set_props(dc, vspi_properties);
device_class_set_parent_realize(dc, vspi_realize,
&vubc->parent_realize);
set_bit(DEVICE_CATEGORY_INPUT, dc->categories);
}
static const TypeInfo vu_spi_info = {
.name = TYPE_VHOST_USER_SPI,
.parent = TYPE_VHOST_USER_BASE,
.instance_size = sizeof(VHostUserSPI),
.class_init = vu_spi_class_init,
};
static void vu_spi_register_types(void)
{
type_register_static(&vu_spi_info);
}
type_init(vu_spi_register_types)

7
hw/virtio/vhost-user.c

@ -2225,6 +2225,13 @@ static int vhost_user_backend_init(struct vhost_dev *dev, void *opaque,
}
}
if (!u->user->supports_inflight_migration ||
!virtio_has_feature(protocol_features,
VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD)) {
protocol_features &= ~(1ULL <<
VHOST_USER_PROTOCOL_F_GET_VRING_BASE_INFLIGHT);
}
/* final set of protocol features */
dev->protocol_features = protocol_features;
err = vhost_user_set_protocol_features(dev, dev->protocol_features);

6
hw/virtio/vhost-vdpa.c

@ -905,7 +905,7 @@ static int vhost_vdpa_reset_device(struct vhost_dev *dev)
memory_listener_unregister(&v->shared->listener);
v->shared->listener_registered = false;
v->suspended = false;
v->shared->suspended = false;
return 0;
}
@ -1354,7 +1354,7 @@ static void vhost_vdpa_suspend(struct vhost_dev *dev)
if (unlikely(r)) {
error_report("Cannot suspend: %s(%d)", g_strerror(errno), errno);
} else {
v->suspended = true;
v->shared->suspended = true;
return;
}
}
@ -1481,7 +1481,7 @@ static int vhost_vdpa_get_vring_base(struct vhost_dev *dev,
return 0;
}
if (!v->suspended) {
if (!v->shared->suspended) {
/*
* Cannot trust in value returned by device, let vhost recover used
* idx from guest.

66
hw/virtio/vhost.c

@ -592,11 +592,13 @@ static bool vhost_section(struct vhost_dev *dev, MemoryRegionSection *section)
/*
* Some backends (like vhost-user) can only handle memory regions
* that have an fd (can be mapped into a different process). Filter
* the ones without an fd out, if requested.
*
* TODO: we might have to limit to MAP_SHARED as well.
* the ones without an fd out, if requested. Also make sure that
* this region is mapped as shared so that the vhost backend can
* observe modifications to this region, otherwise we consider it
* private.
*/
if (memory_region_get_fd(section->mr) < 0 &&
if ((memory_region_get_fd(section->mr) < 0 ||
!qemu_ram_is_shared(section->mr->ram_block)) &&
dev->vhost_ops->vhost_backend_no_private_memslots &&
dev->vhost_ops->vhost_backend_no_private_memslots(dev)) {
trace_vhost_reject_section(mr->name, 2);
@ -1916,6 +1918,62 @@ void vhost_get_features_ex(struct vhost_dev *hdev,
}
}
static bool vhost_inflight_buffer_pre_load(void *opaque, Error **errp)
{
struct vhost_inflight *inflight = opaque;
int fd = -1;
void *addr = qemu_memfd_alloc("vhost-inflight", inflight->size,
F_SEAL_GROW | F_SEAL_SHRINK | F_SEAL_SEAL,
&fd, errp);
if (*errp) {
return -ENOMEM;
}
inflight->offset = 0;
inflight->addr = addr;
inflight->fd = fd;
return true;
}
const VMStateDescription vmstate_vhost_inflight_region_buffer = {
.name = "vhost-inflight-region/buffer",
.pre_load_errp = vhost_inflight_buffer_pre_load,
.fields = (const VMStateField[]) {
VMSTATE_VBUFFER_UINT64(addr, struct vhost_inflight, 0, NULL, size),
VMSTATE_END_OF_LIST()
}
};
static bool vhost_inflight_region_post_load(void *opaque,
int version_id,
Error **errp)
{
struct vhost_inflight *inflight = opaque;
if (inflight->addr == NULL) {
error_setg(errp, "inflight buffer subsection has not been loaded");
return false;
}
return true;
}
const VMStateDescription vmstate_vhost_inflight_region = {
.name = "vhost-inflight-region",
.post_load_errp = vhost_inflight_region_post_load,
.fields = (const VMStateField[]) {
VMSTATE_UINT64(size, struct vhost_inflight),
VMSTATE_UINT16(queue_size, struct vhost_inflight),
VMSTATE_END_OF_LIST()
},
.subsections = (const VMStateDescription * const []) {
&vmstate_vhost_inflight_region_buffer,
NULL
}
};
void vhost_ack_features_ex(struct vhost_dev *hdev, const int *feature_bits,
const uint64_t *features)
{

7
hw/virtio/virtio-crypto.c

@ -767,11 +767,18 @@ virtio_crypto_handle_asym_req(VirtIOCrypto *vcrypto,
uint32_t len;
uint8_t *src = NULL;
uint8_t *dst = NULL;
uint64_t max_len;
asym_op_info = g_new0(CryptoDevBackendAsymOpInfo, 1);
src_len = ldl_le_p(&req->para.src_data_len);
dst_len = ldl_le_p(&req->para.dst_data_len);
max_len = (uint64_t)src_len + dst_len;
if (unlikely(max_len > vcrypto->conf.max_size)) {
virtio_error(vdev, "virtio-crypto asym request is too large");
goto err;
}
if (src_len > 0) {
src = g_malloc0(src_len);
len = iov_to_buf(iov, out_num, 0, src, src_len);

20
hw/virtio/virtio-pci.c

@ -2183,15 +2183,17 @@ static void virtio_pci_device_plugged(DeviceState *d, Error **errp)
PCI_BASE_ADDRESS_SPACE_IO, &proxy->bar);
}
if (pci_is_vf(&proxy->pci_dev)) {
pcie_ari_init(&proxy->pci_dev, proxy->last_pcie_cap_offset);
proxy->last_pcie_cap_offset += PCI_ARI_SIZEOF;
} else {
res = pcie_sriov_pf_init_from_user_created_vfs(
&proxy->pci_dev, proxy->last_pcie_cap_offset, errp);
if (res > 0) {
proxy->last_pcie_cap_offset += res;
virtio_add_feature(&vdev->host_features, VIRTIO_F_SR_IOV);
if (pci_is_express(&proxy->pci_dev)) {
if (pci_is_vf(&proxy->pci_dev)) {
pcie_ari_init(&proxy->pci_dev, proxy->last_pcie_cap_offset);
proxy->last_pcie_cap_offset += PCI_ARI_SIZEOF;
} else {
res = pcie_sriov_pf_init_from_user_created_vfs(
&proxy->pci_dev, proxy->last_pcie_cap_offset, errp);
if (res > 0) {
proxy->last_pcie_cap_offset += res;
virtio_add_feature(&vdev->host_features, VIRTIO_F_SR_IOV);
}
}
}
}

1
hw/virtio/virtio-pmem.c

@ -73,7 +73,6 @@ static void virtio_pmem_flush(VirtIODevice *vdev, VirtQueue *vq)
trace_virtio_pmem_flush_request();
req_data = virtqueue_pop(vq, sizeof(VirtIODeviceRequest));
if (!req_data) {
virtio_error(vdev, "virtio-pmem missing request data");
return;
}

4
hw/virtio/virtio.c

@ -48,6 +48,7 @@
#include "standard-headers/linux/virtio_iommu.h"
#include "standard-headers/linux/virtio_mem.h"
#include "standard-headers/linux/virtio_vsock.h"
#include "standard-headers/linux/virtio_spi.h"
/*
* Maximum size of virtio device config space
@ -196,7 +197,8 @@ const char *virtio_device_names[] = {
[VIRTIO_ID_PARAM_SERV] = "virtio-param-serv",
[VIRTIO_ID_AUDIO_POLICY] = "virtio-audio-pol",
[VIRTIO_ID_BT] = "virtio-bluetooth",
[VIRTIO_ID_GPIO] = "virtio-gpio"
[VIRTIO_ID_GPIO] = "virtio-gpio",
[VIRTIO_ID_SPI] = "virtio-spi"
};
static const char *virtio_id_to_name(uint16_t device_id)

6
include/hw/acpi/ghes.h

@ -98,9 +98,9 @@ void acpi_build_hest(AcpiGhesState *ags, GArray *table_data,
const char *oem_id, const char *oem_table_id);
void acpi_ghes_add_fw_cfg(AcpiGhesState *vms, FWCfgState *s,
GArray *hardware_errors);
int acpi_ghes_memory_errors(AcpiGhesState *ags, uint16_t source_id,
uint64_t error_physical_addr);
void ghes_record_cper_errors(AcpiGhesState *ags, const void *cper, size_t len,
bool acpi_ghes_memory_errors(AcpiGhesState *ags, uint16_t source_id,
uint64_t error_physical_addr, Error **errp);
bool ghes_record_cper_errors(AcpiGhesState *ags, const void *cper, size_t len,
uint16_t source_id, Error **errp);
/**

1
include/hw/i386/intel_iommu.h

@ -275,6 +275,7 @@ struct IntelIOMMUState {
bool scalable_mode; /* RO - is Scalable Mode supported? */
bool fsts; /* RO - is first stage translation supported? */
bool snoop_control; /* RO - is SNP filed supported? */
bool svm; /* RO - is SVA/SVM supported? */
dma_addr_t root; /* Current root table pointer */
bool root_scalable; /* Type of root table (scalable or not) */

1
include/hw/pci/pcie.h

@ -165,5 +165,6 @@ void pcie_pri_init(PCIDevice *dev, uint16_t offset, uint32_t outstanding_pr_cap,
uint32_t pcie_pri_get_req_alloc(const PCIDevice *dev);
bool pcie_pri_enabled(const PCIDevice *dev);
bool pcie_pasid_enabled(const PCIDevice *dev);
bool pcie_pasid_priv_enabled(PCIDevice *dev);
bool pcie_ats_enabled(const PCIDevice *dev);
#endif /* QEMU_PCIE_H */

1
include/hw/virtio/vhost-user-blk.h

@ -52,6 +52,7 @@ struct VHostUserBlk {
bool started_vu;
bool skip_get_vring_base_on_force_shutdown;
bool inflight_migration;
};
#endif

25
include/hw/virtio/vhost-user-spi.h

@ -0,0 +1,25 @@
/*
* Vhost-user spi virtio device
*
* Copyright (C) 2025 Qualcomm Innovation Center, Inc. All Rights Reserved.
*
* SPDX-License-Identifier: GPL-2.0-or-later
*/
#ifndef QEMU_VHOST_USER_SPI_H
#define QEMU_VHOST_USER_SPI_H
#include "hw/virtio/virtio.h"
#include "hw/virtio/vhost.h"
#include "hw/virtio/vhost-user.h"
#include "hw/virtio/vhost-user-base.h"
#define TYPE_VHOST_USER_SPI "vhost-user-spi-device"
OBJECT_DECLARE_SIMPLE_TYPE(VHostUserSPI, VHOST_USER_SPI)
struct VHostUserSPI {
VHostUserBase parent_obj;
};
#endif /* QEMU_VHOST_USER_SPI_H */

2
include/hw/virtio/vhost-user.h

@ -32,6 +32,7 @@ enum VhostUserProtocolFeature {
/* Feature 17 reserved for VHOST_USER_PROTOCOL_F_XEN_MMAP. */
VHOST_USER_PROTOCOL_F_SHARED_OBJECT = 18,
VHOST_USER_PROTOCOL_F_DEVICE_STATE = 19,
VHOST_USER_PROTOCOL_F_GET_VRING_BASE_INFLIGHT = 20,
VHOST_USER_PROTOCOL_F_MAX
};
@ -68,6 +69,7 @@ typedef struct VhostUserState {
GPtrArray *notifiers;
int memory_slots;
bool supports_config;
bool supports_inflight_migration;
} VhostUserState;
/**

8
include/hw/virtio/vhost-vdpa.h

@ -76,6 +76,12 @@ typedef struct vhost_vdpa_shared {
/* SVQ switching is in progress, or already completed? */
SVQTransitionState svq_switching;
/*
* Device suspended successfully.
* The vhost_vdpa devices cannot have different suspended states.
*/
bool suspended;
} VhostVDPAShared;
typedef struct vhost_vdpa {
@ -83,8 +89,6 @@ typedef struct vhost_vdpa {
uint32_t address_space_id;
uint64_t acked_features;
bool shadow_vqs_enabled;
/* Device suspended successfully */
bool suspended;
VhostVDPAShared *shared;
GPtrArray *shadow_vqs;
const VhostShadowVirtqueueOps *shadow_vq_ops;

6
include/hw/virtio/vhost.h

@ -554,4 +554,10 @@ static inline int vhost_load_backend_state(struct vhost_dev *dev, QEMUFile *f,
}
#endif
extern const VMStateDescription vmstate_vhost_inflight_region;
#define VMSTATE_VHOST_INFLIGHT_REGION(_field, _state) \
VMSTATE_STRUCT_POINTER(_field, _state, \
vmstate_vhost_inflight_region, \
struct vhost_inflight)
#endif

10
include/migration/vmstate.h

@ -706,6 +706,16 @@ extern const VMStateInfo vmstate_info_qlist;
.offset = offsetof(_state, _field), \
}
#define VMSTATE_VBUFFER_UINT64(_field, _state, _version, _test, _field_size) { \
.name = (stringify(_field)), \
.version_id = (_version), \
.field_exists = (_test), \
.size_offset = vmstate_offset_value(_state, _field_size, uint64_t),\
.info = &vmstate_info_buffer, \
.flags = VMS_VBUFFER | VMS_POINTER, \
.offset = offsetof(_state, _field), \
}
#define VMSTATE_VBUFFER_ALLOC_UINT32(_field, _state, _version, \
_test, _field_size) { \
.name = (stringify(_field)), \

181
include/standard-headers/linux/virtio_spi.h

@ -0,0 +1,181 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/*
* Copyright (C) 2023 OpenSynergy GmbH
* Copyright (C) 2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef _LINUX_VIRTIO_VIRTIO_SPI_H
#define _LINUX_VIRTIO_VIRTIO_SPI_H
#include "standard-headers/linux/types.h"
#include "standard-headers/linux/virtio_config.h"
#include "standard-headers/linux/virtio_ids.h"
#include "standard-headers/linux/virtio_types.h"
/* Sample data on trailing clock edge */
#define VIRTIO_SPI_CPHA _BITUL(0)
/* Clock is high when IDLE */
#define VIRTIO_SPI_CPOL _BITUL(1)
/* Chip Select is active high */
#define VIRTIO_SPI_CS_HIGH _BITUL(2)
/* Transmit LSB first */
#define VIRTIO_SPI_MODE_LSB_FIRST _BITUL(3)
/* Loopback mode */
#define VIRTIO_SPI_MODE_LOOP _BITUL(4)
/**
* struct virtio_spi_config - All config fields are read-only for the
* Virtio SPI driver
* @cs_max_number: maximum number of chipselect the host SPI controller
* supports.
* @cs_change_supported: indicates if the host SPI controller supports to toggle
* chipselect after each transfer in one message:
* 0: unsupported, chipselect will be kept in active state throughout the
* message transaction;
* 1: supported.
* Note: Message here contains a sequence of SPI transfers.
* @tx_nbits_supported: indicates the supported number of bit for writing:
* bit 0: DUAL (2-bit transfer), 1 for supported
* bit 1: QUAD (4-bit transfer), 1 for supported
* bit 2: OCTAL (8-bit transfer), 1 for supported
* other bits are reserved as 0, 1-bit transfer is always supported.
* @rx_nbits_supported: indicates the supported number of bit for reading:
* bit 0: DUAL (2-bit transfer), 1 for supported
* bit 1: QUAD (4-bit transfer), 1 for supported
* bit 2: OCTAL (8-bit transfer), 1 for supported
* other bits are reserved as 0, 1-bit transfer is always supported.
* @bits_per_word_mask: mask indicating which values of bits_per_word are
* supported. If not set, no limitation for bits_per_word.
* @mode_func_supported: indicates the following features are supported or not:
* bit 0-1: CPHA feature
* 0b00: invalid, should support as least one CPHA setting
* 0b01: supports CPHA=0 only
* 0b10: supports CPHA=1 only
* 0b11: supports CPHA=0 and CPHA=1.
* bit 2-3: CPOL feature
* 0b00: invalid, should support as least one CPOL setting
* 0b01: supports CPOL=0 only
* 0b10: supports CPOL=1 only
* 0b11: supports CPOL=0 and CPOL=1.
* bit 4: chipselect active high feature, 0 for unsupported and 1 for
* supported, chipselect active low is supported by default.
* bit 5: LSB first feature, 0 for unsupported and 1 for supported,
* MSB first is supported by default.
* bit 6: loopback mode feature, 0 for unsupported and 1 for supported,
* normal mode is supported by default.
* @max_freq_hz: the maximum clock rate supported in Hz unit, 0 means no
* limitation for transfer speed.
* @max_word_delay_ns: the maximum word delay supported, in nanoseconds.
* A value of 0 indicates that word delay is unsupported.
* Each transfer may consist of a sequence of words.
* @max_cs_setup_ns: the maximum delay supported after chipselect is asserted,
* in ns unit, 0 means delay is not supported to introduce after chipselect is
* asserted.
* @max_cs_hold_ns: the maximum delay supported before chipselect is deasserted,
* in ns unit, 0 means delay is not supported to introduce before chipselect
* is deasserted.
* @max_cs_incative_ns: maximum delay supported after chipselect is deasserted,
* in ns unit, 0 means delay is not supported to introduce after chipselect is
* deasserted.
*/
struct virtio_spi_config {
uint8_t cs_max_number;
uint8_t cs_change_supported;
#define VIRTIO_SPI_RX_TX_SUPPORT_DUAL _BITUL(0)
#define VIRTIO_SPI_RX_TX_SUPPORT_QUAD _BITUL(1)
#define VIRTIO_SPI_RX_TX_SUPPORT_OCTAL _BITUL(2)
uint8_t tx_nbits_supported;
uint8_t rx_nbits_supported;
uint32_t bits_per_word_mask;
#define VIRTIO_SPI_MF_SUPPORT_CPHA_0 _BITUL(0)
#define VIRTIO_SPI_MF_SUPPORT_CPHA_1 _BITUL(1)
#define VIRTIO_SPI_MF_SUPPORT_CPOL_0 _BITUL(2)
#define VIRTIO_SPI_MF_SUPPORT_CPOL_1 _BITUL(3)
#define VIRTIO_SPI_MF_SUPPORT_CS_HIGH _BITUL(4)
#define VIRTIO_SPI_MF_SUPPORT_LSB_FIRST _BITUL(5)
#define VIRTIO_SPI_MF_SUPPORT_LOOPBACK _BITUL(6)
uint32_t mode_func_supported;
uint32_t max_freq_hz;
uint32_t max_word_delay_ns;
uint32_t max_cs_setup_ns;
uint32_t max_cs_hold_ns;
uint32_t max_cs_inactive_ns;
};
/**
* struct spi_transfer_head - virtio SPI transfer descriptor
* @chip_select_id: chipselect index the SPI transfer used.
* @bits_per_word: the number of bits in each SPI transfer word.
* @cs_change: whether to deselect device after finishing this transfer
* before starting the next transfer, 0 means cs keep asserted and
* 1 means cs deasserted then asserted again.
* @tx_nbits: bus width for write transfer.
* 0,1: bus width is 1, also known as SINGLE
* 2 : bus width is 2, also known as DUAL
* 4 : bus width is 4, also known as QUAD
* 8 : bus width is 8, also known as OCTAL
* other values are invalid.
* @rx_nbits: bus width for read transfer.
* 0,1: bus width is 1, also known as SINGLE
* 2 : bus width is 2, also known as DUAL
* 4 : bus width is 4, also known as QUAD
* 8 : bus width is 8, also known as OCTAL
* other values are invalid.
* @reserved: for future use.
* @mode: SPI transfer mode.
* bit 0: CPHA, determines the timing (i.e. phase) of the data
* bits relative to the clock pulses.For CPHA=0, the
* "out" side changes the data on the trailing edge of the
* preceding clock cycle, while the "in" side captures the data
* on (or shortly after) the leading edge of the clock cycle.
* For CPHA=1, the "out" side changes the data on the leading
* edge of the current clock cycle, while the "in" side
* captures the data on (or shortly after) the trailing edge of
* the clock cycle.
* bit 1: CPOL, determines the polarity of the clock. CPOL=0 is a
* clock which idles at 0, and each cycle consists of a pulse
* of 1. CPOL=1 is a clock which idles at 1, and each cycle
* consists of a pulse of 0.
* bit 2: CS_HIGH, if 1, chip select active high, else active low.
* bit 3: LSB_FIRST, determines per-word bits-on-wire, if 0, MSB
* first, else LSB first.
* bit 4: LOOP, loopback mode.
* @freq: the transfer speed in Hz.
* @word_delay_ns: delay to be inserted between consecutive words of a
* transfer, in ns unit.
* @cs_setup_ns: delay to be introduced after CS is asserted, in ns
* unit.
* @cs_delay_hold_ns: delay to be introduced before CS is deasserted
* for each transfer, in ns unit.
* @cs_change_delay_inactive_ns: delay to be introduced after CS is
* deasserted and before next asserted, in ns unit.
*/
struct spi_transfer_head {
uint8_t chip_select_id;
uint8_t bits_per_word;
uint8_t cs_change;
uint8_t tx_nbits;
uint8_t rx_nbits;
uint8_t reserved[3];
uint32_t mode;
uint32_t freq;
uint32_t word_delay_ns;
uint32_t cs_setup_ns;
uint32_t cs_delay_hold_ns;
uint32_t cs_change_delay_inactive_ns;
};
/**
* struct spi_transfer_result - virtio SPI transfer result
* @result: Transfer result code.
* VIRTIO_SPI_TRANS_OK: Transfer successful.
* VIRTIO_SPI_PARAM_ERR: Parameter error.
* VIRTIO_SPI_TRANS_ERR: Transfer error.
*/
struct spi_transfer_result {
#define VIRTIO_SPI_TRANS_OK 0
#define VIRTIO_SPI_PARAM_ERR 1
#define VIRTIO_SPI_TRANS_ERR 2
uint8_t result;
};
#endif /* #ifndef _LINUX_VIRTIO_VIRTIO_SPI_H */

10
target/arm/kvm.c

@ -2473,13 +2473,9 @@ void kvm_arch_on_sigbus_vcpu(CPUState *c, int code, void *addr)
*/
if (code == BUS_MCEERR_AR) {
kvm_cpu_synchronize_state(c);
if (!acpi_ghes_memory_errors(ags, ACPI_HEST_SRC_ID_SYNC,
paddr)) {
kvm_inject_arm_sea(c);
} else {
error_report("failed to record the error");
abort();
}
acpi_ghes_memory_errors(ags, ACPI_HEST_SRC_ID_SYNC,
paddr, &error_fatal);
kvm_inject_arm_sea(c);
}
return;
}

6
tests/qtest/q35-test.c

@ -206,12 +206,6 @@ static void test_smram_smbase_lock(void)
qtest_writeb(qts, SMBASE, SMRAM_TEST_PATTERN);
g_assert_cmpint(qtest_readb(qts, SMBASE), ==, SMRAM_TEST_PATTERN);
/* check that writing junk to 0x9c before before negotiating is ignored */
for (i = 0; i < 0xff; i++) {
qpci_config_writeb(pcidev, MCH_HOST_BRIDGE_F_SMBASE, i);
g_assert(qpci_config_readb(pcidev, MCH_HOST_BRIDGE_F_SMBASE) == 0);
}
/* enable SMRAM at SMBASE */
qpci_config_writeb(pcidev, MCH_HOST_BRIDGE_F_SMBASE, 0xff);
g_assert(qpci_config_readb(pcidev, MCH_HOST_BRIDGE_F_SMBASE) == 0x01);

Loading…
Cancel
Save