Browse Source

vhost: Fix used memslot tracking when destroying a vhost device

When we unplug a vhost device, we end up calling vhost_dev_cleanup()
where we do a memory_listener_unregister().

This memory_listener_unregister() call will end up disconnecting the
listener from the address space through listener_del_address_space().

In that process, we effectively communicate the removal of all memory
regions from that listener, resulting in region_del() + commit()
callbacks getting triggered.

So in case of vhost, we end up calling vhost_commit() with no remaining
memory slots (0).

In vhost_commit() we end up overwriting the global variables
used_memslots / used_shared_memslots, used for detecting the number
of free memslots. With used_memslots / used_shared_memslots set to 0
by vhost_commit() during device removal, we'll later assume that the
other vhost devices still have plenty of memslots left when calling
vhost_get_free_memslots().

Let's fix it by simply removing the global variables and depending
only on the actual per-device count.

Easy to reproduce by adding two vhost-user devices to a VM and then
hot-unplugging one of them.

While at it, detect unexpected underflows in vhost_get_free_memslots()
and issue a warning.

Reported-by: yuanminghao <yuanmh12@chinatelecom.cn>
Link: https://lore.kernel.org/qemu-devel/20241121060755.164310-1-yuanmh12@chinatelecom.cn/
Fixes: 2ce68e4cf5 ("vhost: add vhost_has_free_slot() interface")
Cc: Igor Mammedov <imammedo@redhat.com>
Cc: Michael S. Tsirkin <mst@redhat.com>
Cc: Stefano Garzarella <sgarzare@redhat.com>
Signed-off-by: David Hildenbrand <david@redhat.com>
Message-Id: <20250603111336.1858888-1-david@redhat.com>
Reviewed-by: Igor Mammedov <imammedo@redhat.com>
Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
pull/294/head
David Hildenbrand 10 months ago
committed by Michael S. Tsirkin
parent
commit
9f749129e2
  1. 37
      hw/virtio/vhost.c

37
hw/virtio/vhost.c

@@ -47,12 +47,6 @@ static struct vhost_log *vhost_log[VHOST_BACKEND_TYPE_MAX];
static struct vhost_log *vhost_log_shm[VHOST_BACKEND_TYPE_MAX];
static QLIST_HEAD(, vhost_dev) vhost_log_devs[VHOST_BACKEND_TYPE_MAX];
/* Memslots used by backends that support private memslots (without an fd). */
static unsigned int used_memslots;
/* Memslots used by backends that only support shared memslots (with an fd). */
static unsigned int used_shared_memslots;
static QLIST_HEAD(, vhost_dev) vhost_devices =
QLIST_HEAD_INITIALIZER(vhost_devices);
@@ -74,15 +68,15 @@ unsigned int vhost_get_free_memslots(void)
QLIST_FOREACH(hdev, &vhost_devices, entry) {
unsigned int r = hdev->vhost_ops->vhost_backend_memslots_limit(hdev);
unsigned int cur_free;
unsigned int cur_free = r - hdev->mem->nregions;
if (hdev->vhost_ops->vhost_backend_no_private_memslots &&
hdev->vhost_ops->vhost_backend_no_private_memslots(hdev)) {
cur_free = r - used_shared_memslots;
if (unlikely(r < hdev->mem->nregions)) {
warn_report_once("used (%u) vhost backend memory slots exceed"
" the device limit (%u).", hdev->mem->nregions, r);
free = 0;
} else {
cur_free = r - used_memslots;
free = MIN(free, cur_free);
}
free = MIN(free, cur_free);
}
return free;
}
@@ -666,13 +660,6 @@ static void vhost_commit(MemoryListener *listener)
dev->mem = g_realloc(dev->mem, regions_size);
dev->mem->nregions = dev->n_mem_sections;
if (dev->vhost_ops->vhost_backend_no_private_memslots &&
dev->vhost_ops->vhost_backend_no_private_memslots(dev)) {
used_shared_memslots = dev->mem->nregions;
} else {
used_memslots = dev->mem->nregions;
}
for (i = 0; i < dev->n_mem_sections; i++) {
struct vhost_memory_region *cur_vmr = dev->mem->regions + i;
struct MemoryRegionSection *mrs = dev->mem_sections + i;
@@ -1619,15 +1606,11 @@ int vhost_dev_init(struct vhost_dev *hdev, void *opaque,
QLIST_INSERT_HEAD(&vhost_devices, hdev, entry);
/*
* The listener we registered properly updated the corresponding counter.
* So we can trust that these values are accurate.
* The listener we registered properly setup the number of required
* memslots in vhost_commit().
*/
if (hdev->vhost_ops->vhost_backend_no_private_memslots &&
hdev->vhost_ops->vhost_backend_no_private_memslots(hdev)) {
used = used_shared_memslots;
} else {
used = used_memslots;
}
used = hdev->mem->nregions;
/*
* We assume that all reserved memslots actually require a real memslot
* in our vhost backend. This might not be true, for example, if the

Loading…
Cancel
Save