Browse Source

virtio-gpu: Destroy virgl resources on virtio-gpu reset

Properly destroy virgl resources on virtio-gpu reset to not leak resources
on a hot reboot of a VM.

Suggested-by: Akihiko Odaki <odaki@rsg.ci.i.u-tokyo.ac.jp>
Reviewed-by: Akihiko Odaki <akihiko.odaki@daynix.com>
Signed-off-by: Dmitry Osipenko <dmitry.osipenko@collabora.com>
Message-ID: <20260303151422.977399-18-dmitry.osipenko@collabora.com>
Message-ID: <20260304165043.1437519-20-alex.bennee@linaro.org>
Signed-off-by: Alex Bennée <alex.bennee@linaro.org>
master
Dmitry Osipenko 1 month ago
committed by Alex Bennée
parent
commit
bd9258917f
  1. 18
      hw/display/virtio-gpu-gl.c
  2. 123
      hw/display/virtio-gpu-virgl.c
  3. 6
      include/hw/virtio/virtio-gpu.h

18
hw/display/virtio-gpu-gl.c

@@ -63,29 +63,14 @@ static void virtio_gpu_gl_flushed(VirtIOGPUBase *b)
static void virtio_gpu_gl_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
{
VirtIOGPU *g = VIRTIO_GPU(vdev);
VirtIOGPUGL *gl = VIRTIO_GPU_GL(vdev);
struct virtio_gpu_ctrl_command *cmd;
if (!virtio_queue_ready(vq)) {
return;
}
switch (gl->renderer_state) {
case RS_RESET:
virtio_gpu_virgl_reset(g);
/* fallthrough */
case RS_START:
if (virtio_gpu_virgl_init(g)) {
gl->renderer_state = RS_INIT_FAILED;
return;
}
gl->renderer_state = RS_INITED;
break;
case RS_INIT_FAILED:
if (!virtio_gpu_virgl_update_render_state(g)) {
return;
case RS_INITED:
break;
}
cmd = virtqueue_pop(vq, sizeof(struct virtio_gpu_ctrl_command));
@@ -201,6 +186,7 @@ static void virtio_gpu_gl_class_init(ObjectClass *klass, const void *data)
vgc->process_cmd = virtio_gpu_virgl_process_cmd;
vgc->update_cursor_data = virtio_gpu_gl_update_cursor_data;
vgc->resource_destroy = virtio_gpu_virgl_resource_destroy;
vdc->realize = virtio_gpu_gl_device_realize;
vdc->unrealize = virtio_gpu_gl_device_unrealize;
vdc->reset = virtio_gpu_gl_reset;

123
hw/display/virtio-gpu-virgl.c

@@ -96,6 +96,10 @@ static void virtio_gpu_virgl_resume_cmdq_bh(void *opaque)
{
VirtIOGPU *g = opaque;
/*
 * Bring the renderer into a usable state (re-initializing after a
 * reset if needed) before touching the command queue; if that fails,
 * there is nothing we can safely process yet.
 */
if (!virtio_gpu_virgl_update_render_state(g)) {
return;
}
/* Resume draining the suspended control command queue. */
virtio_gpu_process_cmdq(g);
}
@@ -344,14 +348,46 @@ static void virgl_cmd_create_resource_3d(VirtIOGPU *g,
virgl_renderer_resource_create(&args, NULL, 0);
}
/*
 * Tear down a virgl resource: unmap a blob host mapping if present (which
 * may be suspended and deferred by the renderer), detach and free the
 * guest-memory iovecs, drop the renderer's reference, and remove the
 * resource from the device list.
 *
 * Returns 0 on success. With VIRGL >= 1, returns non-zero if blob
 * unmapping failed; if unmapping was deferred, *suspended is set to true,
 * 0 is returned, and the resource is kept alive for a later retry.
 */
static int
virtio_gpu_virgl_resource_unref(VirtIOGPU *g,
struct virtio_gpu_virgl_resource *res,
bool *suspended)
{
struct iovec *res_iovs = NULL;
int num_iovs = 0;
#if VIRGL_VERSION_MAJOR >= 1
int ret;
/* Blob resources may be mapped into the host; unmap before unref. */
ret = virtio_gpu_virgl_unmap_resource_blob(g, res, suspended);
if (ret) {
return ret;
}
if (*suspended) {
/* Unmap deferred by the renderer; caller must retry later. */
return 0;
}
#endif
/* Reclaim the guest pages that backed this resource, if any. */
virgl_renderer_resource_detach_iov(res->base.resource_id,
&res_iovs,
&num_iovs);
if (res_iovs != NULL && num_iovs != 0) {
virtio_gpu_cleanup_mapping_iov(g, res_iovs, num_iovs);
}
virgl_renderer_resource_unref(res->base.resource_id);
/* Only now is it safe to drop the bookkeeping entry itself. */
QTAILQ_REMOVE(&g->reslist, &res->base, next);
g_free(res);
return 0;
}
static void virgl_cmd_resource_unref(VirtIOGPU *g,
struct virtio_gpu_ctrl_command *cmd,
bool *cmd_suspended)
{
struct virtio_gpu_resource_unref unref;
struct virtio_gpu_virgl_resource *res;
struct iovec *res_iovs = NULL;
int num_iovs = 0;
VIRTIO_GPU_FILL_CMD(unref);
trace_virtio_gpu_cmd_res_unref(unref.resource_id);
@@ -364,27 +400,21 @@ static void virgl_cmd_resource_unref(VirtIOGPU *g,
return;
}
#if VIRGL_VERSION_MAJOR >= 1
if (virtio_gpu_virgl_unmap_resource_blob(g, res, cmd_suspended)) {
cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
return;
}
if (*cmd_suspended) {
return;
}
#endif
virtio_gpu_virgl_resource_unref(g, res, cmd_suspended);
}
virgl_renderer_resource_detach_iov(unref.resource_id,
&res_iovs,
&num_iovs);
if (res_iovs != NULL && num_iovs != 0) {
virtio_gpu_cleanup_mapping_iov(g, res_iovs, num_iovs);
}
virgl_renderer_resource_unref(unref.resource_id);
void virtio_gpu_virgl_resource_destroy(VirtIOGPU *g,
struct virtio_gpu_simple_resource *base,
Error **errp)
{
struct virtio_gpu_virgl_resource *res;
bool suspended = false;
QTAILQ_REMOVE(&g->reslist, &res->base, next);
res = container_of(base, struct virtio_gpu_virgl_resource, base);
g_free(res);
if (virtio_gpu_virgl_resource_unref(g, res, &suspended)) {
error_setg(errp, "failed to destroy virgl resource");
}
}
static void virgl_cmd_context_create(VirtIOGPU *g,
@@ -1291,6 +1321,10 @@ static void virtio_gpu_fence_poll(void *opaque)
VirtIOGPU *g = opaque;
VirtIOGPUGL *gl = VIRTIO_GPU_GL(g);
if (!virtio_gpu_virgl_update_render_state(g)) {
return;
}
virgl_renderer_poll();
virtio_gpu_process_cmdq(g);
if (!QTAILQ_EMPTY(&g->cmdq) || !QTAILQ_EMPTY(&g->fenceq)) {
@@ -1313,14 +1347,30 @@ void virtio_gpu_virgl_reset_scanout(VirtIOGPU *g)
}
}
void virtio_gpu_virgl_reset(VirtIOGPU *g)
/*
 * Reset the virgl renderer state, destroying all resources first so a
 * hot reboot of the VM does not leak them.
 *
 * Returns false when at least one resource could not be destroyed yet
 * (blob unmapping can be suspended and deferred on unref), in which case
 * the renderer reset itself is postponed; returns true once everything
 * is torn down and the renderer has been reset.
 */
static bool virtio_gpu_virgl_reset(VirtIOGPU *g)
{
    struct virtio_gpu_simple_resource *res, *tmp;

    /* Walk the list safely: each destroy unlinks and frees its entry. */
    QTAILQ_FOREACH_SAFE(res, &g->reslist, next, tmp) {
        virtio_gpu_virgl_resource_destroy(g, res, NULL);
    }

    if (QTAILQ_EMPTY(&g->reslist)) {
        virgl_renderer_reset();
        virtio_gpu_virgl_reset_async_fences(g);
        return true;
    }

    /* Some destruction was deferred; try again on the next opportunity. */
    return false;
}
int virtio_gpu_virgl_init(VirtIOGPU *g)
static int virtio_gpu_virgl_init(VirtIOGPU *g)
{
int ret;
uint32_t flags = 0;
@@ -1398,6 +1448,35 @@ int virtio_gpu_virgl_init(VirtIOGPU *g)
return 0;
}
/*
 * Drive the renderer state machine towards RS_INITED.
 *
 * RS_RESET first completes the deferred renderer reset (resource
 * destruction may still be pending) and then, like RS_START,
 * (re-)initializes the renderer. Returns true when the renderer is
 * ready for use, false when initialization failed, is still pending,
 * or previously failed (RS_INIT_FAILED is sticky).
 */
bool virtio_gpu_virgl_update_render_state(VirtIOGPU *g)
{
    VirtIOGPUGL *gl = VIRTIO_GPU_GL(g);

    if (gl->renderer_state == RS_INITED) {
        return true;
    }
    if (gl->renderer_state == RS_INIT_FAILED) {
        return false;
    }

    if (gl->renderer_state == RS_RESET) {
        virgl_renderer_force_ctx_0();
        /* Reset may be deferred while resource teardown is suspended. */
        if (!virtio_gpu_virgl_reset(g)) {
            return false;
        }
    }

    /* RS_START, or RS_RESET after a completed reset: (re-)initialize. */
    if (virtio_gpu_virgl_init(g)) {
        gl->renderer_state = RS_INIT_FAILED;
        return false;
    }
    gl->renderer_state = RS_INITED;
    return true;
}
static void virtio_gpu_virgl_add_capset(GArray *capset_ids, uint32_t capset_id)
{
g_array_append_val(capset_ids, capset_id);

6
include/hw/virtio/virtio-gpu.h

@@ -390,9 +390,11 @@ void virtio_gpu_virgl_process_cmd(VirtIOGPU *g,
struct virtio_gpu_ctrl_command *cmd);
void virtio_gpu_virgl_fence_poll(VirtIOGPU *g);
void virtio_gpu_virgl_reset_scanout(VirtIOGPU *g);
void virtio_gpu_virgl_reset(VirtIOGPU *g);
int virtio_gpu_virgl_init(VirtIOGPU *g);
GArray *virtio_gpu_virgl_get_capsets(VirtIOGPU *g);
void virtio_gpu_virgl_reset_async_fences(VirtIOGPU *g);
void virtio_gpu_virgl_resource_destroy(VirtIOGPU *g,
struct virtio_gpu_simple_resource *res,
Error **errp);
bool virtio_gpu_virgl_update_render_state(VirtIOGPU *g);
#endif

Loading…
Cancel
Save