Browse Source

Pull request

- Hanna's fix for a regression that hangs the userspace NVMe block driver.
 -----BEGIN PGP SIGNATURE-----
 
 iQEzBAABCgAdFiEEhpWov9P5fNqsNXdanKSrs4Grc8gFAmlAIbcACgkQnKSrs4Gr
 c8iuqgf/VV2OPXW2t56uzuUmf+220pnRAaaGLw7atuWmaRJQ8/tHZU23Vxbu7JtX
 ZLJfObJaoGHpCeWCFJ3RccsPabf19hsDIJyki9U6f2+B+OutWLlmcp2uLtQJ8FNw
 2jMYSuT6XsCnm6VF3UIegDBTh6lvjyjDUVNAsWeiV6wHE61Oj3RD4joif52hx5uE
 xcDPii9fiF8S9tD3CKDGxR8fw7olFXiG2ojxqRZklZuHM6SfFHespWeTr9voLfgL
 maBJO3qyS6YFH1mFuIJvvCykGN2EI6tT1nlQw8et3oUGF+GN45yqLcK12/b7lWKF
 jTE8RCPCswFD4FF3eXJpcZRysi988A==
 =Jx5T
 -----END PGP SIGNATURE-----

Merge tag 'block-pull-request' of https://gitlab.com/stefanha/qemu into staging

Pull request

- Hanna's fix for a regression that hangs the userspace NVMe block driver.

# -----BEGIN PGP SIGNATURE-----
#
# iQEzBAABCgAdFiEEhpWov9P5fNqsNXdanKSrs4Grc8gFAmlAIbcACgkQnKSrs4Gr
# c8iuqgf/VV2OPXW2t56uzuUmf+220pnRAaaGLw7atuWmaRJQ8/tHZU23Vxbu7JtX
# ZLJfObJaoGHpCeWCFJ3RccsPabf19hsDIJyki9U6f2+B+OutWLlmcp2uLtQJ8FNw
# 2jMYSuT6XsCnm6VF3UIegDBTh6lvjyjDUVNAsWeiV6wHE61Oj3RD4joif52hx5uE
# xcDPii9fiF8S9tD3CKDGxR8fw7olFXiG2ojxqRZklZuHM6SfFHespWeTr9voLfgL
# maBJO3qyS6YFH1mFuIJvvCykGN2EI6tT1nlQw8et3oUGF+GN45yqLcK12/b7lWKF
# jTE8RCPCswFD4FF3eXJpcZRysi988A==
# =Jx5T
# -----END PGP SIGNATURE-----
# gpg: Signature made Tue 16 Dec 2025 01:56:55 AM AEDT
# gpg:                using RSA key 8695A8BFD3F97CDAAC35775A9CA4ABB381AB73C8
# gpg: Good signature from "Stefan Hajnoczi <stefanha@redhat.com>" [unknown]
# gpg:                 aka "Stefan Hajnoczi <stefanha@gmail.com>" [unknown]
# gpg: WARNING: This key is not certified with a trusted signature!
# gpg:          There is no indication that the signature belongs to the owner.
# Primary key fingerprint: 8695 A8BF D3F9 7CDA AC35  775A 9CA4 ABB3 81AB 73C8

* tag 'block-pull-request' of https://gitlab.com/stefanha/qemu:
  Revert "nvme: Fix coroutine waking"

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
pull/314/head
Richard Henderson 4 months ago
parent
commit
a69964ff96
  1. 56
      block/nvme.c

56
block/nvme.c

@@ -1200,36 +1200,26 @@ fail:
typedef struct {
Coroutine *co;
bool skip_yield;
int ret;
AioContext *ctx;
} NVMeCoData;
static void nvme_rw_cb_bh(void *opaque)
{
NVMeCoData *data = opaque;
qemu_coroutine_enter(data->co);
}
/* Put into NVMeRequest.cb, so runs in the BDS's main AioContext */
static void nvme_rw_cb(void *opaque, int ret)
{
NVMeCoData *data = opaque;
data->ret = ret;
if (data->co == qemu_coroutine_self()) {
/*
* Fast path: We are inside of the request coroutine (through
* nvme_submit_command, nvme_deferred_fn, nvme_process_completion).
* We can set data->skip_yield here to keep the coroutine from
* yielding, and then we don't need to schedule a BH to wake it.
*/
data->skip_yield = true;
} else {
/*
* Safe to call: The case where we run in the request coroutine is
* handled above, so we must be independent of it; and without
* skip_yield set, the coroutine will yield.
* No need to release NVMeQueuePair.lock (we are called without it
* held). (Note: If we enter the coroutine here, @data will
* probably be dangling once aio_co_wake() returns.)
*/
aio_co_wake(data->co);
if (!data->co) {
/* The rw coroutine hasn't yielded, don't try to enter. */
return;
}
replay_bh_schedule_oneshot_event(data->ctx, nvme_rw_cb_bh, data);
}
static coroutine_fn int nvme_co_prw_aligned(BlockDriverState *bs,
@@ -1253,7 +1243,7 @@ static coroutine_fn int nvme_co_prw_aligned(BlockDriverState *bs,
.cdw12 = cpu_to_le32(cdw12),
};
NVMeCoData data = {
.co = qemu_coroutine_self(),
.ctx = bdrv_get_aio_context(bs),
.ret = -EINPROGRESS,
};
@@ -1270,7 +1260,9 @@ static coroutine_fn int nvme_co_prw_aligned(BlockDriverState *bs,
return r;
}
nvme_submit_command(ioq, req, &cmd, nvme_rw_cb, &data);
if (!data.skip_yield) {
data.co = qemu_coroutine_self();
while (data.ret == -EINPROGRESS) {
qemu_coroutine_yield();
}
@@ -1366,7 +1358,7 @@ static coroutine_fn int nvme_co_flush(BlockDriverState *bs)
.nsid = cpu_to_le32(s->nsid),
};
NVMeCoData data = {
.co = qemu_coroutine_self(),
.ctx = bdrv_get_aio_context(bs),
.ret = -EINPROGRESS,
};
@@ -1374,7 +1366,9 @@ static coroutine_fn int nvme_co_flush(BlockDriverState *bs)
req = nvme_get_free_req(ioq);
assert(req);
nvme_submit_command(ioq, req, &cmd, nvme_rw_cb, &data);
if (!data.skip_yield) {
data.co = qemu_coroutine_self();
if (data.ret == -EINPROGRESS) {
qemu_coroutine_yield();
}
@@ -1415,7 +1409,7 @@ static coroutine_fn int nvme_co_pwrite_zeroes(BlockDriverState *bs,
};
NVMeCoData data = {
.co = qemu_coroutine_self(),
.ctx = bdrv_get_aio_context(bs),
.ret = -EINPROGRESS,
};
@@ -1435,7 +1429,9 @@ static coroutine_fn int nvme_co_pwrite_zeroes(BlockDriverState *bs,
assert(req);
nvme_submit_command(ioq, req, &cmd, nvme_rw_cb, &data);
if (!data.skip_yield) {
data.co = qemu_coroutine_self();
while (data.ret == -EINPROGRESS) {
qemu_coroutine_yield();
}
@@ -1463,7 +1459,7 @@ static int coroutine_fn nvme_co_pdiscard(BlockDriverState *bs,
};
NVMeCoData data = {
.co = qemu_coroutine_self(),
.ctx = bdrv_get_aio_context(bs),
.ret = -EINPROGRESS,
};
@@ -1508,7 +1504,9 @@ static int coroutine_fn nvme_co_pdiscard(BlockDriverState *bs,
trace_nvme_dsm(s, offset, bytes);
nvme_submit_command(ioq, req, &cmd, nvme_rw_cb, &data);
if (!data.skip_yield) {
data.co = qemu_coroutine_self();
while (data.ret == -EINPROGRESS) {
qemu_coroutine_yield();
}

Loading…
Cancel
Save