QEMU main repository: Please see https://www.qemu.org/docs/master/devel/submitting-a-patch.html for how to submit changes to QEMU. Pull Requests are ignored. Please only use release tarballs from the QEMU website. http://www.qemu.org
You cannot select more than 25 topics
Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.
190 lines
5.1 KiB
190 lines
5.1 KiB
/* SPDX-License-Identifier: GPL-2.0-or-later */
|
|
/*
|
|
* poll(2) file descriptor monitoring
|
|
*
|
|
* Uses ppoll(2) when available, g_poll() otherwise.
|
|
*/
|
|
|
|
#include "qemu/osdep.h"
|
|
#include "aio-posix.h"
|
|
#include "qemu/rcu_queue.h"
|
|
|
|
/*
 * These thread-local variables are used only in fdmon_poll_wait() around the
 * call to the poll() system call. In particular they are not used while
 * aio_poll is performing callbacks, which makes it much easier to think about
 * reentrancy!
 *
 * Stack-allocated arrays would be perfect but they have size limitations;
 * heap allocation is expensive enough that we want to reuse arrays across
 * calls to aio_poll(). And because poll() has to be called without holding
 * any lock, the arrays cannot be stored in AioContext. Thread-local data
 * has none of the disadvantages of these three options.
 */
static __thread GPollFD *pollfds;            /* fd/event array passed to poll */
static __thread AioHandler **nodes;          /* nodes[i] is the handler for pollfds[i] */
static __thread unsigned npfd, nalloc;       /* entries in use / allocated capacity */
static __thread Notifier pollfds_cleanup_notifier; /* frees the arrays at thread exit */
|
|
|
|
/*
 * Thread-exit notifier callback: release this thread's poll arrays.
 *
 * Registered via pollfds_cleanup_notifier on the first add_pollfd() call.
 * Must not run while a poll is in flight, hence the npfd == 0 assertion.
 */
static void pollfds_cleanup(Notifier *n, void *unused)
{
    g_assert(npfd == 0);
    g_free(pollfds);
    g_free(nodes);
    /*
     * Reset to a pristine state: clearing the pointers alongside nalloc
     * avoids dangling pointers, and g_renew() from NULL is valid should
     * the arrays ever be regrown afterwards.
     */
    pollfds = NULL;
    nodes = NULL;
    nalloc = 0;
}
|
|
|
|
/*
 * Append @node to the thread-local nodes[]/pollfds[] arrays, growing both
 * arrays (and registering the thread-exit cleanup notifier on first use)
 * when capacity is exhausted.
 */
static void add_pollfd(AioHandler *node)
{
    if (npfd == nalloc) {
        if (nalloc != 0) {
            /* Double the capacity; guard against unsigned wraparound */
            g_assert(nalloc <= INT_MAX);
            nalloc *= 2;
        } else {
            /* First allocation in this thread: arrange for cleanup */
            pollfds_cleanup_notifier.notify = pollfds_cleanup;
            qemu_thread_atexit_add(&pollfds_cleanup_notifier);
            nalloc = 8;
        }
        nodes = g_renew(AioHandler *, nodes, nalloc);
        pollfds = g_renew(GPollFD, pollfds, nalloc);
    }

    pollfds[npfd].fd = node->pfd.fd;
    pollfds[npfd].events = node->pfd.events;
    pollfds[npfd].revents = 0;
    nodes[npfd] = node;
    npfd++;
}
|
|
|
|
static int fdmon_poll_wait(AioContext *ctx, AioHandlerList *ready_list,
|
|
int64_t timeout)
|
|
{
|
|
AioHandler *node;
|
|
int ret;
|
|
|
|
assert(npfd == 0);
|
|
|
|
QLIST_FOREACH_RCU(node, &ctx->aio_handlers, node) {
|
|
if (!QLIST_IS_INSERTED(node, node_deleted) && node->pfd.events) {
|
|
add_pollfd(node);
|
|
}
|
|
}
|
|
|
|
/* epoll(7) is faster above a certain number of fds */
|
|
if (fdmon_epoll_try_upgrade(ctx, npfd)) {
|
|
QLIST_FOREACH_RCU(node, &ctx->aio_handlers, node) {
|
|
if (!QLIST_IS_INSERTED(node, node_deleted) && node->pfd.events) {
|
|
g_source_remove_poll(&ctx->source, &node->pfd);
|
|
}
|
|
}
|
|
npfd = 0; /* we won't need pollfds[], reset npfd */
|
|
return ctx->fdmon_ops->wait(ctx, ready_list, timeout);
|
|
}
|
|
|
|
ret = qemu_poll_ns(pollfds, npfd, timeout);
|
|
if (ret > 0) {
|
|
int i;
|
|
|
|
for (i = 0; i < npfd; i++) {
|
|
int revents = pollfds[i].revents;
|
|
|
|
if (revents) {
|
|
aio_add_ready_handler(ready_list, nodes[i], revents);
|
|
}
|
|
}
|
|
}
|
|
|
|
npfd = 0;
|
|
return ret;
|
|
}
|
|
|
|
static void fdmon_poll_update(AioContext *ctx,
|
|
AioHandler *old_node,
|
|
AioHandler *new_node)
|
|
{
|
|
if (old_node) {
|
|
/*
|
|
* If the GSource is in the process of being destroyed then
|
|
* g_source_remove_poll() causes an assertion failure. Skip removal in
|
|
* that case, because glib cleans up its state during destruction
|
|
* anyway.
|
|
*/
|
|
if (!g_source_is_destroyed(&ctx->source)) {
|
|
g_source_remove_poll(&ctx->source, &old_node->pfd);
|
|
}
|
|
}
|
|
|
|
if (new_node) {
|
|
g_source_add_poll(&ctx->source, &new_node->pfd);
|
|
}
|
|
}
|
|
|
|
/*
 * FDMonOps->gsource_prepare callback.  The poll(2) implementation has no
 * per-iteration setup to perform before glib polls the fds, so this is
 * intentionally a no-op (the callback slot must still be filled).
 */
static void fdmon_poll_gsource_prepare(AioContext *ctx)
{
    /* Do nothing */
}
|
|
|
|
/*
 * FDMonOps->gsource_check callback: report whether at least one handler
 * has pending events that a registered read or write callback would
 * dispatch.
 */
static bool fdmon_poll_gsource_check(AioContext *ctx)
{
    AioHandler *node;
    bool ready = false;

    /*
     * We have to walk very carefully in case aio_set_fd_handler is
     * called while we're walking.
     */
    qemu_lockcnt_inc(&ctx->list_lock);

    QLIST_FOREACH_RCU(node, &ctx->aio_handlers, node) {
        int revents = node->pfd.revents & node->pfd.events;
        bool readable = (revents & (G_IO_IN | G_IO_HUP | G_IO_ERR)) &&
                        node->io_read;
        bool writable = (revents & (G_IO_OUT | G_IO_ERR)) &&
                        node->io_write;

        if (readable || writable) {
            ready = true;
            break;
        }
    }

    qemu_lockcnt_dec(&ctx->list_lock);

    return ready;
}
|
|
|
|
static void fdmon_poll_gsource_dispatch(AioContext *ctx,
|
|
AioHandlerList *ready_list)
|
|
{
|
|
AioHandler *node;
|
|
|
|
QLIST_FOREACH_RCU(node, &ctx->aio_handlers, node) {
|
|
int revents = node->pfd.revents;
|
|
|
|
if (revents) {
|
|
aio_add_ready_handler(ready_list, node, revents);
|
|
}
|
|
}
|
|
}
|
|
|
|
/*
 * poll(2)-based fd monitoring implementation.  This is the fallback ops
 * table; fdmon_poll_wait() may transparently upgrade a context to epoll,
 * and fdmon_poll_downgrade() switches it back to this table.
 */
const FDMonOps fdmon_poll_ops = {
    .update = fdmon_poll_update,
    .wait = fdmon_poll_wait,
    .need_wait = aio_poll_disabled,
    .gsource_prepare = fdmon_poll_gsource_prepare,
    .gsource_check = fdmon_poll_gsource_check,
    .gsource_dispatch = fdmon_poll_gsource_dispatch,
};
|
|
|
|
/*
 * Switch @ctx back to the poll(2) implementation and re-register every
 * live monitored fd with the GSource (these registrations were dropped
 * when the context upgraded to epoll).
 */
void fdmon_poll_downgrade(AioContext *ctx)
{
    AioHandler *handler;

    ctx->fdmon_ops = &fdmon_poll_ops;

    QLIST_FOREACH_RCU(handler, &ctx->aio_handlers, node) {
        if (QLIST_IS_INSERTED(handler, node_deleted)) {
            continue; /* being removed; do not re-register */
        }
        if (handler->pfd.events) {
            g_source_add_poll(&ctx->source, &handler->pfd);
        }
    }
}
|
|
|