[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
[Qemu-devel] [PATCH v2 8/9] aio: convert aio_poll() to g_poll(3)
From: Stefan Hajnoczi
Subject: [Qemu-devel] [PATCH v2 8/9] aio: convert aio_poll() to g_poll(3)
Date: Fri, 1 Feb 2013 14:53:27 +0100
AioHandler already has a GPollFD so we can directly use its
events/revents.
Add the int pollfds_idx field to AioContext so we can map g_poll(3)
results back to AioHandlers.
Reuse aio_dispatch() to invoke handlers after g_poll(3).
Signed-off-by: Stefan Hajnoczi <address@hidden>
---
aio-posix.c | 67 +++++++++++++++++++----------------------------------
async.c | 2 ++
include/block/aio.h | 3 +++
3 files changed, 29 insertions(+), 43 deletions(-)
diff --git a/aio-posix.c b/aio-posix.c
index 35131a3..7769927 100644
--- a/aio-posix.c
+++ b/aio-posix.c
@@ -25,6 +25,7 @@ struct AioHandler
IOHandler *io_write;
AioFlushHandler *io_flush;
int deleted;
+ int pollfds_idx;
void *opaque;
QLIST_ENTRY(AioHandler) node;
};
@@ -85,6 +86,7 @@ void aio_set_fd_handler(AioContext *ctx,
node->io_write = io_write;
node->io_flush = io_flush;
node->opaque = opaque;
+ node->pollfds_idx = -1;
node->pfd.events = (io_read ? G_IO_IN | G_IO_HUP : 0);
node->pfd.events |= (io_write ? G_IO_OUT : 0);
@@ -177,10 +179,7 @@ static bool aio_dispatch(AioContext *ctx)
bool aio_poll(AioContext *ctx, bool blocking)
{
- static struct timeval tv0;
AioHandler *node;
- fd_set rdfds, wrfds;
- int max_fd = -1;
int ret;
bool busy, progress;
@@ -206,12 +205,13 @@ bool aio_poll(AioContext *ctx, bool blocking)
ctx->walking_handlers++;
- FD_ZERO(&rdfds);
- FD_ZERO(&wrfds);
+ g_array_set_size(ctx->pollfds, 0);
- /* fill fd sets */
+ /* fill pollfds */
busy = false;
QLIST_FOREACH(node, &ctx->aio_handlers, node) {
+ node->pollfds_idx = -1;
+
/* If there aren't pending AIO operations, don't invoke callbacks.
* Otherwise, if there are no AIO requests, qemu_aio_wait() would
* wait indefinitely.
@@ -222,13 +222,13 @@ bool aio_poll(AioContext *ctx, bool blocking)
}
busy = true;
}
- if (!node->deleted && node->io_read) {
- FD_SET(node->pfd.fd, &rdfds);
- max_fd = MAX(max_fd, node->pfd.fd + 1);
- }
- if (!node->deleted && node->io_write) {
- FD_SET(node->pfd.fd, &wrfds);
- max_fd = MAX(max_fd, node->pfd.fd + 1);
+ if (!node->deleted && node->pfd.events) {
+ GPollFD pfd = {
+ .fd = node->pfd.fd,
+ .events = node->pfd.events,
+ };
+ node->pollfds_idx = ctx->pollfds->len;
+ g_array_append_val(ctx->pollfds, pfd);
}
}
@@ -240,41 +240,22 @@ bool aio_poll(AioContext *ctx, bool blocking)
}
/* wait until next event */
- ret = select(max_fd, &rdfds, &wrfds, NULL, blocking ? NULL : &tv0);
+ ret = g_poll((GPollFD *)ctx->pollfds->data,
+ ctx->pollfds->len,
+ blocking ? -1 : 0);
/* if we have any readable fds, dispatch event */
if (ret > 0) {
- /* we have to walk very carefully in case
- * qemu_aio_set_fd_handler is called while we're walking */
- node = QLIST_FIRST(&ctx->aio_handlers);
- while (node) {
- AioHandler *tmp;
-
- ctx->walking_handlers++;
-
- if (!node->deleted &&
- FD_ISSET(node->pfd.fd, &rdfds) &&
- node->io_read) {
- node->io_read(node->opaque);
- progress = true;
- }
- if (!node->deleted &&
- FD_ISSET(node->pfd.fd, &wrfds) &&
- node->io_write) {
- node->io_write(node->opaque);
- progress = true;
- }
-
- tmp = node;
- node = QLIST_NEXT(node, node);
-
- ctx->walking_handlers--;
-
- if (!ctx->walking_handlers && tmp->deleted) {
- QLIST_REMOVE(tmp, node);
- g_free(tmp);
+ QLIST_FOREACH(node, &ctx->aio_handlers, node) {
+ if (node->pollfds_idx != -1) {
+ GPollFD *pfd = &g_array_index(ctx->pollfds, GPollFD,
+ node->pollfds_idx);
+ node->pfd.revents |= pfd->revents;
}
}
+ if (aio_dispatch(ctx)) {
+ progress = true;
+ }
}
assert(progress || busy);
diff --git a/async.c b/async.c
index 72d268a..f2d47ba 100644
--- a/async.c
+++ b/async.c
@@ -174,6 +174,7 @@ aio_ctx_finalize(GSource *source)
aio_set_event_notifier(ctx, &ctx->notifier, NULL, NULL);
event_notifier_cleanup(&ctx->notifier);
+ g_array_free(ctx->pollfds, TRUE);
}
static GSourceFuncs aio_source_funcs = {
@@ -198,6 +199,7 @@ AioContext *aio_context_new(void)
{
AioContext *ctx;
ctx = (AioContext *) g_source_new(&aio_source_funcs, sizeof(AioContext));
+ ctx->pollfds = g_array_new(FALSE, FALSE, sizeof(GPollFD));
event_notifier_init(&ctx->notifier, false);
aio_set_event_notifier(ctx, &ctx->notifier,
(EventNotifierHandler *)
diff --git a/include/block/aio.h b/include/block/aio.h
index 8eda924..5b54d38 100644
--- a/include/block/aio.h
+++ b/include/block/aio.h
@@ -63,6 +63,9 @@ typedef struct AioContext {
/* Used for aio_notify. */
EventNotifier notifier;
+
+ /* GPollFDs for aio_poll() */
+ GArray *pollfds;
} AioContext;
/* Returns 1 if there are still outstanding AIO requests; 0 otherwise */
--
1.8.1
- [Qemu-devel] [PATCH v2 0/9] main-loop: switch to g_poll(3) on POSIX hosts, Stefan Hajnoczi, 2013/02/01
- [Qemu-devel] [PATCH v2 1/9] main-loop: fix select_ret uninitialized variable warning, Stefan Hajnoczi, 2013/02/01
- [Qemu-devel] [PATCH v2 2/9] main-loop: switch to g_poll() on POSIX hosts, Stefan Hajnoczi, 2013/02/01
- [Qemu-devel] [PATCH v2 3/9] main-loop: switch POSIX glib integration to GPollFD, Stefan Hajnoczi, 2013/02/01
- [Qemu-devel] [PATCH v2 4/9] slirp: switch to GPollFD, Stefan Hajnoczi, 2013/02/01
- [Qemu-devel] [PATCH v2 6/9] main-loop: drop rfds/wfds/xfds for good, Stefan Hajnoczi, 2013/02/01
- [Qemu-devel] [PATCH v2 5/9] iohandler: switch to GPollFD, Stefan Hajnoczi, 2013/02/01
- [Qemu-devel] [PATCH v2 7/9] aio: extract aio_dispatch() from aio_poll(), Stefan Hajnoczi, 2013/02/01
- [Qemu-devel] [PATCH v2 8/9] aio: convert aio_poll() to g_poll(3), Stefan Hajnoczi <=
- [Qemu-devel] [PATCH v2 9/9] aio: support G_IO_HUP and G_IO_ERR, Stefan Hajnoczi, 2013/02/01