From: Paolo Bonzini
Subject: [Qemu-devel] [PATCH 38/40] aio-posix: partially inline aio_dispatch into aio_poll
Date: Tue, 24 Nov 2015 19:01:29 +0100

This patch prepares for the removal of unnecessary lockcnt inc/dec pairs.
Extract the dispatching loop for file descriptor handlers into a new
function aio_dispatch_handlers, and then inline aio_dispatch into
aio_poll.

aio_dispatch can now become void.

Signed-off-by: Paolo Bonzini <address@hidden>
---
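Note: after this change, the only caller of aio_dispatch left outside the
aio_poll implementations is the GSource dispatch callback.  A
GSourceFuncs.dispatch function returns whether the source should stay
attached to its GMainContext, not whether progress was made, so the bool
that aio_dispatch used to return was already discarded there.  A sketch of
that call site (approximately as in async.c; the exact shape is from
memory and is not part of this patch):

static gboolean
aio_ctx_dispatch(GSource *source, GSourceFunc callback, gpointer user_data)
{
    AioContext *ctx = (AioContext *) source;

    /* The AioContext GSource carries no user callback of its own. */
    assert(callback == NULL);
    aio_dispatch(ctx);  /* progress value is not needed here */
    return TRUE;        /* keep the source attached */
}
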
 aio-posix.c         | 41 +++++++++++++++++------------------------
 aio-win32.c         | 11 ++++-------
 include/block/aio.h |  2 +-
 3 files changed, 22 insertions(+), 32 deletions(-)

diff --git a/aio-posix.c b/aio-posix.c
index aabc4ae..b372816 100644
--- a/aio-posix.c
+++ b/aio-posix.c
@@ -305,26 +305,11 @@ bool aio_pending(AioContext *ctx)
     return result;
 }
 
-bool aio_dispatch(AioContext *ctx)
+static bool aio_dispatch_handlers(AioContext *ctx)
 {
     AioHandler *node, *tmp;
     bool progress = false;
 
-    /*
-     * If there are callbacks left that have been queued, we need to call them.
-     * Do not call select in this case, because it is possible that the caller
-     * does not need a complete flush (as is the case for aio_poll loops).
-     */
-    if (aio_bh_poll(ctx)) {
-        progress = true;
-    }
-
-    /*
-     * We have to walk very carefully in case aio_set_fd_handler is
-     * called while we're walking.
-     */
-    qemu_lockcnt_inc(&ctx->list_lock);
-
     QLIST_FOREACH_SAFE_RCU(node, &ctx->aio_handlers, node, tmp) {
         int revents;
 
@@ -356,12 +341,18 @@ bool aio_dispatch(AioContext *ctx)
         }
     }
 
-    qemu_lockcnt_dec(&ctx->list_lock);
+    return progress;
+}
 
-    /* Run our timers */
-    progress |= timerlistgroup_run_timers(&ctx->tlg);
+void aio_dispatch(AioContext *ctx)
+{
+    aio_bh_poll(ctx);
 
-    return progress;
+    qemu_lockcnt_inc(&ctx->list_lock);
+    aio_dispatch_handlers(ctx);
+    qemu_lockcnt_dec(&ctx->list_lock);
+
+    timerlistgroup_run_timers(&ctx->tlg);
 }
 
 /* These thread-local variables are used only in a small part of aio_poll
@@ -472,11 +463,13 @@ bool aio_poll(AioContext *ctx, bool blocking)
     npfd = 0;
     qemu_lockcnt_dec(&ctx->list_lock);
 
-    /* Run dispatch even if there were no readable fds to run timers */
-    if (aio_dispatch(ctx)) {
-        progress = true;
-    }
+    progress |= aio_bh_poll(ctx);
+
+    qemu_lockcnt_inc(&ctx->list_lock);
+    progress |= aio_dispatch_handlers(ctx);
+    qemu_lockcnt_dec(&ctx->list_lock);
 
+    progress |= timerlistgroup_run_timers(&ctx->tlg);
     return progress;
 }
 
diff --git a/aio-win32.c b/aio-win32.c
index 4479d3f..feffdc4 100644
--- a/aio-win32.c
+++ b/aio-win32.c
@@ -286,14 +286,11 @@ static bool aio_dispatch_handlers(AioContext *ctx, HANDLE event)
     return progress;
 }
 
-bool aio_dispatch(AioContext *ctx)
+void aio_dispatch(AioContext *ctx)
 {
-    bool progress;
-
-    progress = aio_bh_poll(ctx);
-    progress |= aio_dispatch_handlers(ctx, INVALID_HANDLE_VALUE);
-    progress |= timerlistgroup_run_timers(&ctx->tlg);
-    return progress;
+    aio_bh_poll(ctx);
+    aio_dispatch_handlers(ctx, INVALID_HANDLE_VALUE);
+    timerlistgroup_run_timers(&ctx->tlg);
 }
 
 bool aio_poll(AioContext *ctx, bool blocking)
diff --git a/include/block/aio.h b/include/block/aio.h
index 21044af..05a41b2 100644
--- a/include/block/aio.h
+++ b/include/block/aio.h
@@ -277,7 +277,7 @@ bool aio_pending(AioContext *ctx);
  *
  * This is used internally in the implementation of the GSource.
  */
-bool aio_dispatch(AioContext *ctx);
+void aio_dispatch(AioContext *ctx);
 
 /* Progress in completing AIO work to occur.  This can issue new pending
  * aio as a result of executing I/O completion or bh callbacks.
-- 
1.8.3.1