From: Paolo Bonzini
Subject: [Qemu-block] [PATCH 14/18] block: explicitly acquire aiocontext in timers that need it
Date: Thu, 6 Aug 2015 15:36:12 +0200

Remove the implicit acquisition of the AioContext from aio_dispatch() and
aio_poll(), which used to take it around timerlistgroup_run_timers(), and
instead acquire the context explicitly in the timer callbacks that need
it: curl, iscsi, null, qed, throttle-groups and the coroutine sleep timer.

Signed-off-by: Paolo Bonzini <address@hidden>
---
 aio-posix.c             | 2 --
 aio-win32.c             | 2 --
 block/curl.c            | 2 ++
 block/iscsi.c           | 2 ++
 block/null.c            | 4 ++++
 block/qed.c             | 2 ++
 block/throttle-groups.c | 2 ++
 qemu-coroutine-sleep.c  | 5 +++++
 8 files changed, 17 insertions(+), 4 deletions(-)

diff --git a/aio-posix.c b/aio-posix.c
index 58f0937..6cc7ada 100644
--- a/aio-posix.c
+++ b/aio-posix.c
@@ -189,9 +189,7 @@ bool aio_dispatch(AioContext *ctx)
     qemu_lockcnt_dec(&ctx->list_lock);
 
     /* Run our timers */
-    aio_context_acquire(ctx);
     progress |= timerlistgroup_run_timers(&ctx->tlg);
-    aio_context_release(ctx);
 
     return progress;
 }
diff --git a/aio-win32.c b/aio-win32.c
index f6608b3..ed0306a 100644
--- a/aio-win32.c
+++ b/aio-win32.c
@@ -366,9 +366,7 @@ bool aio_poll(AioContext *ctx, bool blocking)
         progress |= aio_dispatch_handlers(ctx, event);
     } while (count > 0);
 
-    aio_context_acquire(ctx);
     progress |= timerlistgroup_run_timers(&ctx->tlg);
-    aio_context_release(ctx);
 
     return progress;
 }
diff --git a/block/curl.c b/block/curl.c
index 446a6d9..fc70b01 100644
--- a/block/curl.c
+++ b/block/curl.c
@@ -374,9 +374,11 @@ static void curl_multi_timeout_do(void *arg)
         return;
     }
 
+    aio_context_acquire(s->aio_context);
     curl_multi_socket_action(s->multi, CURL_SOCKET_TIMEOUT, 0, &running);
 
     curl_multi_check_completion(s);
+    aio_context_release(s->aio_context);
 #else
     abort();
 #endif
diff --git a/block/iscsi.c b/block/iscsi.c
index 9948e70..2a4e6bc 100644
--- a/block/iscsi.c
+++ b/block/iscsi.c
@@ -1155,6 +1155,7 @@ static void iscsi_nop_timed_event(void *opaque)
 {
     IscsiLun *iscsilun = opaque;
 
+    aio_context_acquire(iscsilun->aio_context);
     if (iscsi_get_nops_in_flight(iscsilun->iscsi) >= MAX_NOP_FAILURES) {
         error_report("iSCSI: NOP timeout. Reconnecting...");
         iscsilun->request_timed_out = true;
@@ -1165,6 +1166,7 @@ static void iscsi_nop_timed_event(void *opaque)
 
     timer_mod(iscsilun->nop_timer, qemu_clock_get_ms(QEMU_CLOCK_REALTIME) + NOP_INTERVAL);
     iscsi_set_events(iscsilun);
+    aio_context_release(iscsilun->aio_context);
 }
 
 static void iscsi_readcapacity_sync(IscsiLun *iscsilun, Error **errp)
diff --git a/block/null.c b/block/null.c
index dd1b170..9bddc1b 100644
--- a/block/null.c
+++ b/block/null.c
@@ -129,7 +129,11 @@ static void null_bh_cb(void *opaque)
 static void null_timer_cb(void *opaque)
 {
     NullAIOCB *acb = opaque;
+    AioContext *ctx = bdrv_get_aio_context(acb->common.bs);
+
+    aio_context_acquire(ctx);
     acb->common.cb(acb->common.opaque, 0);
+    aio_context_release(ctx);
     timer_deinit(&acb->timer);
     qemu_aio_unref(acb);
 }
diff --git a/block/qed.c b/block/qed.c
index d47d7e1..adc9b73 100644
--- a/block/qed.c
+++ b/block/qed.c
@@ -330,10 +330,12 @@ static void qed_need_check_timer_cb(void *opaque)
 
     trace_qed_need_check_timer_cb(s);
 
+    aio_context_acquire(bdrv_get_aio_context(s->bs));
     qed_plug_allocating_write_reqs(s);
 
     /* Ensure writes are on disk before clearing flag */
     bdrv_aio_flush(s->bs, qed_clear_need_check, s);
+    aio_context_release(bdrv_get_aio_context(s->bs));
 }
 
 static void qed_start_need_check_timer(BDRVQEDState *s)
diff --git a/block/throttle-groups.c b/block/throttle-groups.c
index 1abc6fc..9d8a620 100644
--- a/block/throttle-groups.c
+++ b/block/throttle-groups.c
@@ -369,7 +369,9 @@ static void timer_cb(BlockDriverState *bs, bool is_write)
     qemu_mutex_unlock(&tg->lock);
 
     /* Run the request that was waiting for this timer */
+    aio_context_acquire(bdrv_get_aio_context(bs));
     empty_queue = !qemu_co_enter_next(&bs->throttled_reqs[is_write]);
+    aio_context_release(bdrv_get_aio_context(bs));
 
     /* If the request queue was empty then we have to take care of
      * scheduling the next one */
diff --git a/qemu-coroutine-sleep.c b/qemu-coroutine-sleep.c
index 9abb7fd..6014e7c 100644
--- a/qemu-coroutine-sleep.c
+++ b/qemu-coroutine-sleep.c
@@ -18,13 +18,17 @@
 typedef struct CoSleepCB {
     QEMUTimer *ts;
     Coroutine *co;
+    AioContext *ctx;
 } CoSleepCB;
 
 static void co_sleep_cb(void *opaque)
 {
     CoSleepCB *sleep_cb = opaque;
+    AioContext *ctx = sleep_cb->ctx;
 
+    aio_context_acquire(ctx);
     qemu_coroutine_enter(sleep_cb->co, NULL);
+    aio_context_release(ctx);
 }
 
 void coroutine_fn co_aio_sleep_ns(AioContext *ctx, QEMUClockType type,
@@ -32,6 +36,7 @@ void coroutine_fn co_aio_sleep_ns(AioContext *ctx, QEMUClockType type,
 {
     CoSleepCB sleep_cb = {
         .co = qemu_coroutine_self(),
+        .ctx = ctx,
     };
     sleep_cb.ts = aio_timer_new(ctx, type, SCALE_NS, co_sleep_cb, &sleep_cb);
     timer_mod(sleep_cb.ts, qemu_clock_get_ns(type) + ns);
-- 
2.4.3
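
(A sketch follows for context; it is not part of the patch and will not
apply with git am. MyDriverState, my_timer_cb and my_co_fn are made-up
names; aio_context_acquire(), aio_context_release(), timer_mod() and
co_aio_sleep_ns() are the real APIs used in the diff above.)

/* Hypothetical driver state; mirrors what block/curl.c and block/iscsi.c
 * keep around for their timers. */
typedef struct MyDriverState {
    AioContext *aio_context;
    QEMUTimer *timer;
} MyDriverState;

static void my_timer_cb(void *opaque)
{
    MyDriverState *s = opaque;

    /* aio_dispatch()/aio_poll() no longer hold the AioContext while
     * running timers, so the callback takes it explicitly before
     * touching shared driver state. */
    aio_context_acquire(s->aio_context);
    /* ... work that needs the context goes here ... */
    aio_context_release(s->aio_context);

    /* Re-arming can happen inside or outside the critical section;
     * block/iscsi.c above re-arms while still holding the context. */
    timer_mod(s->timer, qemu_clock_get_ms(QEMU_CLOCK_REALTIME) + 1000);
}

/* Likewise hypothetical: a coroutine caller of co_aio_sleep_ns(). After
 * this patch the wakeup in co_sleep_cb() re-acquires the context itself,
 * so callers are unchanged. */
static void coroutine_fn my_co_fn(BlockDriverState *bs)
{
    co_aio_sleep_ns(bdrv_get_aio_context(bs), QEMU_CLOCK_REALTIME, 100000);
}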