From: Paolo Bonzini
Subject: [Qemu-devel] [PATCH 35/40] block: explicitly acquire aiocontext in aio callbacks that need it
Date: Tue, 24 Nov 2015 19:01:26 +0100
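
Completion callbacks have so far relied on the code that invokes them
(bottom halves, linux-aio, the thread pool, win32-aio and several block
drivers) holding the AioContext around the call.  This patch stops that
generic completion code from taking the lock on the callback's behalf
and instead has each callback that touches block-layer or device state
acquire the AioContext explicitly.  As a minimal sketch of the new shape
(the names below are illustrative only, not taken from this series), a
converted device completion callback looks like this:

    static void my_device_complete(void *opaque, int ret)
    {
        MyRequest *req = opaque;
        AioContext *ctx = blk_get_aio_context(req->blk);

        /* The caller no longer holds the AioContext for us, so take
         * it before touching shared request/device state.  */
        aio_context_acquire(ctx);
        my_device_finish_request(req, ret);
        aio_context_release(ctx);
    }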

Signed-off-by: Paolo Bonzini <address@hidden>
---
 block/archipelago.c    |  3 ---
 block/blkdebug.c       |  4 ---
 block/blkverify.c      |  9 +++----
 block/block-backend.c  |  4 ---
 block/curl.c           |  2 +-
 block/io.c             | 13 +++++-----
 block/iscsi.c          |  2 --
 block/linux-aio.c      |  7 +++---
 block/mirror.c         | 12 ++++++---
 block/null.c           |  8 ------
 block/qed-cluster.c    |  2 ++
 block/qed-table.c      | 12 +++++++--
 block/qed.c            | 66 ++++++++++++++++++++++++++++++++++++++------------
 block/quorum.c         | 12 +++++++++
 block/rbd.c            |  4 ---
 block/win32-aio.c      |  2 --
 hw/block/virtio-blk.c  | 12 ++++++++-
 hw/scsi/scsi-disk.c    | 18 ++++++++++++++
 hw/scsi/scsi-generic.c | 20 ++++++++++++---
 thread-pool.c          | 12 ++++++++-
 20 files changed, 157 insertions(+), 67 deletions(-)
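
For reference, the coroutine completion paths converted here (block/io.c
and thread-pool.c) apply the same idea: the submitter records the
AioContext next to the coroutine, and the completion callback takes it
around qemu_coroutine_enter().  A minimal sketch, with illustrative
names only:

    typedef struct ExampleCo {
        Coroutine *co;
        AioContext *ctx;    /* recorded by the submitter */
        int ret;
    } ExampleCo;

    static void example_co_cb(void *opaque, int ret)
    {
        ExampleCo *c = opaque;

        c->ret = ret;
        aio_context_acquire(c->ctx);
        qemu_coroutine_enter(c->co, NULL);
        aio_context_release(c->ctx);
    }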

diff --git a/block/archipelago.c b/block/archipelago.c
index 7f69a3f..855655c 100644
--- a/block/archipelago.c
+++ b/block/archipelago.c
@@ -312,12 +312,9 @@ static void qemu_archipelago_complete_aio(void *opaque)
 {
     AIORequestData *reqdata = (AIORequestData *) opaque;
     ArchipelagoAIOCB *aio_cb = (ArchipelagoAIOCB *) reqdata->aio_cb;
-    AioContext *ctx = bdrv_get_aio_context(aio_cb->common.bs);
 
     qemu_bh_delete(aio_cb->bh);
-    aio_context_acquire(ctx);
     aio_cb->common.cb(aio_cb->common.opaque, aio_cb->ret);
-    aio_context_release(ctx);
     aio_cb->status = 0;
 
     qemu_aio_unref(aio_cb);
diff --git a/block/blkdebug.c b/block/blkdebug.c
index ba35185..6860a2b 100644
--- a/block/blkdebug.c
+++ b/block/blkdebug.c
@@ -458,12 +458,8 @@ out:
 static void error_callback_bh(void *opaque)
 {
     struct BlkdebugAIOCB *acb = opaque;
-    AioContext *ctx = bdrv_get_aio_context(acb->common.bs);
-
     qemu_bh_delete(acb->bh);
-    aio_context_acquire(ctx);
     acb->common.cb(acb->common.opaque, acb->ret);
-    aio_context_release(ctx);
     qemu_aio_unref(acb);
 }
 
diff --git a/block/blkverify.c b/block/blkverify.c
index 3ff681a..74188f5 100644
--- a/block/blkverify.c
+++ b/block/blkverify.c
@@ -188,23 +188,22 @@ static BlkverifyAIOCB *blkverify_aio_get(BlockDriverState *bs, bool is_write,
 static void blkverify_aio_bh(void *opaque)
 {
     BlkverifyAIOCB *acb = opaque;
-    AioContext *ctx = bdrv_get_aio_context(acb->common.bs);
 
     qemu_bh_delete(acb->bh);
     if (acb->buf) {
         qemu_iovec_destroy(&acb->raw_qiov);
         qemu_vfree(acb->buf);
     }
-    aio_context_acquire(ctx);
     acb->common.cb(acb->common.opaque, acb->ret);
-    aio_context_release(ctx);
     qemu_aio_unref(acb);
 }
 
 static void blkverify_aio_cb(void *opaque, int ret)
 {
     BlkverifyAIOCB *acb = opaque;
+    AioContext *ctx = bdrv_get_aio_context(acb->common.bs);
 
+    aio_context_acquire(ctx);
     switch (++acb->done) {
     case 1:
         acb->ret = ret;
@@ -219,11 +218,11 @@ static void blkverify_aio_cb(void *opaque, int ret)
             acb->verify(acb);
         }
 
-        acb->bh = aio_bh_new(bdrv_get_aio_context(acb->common.bs),
-                             blkverify_aio_bh, acb);
+        acb->bh = aio_bh_new(ctx, blkverify_aio_bh, acb);
         qemu_bh_schedule(acb->bh);
         break;
     }
+    aio_context_release(ctx);
 }
 
 static void blkverify_verify_readv(BlkverifyAIOCB *acb)
diff --git a/block/block-backend.c b/block/block-backend.c
index 8549289..36ccc9e 100644
--- a/block/block-backend.c
+++ b/block/block-backend.c
@@ -637,12 +637,8 @@ int blk_write_zeroes(BlockBackend *blk, int64_t sector_num,
 static void error_callback_bh(void *opaque)
 {
     struct BlockBackendAIOCB *acb = opaque;
-    AioContext *ctx = bdrv_get_aio_context(acb->common.bs);
-
     qemu_bh_delete(acb->bh);
-    aio_context_acquire(ctx);
     acb->common.cb(acb->common.opaque, acb->ret);
-    aio_context_release(ctx);
     qemu_aio_unref(acb);
 }
 
diff --git a/block/curl.c b/block/curl.c
index c2b6726..7b5b17f 100644
--- a/block/curl.c
+++ b/block/curl.c
@@ -715,11 +715,11 @@ static void curl_readv_bh_cb(void *p)
     curl_multi_socket_action(s->multi, CURL_SOCKET_TIMEOUT, 0, &running);
 
 out:
+    aio_context_release(ctx);
     if (ret != -EINPROGRESS) {
         acb->common.cb(acb->common.opaque, ret);
         qemu_aio_unref(acb);
     }
-    aio_context_release(ctx);
 }
 
 static BlockAIOCB *curl_aio_readv(BlockDriverState *bs,
diff --git a/block/io.c b/block/io.c
index 4b3e2b2..9b30f96 100644
--- a/block/io.c
+++ b/block/io.c
@@ -2036,15 +2036,12 @@ static const AIOCBInfo bdrv_em_aiocb_info = {
 static void bdrv_aio_bh_cb(void *opaque)
 {
     BlockAIOCBSync *acb = opaque;
-    AioContext *ctx = bdrv_get_aio_context(acb->common.bs);
 
     if (!acb->is_write && acb->ret >= 0) {
         qemu_iovec_from_buf(acb->qiov, 0, acb->bounce, acb->qiov->size);
     }
     qemu_vfree(acb->bounce);
-    aio_context_acquire(ctx);
     acb->common.cb(acb->common.opaque, acb->ret);
-    aio_context_release(ctx);
     qemu_bh_delete(acb->bh);
     acb->bh = NULL;
     qemu_aio_unref(acb);
@@ -2120,13 +2117,10 @@ static void bdrv_co_complete(BlockAIOCBCoroutine *acb)
 static void bdrv_co_em_bh(void *opaque)
 {
     BlockAIOCBCoroutine *acb = opaque;
-    AioContext *ctx = bdrv_get_aio_context(acb->common.bs);
 
     assert(!acb->need_bh);
     qemu_bh_delete(acb->bh);
-    aio_context_acquire(ctx);
     bdrv_co_complete(acb);
-    aio_context_release(ctx);
 }
 
 static void bdrv_co_maybe_schedule_bh(BlockAIOCBCoroutine *acb)
@@ -2277,15 +2271,19 @@ void qemu_aio_unref(void *p)
 
 typedef struct CoroutineIOCompletion {
     Coroutine *coroutine;
+    AioContext *ctx;
     int ret;
 } CoroutineIOCompletion;
 
 static void bdrv_co_io_em_complete(void *opaque, int ret)
 {
     CoroutineIOCompletion *co = opaque;
+    AioContext *ctx = co->ctx;
 
     co->ret = ret;
+    aio_context_acquire(ctx);
     qemu_coroutine_enter(co->coroutine, NULL);
+    aio_context_release(ctx);
 }
 
 static int coroutine_fn bdrv_co_io_em(BlockDriverState *bs, int64_t sector_num,
@@ -2294,6 +2292,7 @@ static int coroutine_fn bdrv_co_io_em(BlockDriverState *bs, int64_t sector_num,
 {
     CoroutineIOCompletion co = {
         .coroutine = qemu_coroutine_self(),
+        .ctx = bdrv_get_aio_context(bs),
     };
     BlockAIOCB *acb;
 
@@ -2367,6 +2366,7 @@ int coroutine_fn bdrv_co_flush(BlockDriverState *bs)
         BlockAIOCB *acb;
         CoroutineIOCompletion co = {
             .coroutine = qemu_coroutine_self(),
+            .ctx = bdrv_get_aio_context(bs),
         };
 
         acb = bs->drv->bdrv_aio_flush(bs, bdrv_co_io_em_complete, &co);
@@ -2497,6 +2497,7 @@ int coroutine_fn bdrv_co_discard(BlockDriverState *bs, int64_t sector_num,
             BlockAIOCB *acb;
             CoroutineIOCompletion co = {
                 .coroutine = qemu_coroutine_self(),
+                .ctx = bdrv_get_aio_context(bs),
             };
 
             acb = bs->drv->bdrv_aio_discard(bs, sector_num, nb_sectors,
diff --git a/block/iscsi.c b/block/iscsi.c
index 411aef8..a3dc06b 100644
--- a/block/iscsi.c
+++ b/block/iscsi.c
@@ -129,9 +129,7 @@ iscsi_bh_cb(void *p)
     g_free(acb->buf);
     acb->buf = NULL;
 
-    aio_context_acquire(ctx);
     acb->common.cb(acb->common.opaque, acb->status);
-    aio_context_release(ctx);
 
     if (acb->task != NULL) {
         scsi_free_scsi_task(acb->task);
diff --git a/block/linux-aio.c b/block/linux-aio.c
index 0e94a86..d061b8b 100644
--- a/block/linux-aio.c
+++ b/block/linux-aio.c
@@ -71,8 +71,7 @@ static inline ssize_t io_event_ret(struct io_event *ev)
 /*
  * Completes an AIO request (calls the callback and frees the ACB).
  */
-static void qemu_laio_process_completion(struct qemu_laio_state *s,
-    struct qemu_laiocb *laiocb)
+static void qemu_laio_process_completion(struct qemu_laiocb *laiocb)
 {
     int ret;
 
@@ -138,7 +137,9 @@ static void qemu_laio_completion_bh(void *opaque)
         laiocb->ret = io_event_ret(&s->events[s->event_idx]);
         s->event_idx++;
 
-        qemu_laio_process_completion(s, laiocb);
+        aio_context_release(s->aio_context);
+        qemu_laio_process_completion(laiocb);
+        aio_context_acquire(s->aio_context);
     }
 
     if (!s->io_q.plugged && !QSIMPLEQ_EMPTY(&s->io_q.pending)) {
diff --git a/block/mirror.c b/block/mirror.c
index 52c9abf..a8249d1 100644
--- a/block/mirror.c
+++ b/block/mirror.c
@@ -125,6 +125,8 @@ static void mirror_write_complete(void *opaque, int ret)
 {
     MirrorOp *op = opaque;
     MirrorBlockJob *s = op->s;
+
+    aio_context_acquire(bdrv_get_aio_context(s->common.bs));
     if (ret < 0) {
         BlockErrorAction action;
 
@@ -135,12 +137,15 @@ static void mirror_write_complete(void *opaque, int ret)
         }
     }
     mirror_iteration_done(op, ret);
+    aio_context_release(bdrv_get_aio_context(s->common.bs));
 }
 
 static void mirror_read_complete(void *opaque, int ret)
 {
     MirrorOp *op = opaque;
     MirrorBlockJob *s = op->s;
+
+    aio_context_acquire(bdrv_get_aio_context(s->common.bs));
     if (ret < 0) {
         BlockErrorAction action;
 
@@ -151,10 +156,11 @@ static void mirror_read_complete(void *opaque, int ret)
         }
 
         mirror_iteration_done(op, ret);
-        return;
+    } else {
+        bdrv_aio_writev(s->target, op->sector_num, &op->qiov, op->nb_sectors,
+                        mirror_write_complete, op);
     }
-    bdrv_aio_writev(s->target, op->sector_num, &op->qiov, op->nb_sectors,
-                    mirror_write_complete, op);
+    aio_context_release(bdrv_get_aio_context(s->common.bs));
 }
 
 static uint64_t coroutine_fn mirror_iteration(MirrorBlockJob *s)
diff --git a/block/null.c b/block/null.c
index 9bddc1b..7d08323 100644
--- a/block/null.c
+++ b/block/null.c
@@ -117,11 +117,7 @@ static const AIOCBInfo null_aiocb_info = {
 static void null_bh_cb(void *opaque)
 {
     NullAIOCB *acb = opaque;
-    AioContext *ctx = bdrv_get_aio_context(acb->common.bs);
-
-    aio_context_acquire(ctx);
     acb->common.cb(acb->common.opaque, 0);
-    aio_context_release(ctx);
     qemu_bh_delete(acb->bh);
     qemu_aio_unref(acb);
 }
@@ -129,11 +125,7 @@ static void null_bh_cb(void *opaque)
 static void null_timer_cb(void *opaque)
 {
     NullAIOCB *acb = opaque;
-    AioContext *ctx = bdrv_get_aio_context(acb->common.bs);
-
-    aio_context_acquire(ctx);
     acb->common.cb(acb->common.opaque, 0);
-    aio_context_release(ctx);
     timer_deinit(&acb->timer);
     qemu_aio_unref(acb);
 }
diff --git a/block/qed-cluster.c b/block/qed-cluster.c
index f64b2af..64ea4f2 100644
--- a/block/qed-cluster.c
+++ b/block/qed-cluster.c
@@ -82,6 +82,7 @@ static void qed_find_cluster_cb(void *opaque, int ret)
     unsigned int index;
     unsigned int n;
 
+    qed_acquire(s);
     if (ret) {
         goto out;
     }
@@ -108,6 +109,7 @@ static void qed_find_cluster_cb(void *opaque, int ret)
 
 out:
     find_cluster_cb->cb(find_cluster_cb->opaque, ret, offset, len);
+    qed_release(s);
     g_free(find_cluster_cb);
 }
 
diff --git a/block/qed-table.c b/block/qed-table.c
index f4219b8..83bd5c2 100644
--- a/block/qed-table.c
+++ b/block/qed-table.c
@@ -29,6 +29,7 @@ static void qed_read_table_cb(void *opaque, int ret)
 {
     QEDReadTableCB *read_table_cb = opaque;
     QEDTable *table = read_table_cb->table;
+    BDRVQEDState *s = read_table_cb->s;
     int noffsets = read_table_cb->qiov.size / sizeof(uint64_t);
     int i;
 
@@ -38,13 +39,15 @@ static void qed_read_table_cb(void *opaque, int ret)
     }
 
     /* Byteswap offsets */
+    qed_acquire(s);
     for (i = 0; i < noffsets; i++) {
         table->offsets[i] = le64_to_cpu(table->offsets[i]);
     }
+    qed_release(s);
 
 out:
     /* Completion */
-    trace_qed_read_table_cb(read_table_cb->s, read_table_cb->table, ret);
+    trace_qed_read_table_cb(s, read_table_cb->table, ret);
     gencb_complete(&read_table_cb->gencb, ret);
 }
 
@@ -82,8 +85,9 @@ typedef struct {
 static void qed_write_table_cb(void *opaque, int ret)
 {
     QEDWriteTableCB *write_table_cb = opaque;
+    BDRVQEDState *s = write_table_cb->s;
 
-    trace_qed_write_table_cb(write_table_cb->s,
+    trace_qed_write_table_cb(s,
                              write_table_cb->orig_table,
                              write_table_cb->flush,
                              ret);
@@ -95,8 +99,10 @@ static void qed_write_table_cb(void *opaque, int ret)
     if (write_table_cb->flush) {
         /* We still need to flush first */
         write_table_cb->flush = false;
+        qed_acquire(s);
         bdrv_aio_flush(write_table_cb->s->bs, qed_write_table_cb,
                        write_table_cb);
+        qed_release(s);
         return;
     }
 
@@ -215,6 +221,7 @@ static void qed_read_l2_table_cb(void *opaque, int ret)
     CachedL2Table *l2_table = request->l2_table;
     uint64_t l2_offset = read_l2_table_cb->l2_offset;
 
+    qed_acquire(s);
     if (ret) {
         /* can't trust loaded L2 table anymore */
         qed_unref_l2_cache_entry(l2_table);
@@ -230,6 +237,7 @@ static void qed_read_l2_table_cb(void *opaque, int ret)
         request->l2_table = qed_find_l2_cache_entry(&s->l2_cache, l2_offset);
         assert(request->l2_table != NULL);
     }
+    qed_release(s);
 
     gencb_complete(&read_l2_table_cb->gencb, ret);
 }
diff --git a/block/qed.c b/block/qed.c
index 17777d1..f4823e8 100644
--- a/block/qed.c
+++ b/block/qed.c
@@ -117,11 +117,13 @@ static void qed_write_header_read_cb(void *opaque, int ret)
     }
 
     /* Update header */
+    qed_acquire(s);
     qed_header_cpu_to_le(&s->header, (QEDHeader *)write_header_cb->buf);
 
     bdrv_aio_writev(s->bs->file->bs, 0, &write_header_cb->qiov,
                     write_header_cb->nsectors, qed_write_header_cb,
                     write_header_cb);
+    qed_release(s);
 }
 
 /**
@@ -277,11 +279,19 @@ static void qed_aio_start_io(QEDAIOCB *acb)
     qed_aio_next_io(acb, 0);
 }
 
+static BDRVQEDState *acb_to_s(QEDAIOCB *acb)
+{
+    return acb->common.bs->opaque;
+}
+
 static void qed_aio_next_io_cb(void *opaque, int ret)
 {
     QEDAIOCB *acb = opaque;
+    BDRVQEDState *s = acb_to_s(acb);
 
-    qed_aio_next_io(acb, ret);
+    qed_acquire(s);
+    qed_aio_next_io(opaque, ret);
+    qed_release(s);
 }
 
 static void qed_plug_allocating_write_reqs(BDRVQEDState *s)
@@ -314,23 +324,29 @@ static void qed_flush_after_clear_need_check(void *opaque, int ret)
 {
     BDRVQEDState *s = opaque;
 
+    qed_acquire(s);
     bdrv_aio_flush(s->bs, qed_finish_clear_need_check, s);
 
     /* No need to wait until flush completes */
     qed_unplug_allocating_write_reqs(s);
+    qed_release(s);
 }
 
 static void qed_clear_need_check(void *opaque, int ret)
 {
     BDRVQEDState *s = opaque;
 
+    qed_acquire(s);
     if (ret) {
         qed_unplug_allocating_write_reqs(s);
-        return;
+        goto out;
     }
 
     s->header.features &= ~QED_F_NEED_CHECK;
     qed_write_header(s, qed_flush_after_clear_need_check, s);
+
+out:
+    qed_release(s);
 }
 
 static void qed_need_check_timer_cb(void *opaque)
@@ -773,11 +789,6 @@ static int64_t coroutine_fn bdrv_qed_co_get_block_status(BlockDriverState *bs,
     return cb.status;
 }
 
-static BDRVQEDState *acb_to_s(QEDAIOCB *acb)
-{
-    return acb->common.bs->opaque;
-}
-
 /**
  * Read from the backing file or zero-fill if no backing file
  *
@@ -868,10 +879,12 @@ static void qed_copy_from_backing_file_write(void *opaque, int ret)
         return;
     }
 
+    qed_acquire(s);
     BLKDBG_EVENT(s->bs->file, BLKDBG_COW_WRITE);
     bdrv_aio_writev(s->bs->file->bs, copy_cb->offset / BDRV_SECTOR_SIZE,
                     &copy_cb->qiov, copy_cb->qiov.size / BDRV_SECTOR_SIZE,
                     qed_copy_from_backing_file_cb, copy_cb);
+    qed_release(s);
 }
 
 /**
@@ -937,7 +950,6 @@ static void qed_update_l2_table(BDRVQEDState *s, QEDTable *table, int index,
 static void qed_aio_complete_bh(void *opaque)
 {
     QEDAIOCB *acb = opaque;
-    BDRVQEDState *s = acb_to_s(acb);
     BlockCompletionFunc *cb = acb->common.cb;
     void *user_opaque = acb->common.opaque;
     int ret = acb->bh_ret;
@@ -946,9 +958,7 @@ static void qed_aio_complete_bh(void *opaque)
     qemu_aio_unref(acb);
 
     /* Invoke callback */
-    qed_acquire(s);
     cb(user_opaque, ret);
-    qed_release(s);
 }
 
 static void qed_aio_complete(QEDAIOCB *acb, int ret)
@@ -1000,6 +1010,7 @@ static void qed_commit_l2_update(void *opaque, int ret)
     CachedL2Table *l2_table = acb->request.l2_table;
     uint64_t l2_offset = l2_table->offset;
 
+    qed_acquire(s);
     qed_commit_l2_cache_entry(&s->l2_cache, l2_table);
 
     /* This is guaranteed to succeed because we just committed the entry to the
@@ -1009,6 +1020,7 @@ static void qed_commit_l2_update(void *opaque, int ret)
     assert(acb->request.l2_table != NULL);
 
     qed_aio_next_io(acb, ret);
+    qed_release(s);
 }
 
 /**
@@ -1020,15 +1032,18 @@ static void qed_aio_write_l1_update(void *opaque, int ret)
     BDRVQEDState *s = acb_to_s(acb);
     int index;
 
+    qed_acquire(s);
     if (ret) {
         qed_aio_complete(acb, ret);
-        return;
+        goto out;
     }
 
     index = qed_l1_index(s, acb->cur_pos);
     s->l1_table->offsets[index] = acb->request.l2_table->offset;
 
     qed_write_l1_table(s, index, 1, qed_commit_l2_update, acb);
+out:
+    qed_release(s);
 }
 
 /**
@@ -1071,7 +1086,11 @@ err:
 static void qed_aio_write_l2_update_cb(void *opaque, int ret)
 {
     QEDAIOCB *acb = opaque;
+    BDRVQEDState *s = acb_to_s(acb);
+
+    qed_acquire(s);
     qed_aio_write_l2_update(acb, ret, acb->cur_cluster);
+    qed_release(s);
 }
 
 /**
@@ -1088,9 +1107,11 @@ static void qed_aio_write_flush_before_l2_update(void *opaque, int ret)
     QEDAIOCB *acb = opaque;
     BDRVQEDState *s = acb_to_s(acb);
 
+    qed_acquire(s);
     if (!bdrv_aio_flush(s->bs->file->bs, qed_aio_write_l2_update_cb, opaque)) {
         qed_aio_complete(acb, -EIO);
     }
+    qed_release(s);
 }
 
 /**
@@ -1106,9 +1127,10 @@ static void qed_aio_write_main(void *opaque, int ret)
 
     trace_qed_aio_write_main(s, acb, ret, offset, acb->cur_qiov.size);
 
+    qed_acquire(s);
     if (ret) {
         qed_aio_complete(acb, ret);
-        return;
+        goto out;
     }
 
     if (acb->find_cluster_ret == QED_CLUSTER_FOUND) {
@@ -1125,6 +1147,8 @@ static void qed_aio_write_main(void *opaque, int ret)
     bdrv_aio_writev(s->bs->file->bs, offset / BDRV_SECTOR_SIZE,
                     &acb->cur_qiov, acb->cur_qiov.size / BDRV_SECTOR_SIZE,
                     next_fn, acb);
+out:
+    qed_release(s);
 }
 
 /**
@@ -1141,14 +1165,17 @@ static void qed_aio_write_postfill(void *opaque, int ret)
                       qed_offset_into_cluster(s, acb->cur_pos) +
                       acb->cur_qiov.size;
 
+    qed_acquire(s);
     if (ret) {
         qed_aio_complete(acb, ret);
-        return;
+        goto out;
     }
 
     trace_qed_aio_write_postfill(s, acb, start, len, offset);
     qed_copy_from_backing_file(s, start, len, offset,
                                 qed_aio_write_main, acb);
+out:
+    qed_release(s);
 }
 
 /**
@@ -1161,9 +1188,11 @@ static void qed_aio_write_prefill(void *opaque, int ret)
     uint64_t start = qed_start_of_cluster(s, acb->cur_pos);
     uint64_t len = qed_offset_into_cluster(s, acb->cur_pos);
 
+    qed_acquire(s);
     trace_qed_aio_write_prefill(s, acb, start, len, acb->cur_cluster);
     qed_copy_from_backing_file(s, start, len, acb->cur_cluster,
                                 qed_aio_write_postfill, acb);
+    qed_release(s);
 }
 
 /**
@@ -1182,13 +1211,17 @@ static bool qed_should_set_need_check(BDRVQEDState *s)
 static void qed_aio_write_zero_cluster(void *opaque, int ret)
 {
     QEDAIOCB *acb = opaque;
+    BDRVQEDState *s = acb_to_s(acb);
 
+    qed_acquire(s);
     if (ret) {
         qed_aio_complete(acb, ret);
-        return;
+        goto out;
     }
 
     qed_aio_write_l2_update(acb, 0, 1);
+out:
+    qed_release(s);
 }
 
 /**
@@ -1447,6 +1480,7 @@ static BlockAIOCB *bdrv_qed_aio_writev(BlockDriverState *bs,
 }
 
 typedef struct {
+    BDRVQEDState *s;
     Coroutine *co;
     int ret;
     bool done;
@@ -1459,7 +1493,9 @@ static void coroutine_fn qed_co_write_zeroes_cb(void *opaque, int ret)
     cb->done = true;
     cb->ret = ret;
     if (cb->co) {
+        qed_acquire(cb->s);
         qemu_coroutine_enter(cb->co, NULL);
+        qed_release(cb->s);
     }
 }
 
@@ -1470,7 +1506,7 @@ static int coroutine_fn bdrv_qed_co_write_zeroes(BlockDriverState *bs,
 {
     BlockAIOCB *blockacb;
     BDRVQEDState *s = bs->opaque;
-    QEDWriteZeroesCB cb = { .done = false };
+    QEDWriteZeroesCB cb = { .s = s, .done = false };
     QEMUIOVector qiov;
     struct iovec iov;
 
diff --git a/block/quorum.c b/block/quorum.c
index d7a0f11..7b451f1 100644
--- a/block/quorum.c
+++ b/block/quorum.c
@@ -149,6 +149,7 @@ static AIOCBInfo quorum_aiocb_info = {
     .cancel_async       = quorum_aio_cancel,
 };
 
+/* Called _without_ acquiring AioContext.  */
 static void quorum_aio_finalize(QuorumAIOCB *acb)
 {
     int i, ret = 0;
@@ -276,12 +277,15 @@ static void quorum_fifo_aio_cb(void *opaque, int ret)
     QuorumChildRequest *sacb = opaque;
     QuorumAIOCB *acb = sacb->parent;
     BDRVQuorumState *s = acb->common.bs->opaque;
+    AioContext *ctx = bdrv_get_aio_context(acb->common.bs);
 
+    aio_context_acquire(ctx);
     assert(acb->is_read && s->read_pattern == QUORUM_READ_PATTERN_FIFO);
 
     /* We try to read next child in FIFO order if we fail to read */
     if (ret < 0 && ++acb->child_iter < s->num_children) {
         read_fifo_child(acb);
+        aio_context_release(ctx);
         return;
     }
 
@@ -292,6 +296,7 @@ static void quorum_fifo_aio_cb(void *opaque, int ret)
 
     /* FIXME: rewrite failed children if acb->child_iter > 0? */
 
+    aio_context_release(ctx);
     quorum_aio_finalize(acb);
 }
 
@@ -300,8 +305,11 @@ static void quorum_aio_cb(void *opaque, int ret)
     QuorumChildRequest *sacb = opaque;
     QuorumAIOCB *acb = sacb->parent;
     BDRVQuorumState *s = acb->common.bs->opaque;
+    AioContext *ctx = bdrv_get_aio_context(acb->common.bs);
     bool rewrite = false;
 
+    aio_context_acquire(ctx);
+
     sacb->ret = ret;
     acb->count++;
     if (ret == 0) {
@@ -311,7 +319,9 @@ static void quorum_aio_cb(void *opaque, int ret)
     }
     assert(acb->count <= s->num_children);
     assert(acb->success_count <= s->num_children);
+
     if (acb->count < s->num_children) {
+        aio_context_release(ctx);
         return;
     }
 
@@ -322,6 +332,8 @@ static void quorum_aio_cb(void *opaque, int ret)
         quorum_has_too_much_io_failed(acb);
     }
 
+    aio_context_release(ctx);
+
     /* if no rewrite is done the code will finish right away */
     if (!rewrite) {
         quorum_aio_finalize(acb);
diff --git a/block/rbd.c b/block/rbd.c
index 6206dc3..a60a19d 100644
--- a/block/rbd.c
+++ b/block/rbd.c
@@ -376,7 +376,6 @@ static int qemu_rbd_create(const char *filename, QemuOpts *opts, Error **errp)
 static void qemu_rbd_complete_aio(RADOSCB *rcb)
 {
     RBDAIOCB *acb = rcb->acb;
-    AioContext *ctx = bdrv_get_aio_context(acb->common.bs);
     int64_t r;
 
     r = rcb->ret;
@@ -409,10 +408,7 @@ static void qemu_rbd_complete_aio(RADOSCB *rcb)
         qemu_iovec_from_buf(acb->qiov, 0, acb->bounce, acb->qiov->size);
     }
     qemu_vfree(acb->bounce);
-
-    aio_context_acquire(ctx);
     acb->common.cb(acb->common.opaque, (acb->ret > 0 ? 0 : acb->ret));
-    aio_context_release(ctx);
 
     qemu_aio_unref(acb);
 }
diff --git a/block/win32-aio.c b/block/win32-aio.c
index 85aac85..16270ca 100644
--- a/block/win32-aio.c
+++ b/block/win32-aio.c
@@ -87,9 +87,7 @@ static void win32_aio_process_completion(QEMUWin32AIOState *s,
     }
 
 
-    aio_context_acquire(s->aio_ctx);
     waiocb->common.cb(waiocb->common.opaque, ret);
-    aio_context_release(s->aio_ctx);
     qemu_aio_unref(waiocb);
 }
 
diff --git a/hw/block/virtio-blk.c b/hw/block/virtio-blk.c
index 5c1cb89..f05c84a 100644
--- a/hw/block/virtio-blk.c
+++ b/hw/block/virtio-blk.c
@@ -86,7 +86,9 @@ static int virtio_blk_handle_rw_error(VirtIOBlockReq *req, int error,
 static void virtio_blk_rw_complete(void *opaque, int ret)
 {
     VirtIOBlockReq *next = opaque;
+    VirtIOBlock *s = next->dev;
 
+    aio_context_acquire(blk_get_aio_context(s->conf.conf.blk));
     while (next) {
         VirtIOBlockReq *req = next;
         next = req->mr_next;
@@ -119,21 +121,27 @@ static void virtio_blk_rw_complete(void *opaque, int ret)
         block_acct_done(blk_get_stats(req->dev->blk), &req->acct);
         virtio_blk_free_request(req);
     }
+    aio_context_release(blk_get_aio_context(s->conf.conf.blk));
 }
 
 static void virtio_blk_flush_complete(void *opaque, int ret)
 {
     VirtIOBlockReq *req = opaque;
+    VirtIOBlock *s = req->dev;
 
+    aio_context_acquire(blk_get_aio_context(s->conf.conf.blk));
     if (ret) {
         if (virtio_blk_handle_rw_error(req, -ret, 0)) {
-            return;
+            goto out;
         }
     }
 
     virtio_blk_req_complete(req, VIRTIO_BLK_S_OK);
     block_acct_done(blk_get_stats(req->dev->blk), &req->acct);
     virtio_blk_free_request(req);
+
+out:
+    aio_context_release(blk_get_aio_context(s->conf.conf.blk));
 }
 
 #ifdef __linux__
@@ -180,8 +188,10 @@ static void virtio_blk_ioctl_complete(void *opaque, int status)
     virtio_stl_p(vdev, &scsi->data_len, hdr->dxfer_len);
 
 out:
+    aio_context_acquire(blk_get_aio_context(s->conf.conf.blk));
     virtio_blk_req_complete(req, status);
     virtio_blk_free_request(req);
+    aio_context_release(blk_get_aio_context(s->conf.conf.blk));
     g_free(ioctl_req);
 }
 
diff --git a/hw/scsi/scsi-disk.c b/hw/scsi/scsi-disk.c
index 4797d83..57ce1a0 100644
--- a/hw/scsi/scsi-disk.c
+++ b/hw/scsi/scsi-disk.c
@@ -169,6 +169,8 @@ static void scsi_aio_complete(void *opaque, int ret)
 
     assert(r->req.aiocb != NULL);
     r->req.aiocb = NULL;
+
+    aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
     if (r->req.io_canceled) {
         scsi_req_cancel_complete(&r->req);
         goto done;
@@ -184,6 +186,7 @@ static void scsi_aio_complete(void *opaque, int ret)
     scsi_req_complete(&r->req, GOOD);
 
 done:
+    aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
     scsi_req_unref(&r->req);
 }
 
@@ -273,12 +276,14 @@ static void scsi_dma_complete(void *opaque, int ret)
     assert(r->req.aiocb != NULL);
     r->req.aiocb = NULL;
 
+    aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
     if (ret < 0) {
         block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct);
     } else {
         block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
     }
     scsi_dma_complete_noio(r, ret);
+    aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
 }
 
 static void scsi_read_complete(void * opaque, int ret)
@@ -289,6 +294,8 @@ static void scsi_read_complete(void * opaque, int ret)
 
     assert(r->req.aiocb != NULL);
     r->req.aiocb = NULL;
+
+    aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
     if (r->req.io_canceled) {
         scsi_req_cancel_complete(&r->req);
         goto done;
@@ -310,6 +317,7 @@ static void scsi_read_complete(void * opaque, int ret)
 
 done:
     scsi_req_unref(&r->req);
+    aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
 }
 
 /* Actually issue a read to the block device.  */
@@ -359,12 +367,14 @@ static void scsi_do_read_cb(void *opaque, int ret)
     assert (r->req.aiocb != NULL);
     r->req.aiocb = NULL;
 
+    aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
     if (ret < 0) {
         block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct);
     } else {
         block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
     }
     scsi_do_read(opaque, ret);
+    aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
 }
 
 /* Read more data from scsi device into buffer.  */
@@ -492,12 +502,14 @@ static void scsi_write_complete(void * opaque, int ret)
     assert (r->req.aiocb != NULL);
     r->req.aiocb = NULL;
 
+    aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
     if (ret < 0) {
         block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct);
     } else {
         block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
     }
     scsi_write_complete_noio(r, ret);
+    aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
 }
 
 static void scsi_write_data(SCSIRequest *req)
@@ -1640,11 +1652,14 @@ static void scsi_unmap_complete(void *opaque, int ret)
 {
     UnmapCBData *data = opaque;
     SCSIDiskReq *r = data->r;
+    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
 
     assert(r->req.aiocb != NULL);
     r->req.aiocb = NULL;
 
+    aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
     scsi_unmap_complete_noio(data, ret);
+    aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
 }
 
 static void scsi_disk_emulate_unmap(SCSIDiskReq *r, uint8_t *inbuf)
@@ -1711,6 +1726,8 @@ static void scsi_write_same_complete(void *opaque, int ret)
 
     assert(r->req.aiocb != NULL);
     r->req.aiocb = NULL;
+
+    aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
     if (r->req.io_canceled) {
         scsi_req_cancel_complete(&r->req);
         goto done;
@@ -1746,6 +1763,7 @@ done:
     scsi_req_unref(&r->req);
     qemu_vfree(data->iov.iov_base);
     g_free(data);
+    aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
 }
 
 static void scsi_disk_emulate_write_same(SCSIDiskReq *r, uint8_t *inbuf)
diff --git a/hw/scsi/scsi-generic.c b/hw/scsi/scsi-generic.c
index a4626f7..1b0c7e9 100644
--- a/hw/scsi/scsi-generic.c
+++ b/hw/scsi/scsi-generic.c
@@ -145,10 +145,14 @@ done:
 static void scsi_command_complete(void *opaque, int ret)
 {
     SCSIGenericReq *r = (SCSIGenericReq *)opaque;
+    SCSIDevice *s = r->req.dev;
 
     assert(r->req.aiocb != NULL);
     r->req.aiocb = NULL;
+
+    aio_context_acquire(blk_get_aio_context(s->conf.blk));
     scsi_command_complete_noio(r, ret);
+    aio_context_release(blk_get_aio_context(s->conf.blk));
 }
 
 static int execute_command(BlockBackend *blk,
@@ -184,9 +188,11 @@ static void scsi_read_complete(void * opaque, int ret)
     assert(r->req.aiocb != NULL);
     r->req.aiocb = NULL;
 
+    aio_context_acquire(blk_get_aio_context(s->conf.blk));
+
     if (ret || r->req.io_canceled) {
         scsi_command_complete_noio(r, ret);
-        return;
+        goto done;
     }
 
     len = r->io_header.dxfer_len - r->io_header.resid;
@@ -195,7 +201,7 @@ static void scsi_read_complete(void * opaque, int ret)
     r->len = -1;
     if (len == 0) {
         scsi_command_complete_noio(r, 0);
-        return;
+        goto done;
     }
 
     /* Snoop READ CAPACITY output to set the blocksize.  */
@@ -226,6 +232,9 @@ static void scsi_read_complete(void * opaque, int ret)
     }
     scsi_req_data(&r->req, len);
     scsi_req_unref(&r->req);
+
+done:
+    aio_context_release(blk_get_aio_context(s->conf.blk));
 }
 
 /* Read more data from scsi device into buffer.  */
@@ -261,9 +270,11 @@ static void scsi_write_complete(void * opaque, int ret)
     assert(r->req.aiocb != NULL);
     r->req.aiocb = NULL;
 
+    aio_context_acquire(blk_get_aio_context(s->conf.blk));
+
     if (ret || r->req.io_canceled) {
         scsi_command_complete_noio(r, ret);
-        return;
+        goto done;
     }
 
     if (r->req.cmd.buf[0] == MODE_SELECT && r->req.cmd.buf[4] == 12 &&
@@ -273,6 +284,9 @@ static void scsi_write_complete(void * opaque, int ret)
     }
 
     scsi_command_complete_noio(r, ret);
+
+done:
+    aio_context_release(blk_get_aio_context(s->conf.blk));
 }
 
 /* Write data to a scsi device.  Returns nonzero on failure.
diff --git a/thread-pool.c b/thread-pool.c
index bffd823..e923544 100644
--- a/thread-pool.c
+++ b/thread-pool.c
@@ -185,7 +185,9 @@ restart:
              */
             qemu_bh_schedule(pool->completion_bh);
 
+            aio_context_release(pool->ctx);
             elem->common.cb(elem->common.opaque, elem->ret);
+            aio_context_acquire(pool->ctx);
             qemu_aio_unref(elem);
             goto restart;
         } else {
@@ -261,21 +263,29 @@ BlockAIOCB *thread_pool_submit_aio(ThreadPool *pool,
 
 typedef struct ThreadPoolCo {
     Coroutine *co;
+    AioContext *ctx;
     int ret;
 } ThreadPoolCo;
 
 static void thread_pool_co_cb(void *opaque, int ret)
 {
     ThreadPoolCo *co = opaque;
+    AioContext *ctx = co->ctx;
 
     co->ret = ret;
+    aio_context_acquire(ctx);
     qemu_coroutine_enter(co->co, NULL);
+    aio_context_release(ctx);
 }
 
 int coroutine_fn thread_pool_submit_co(ThreadPool *pool, ThreadPoolFunc *func,
                                        void *arg)
 {
-    ThreadPoolCo tpc = { .co = qemu_coroutine_self(), .ret = -EINPROGRESS };
+    ThreadPoolCo tpc = {
+        .co = qemu_coroutine_self(),
+        .ctx = pool->ctx,
+        .ret = -EINPROGRESS
+    };
     assert(qemu_in_coroutine());
     thread_pool_submit_aio(pool, func, arg, thread_pool_co_cb, &tpc);
     qemu_coroutine_yield();
-- 
1.8.3.1