From: Kevin Wolf
Subject: [PULL 22/32] block: drain from main loop thread in bdrv_co_yield_to_drain()
Date: Tue, 30 May 2023 18:32:29 +0200
From: Stefan Hajnoczi <stefanha@redhat.com>
For simplicity, always run BlockDevOps .drained_begin/end/poll()
callbacks in the main loop thread. This makes it easier to implement the
callbacks and avoids extra locks.
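
For illustration only (not part of this patch), here is a minimal sketch of
a hypothetical device backend whose request processing also happens in the
main loop, so the drained callbacks can get away with plain fields and no
extra locking. The MyDevice type, its fields and the callback names are
invented for this sketch; real users in this series do more work in
.drained_begin(), e.g. stopping their event sources.

typedef struct MyDevice {
    BlockBackend *blk;
    unsigned int in_flight; /* requests issued by the device itself */
    bool quiesced;
} MyDevice;

static void my_device_drained_begin(void *opaque)
{
    MyDevice *d = opaque;
    d->quiesced = true;      /* stop submitting new requests */
}

static void my_device_drained_end(void *opaque)
{
    MyDevice *d = opaque;
    d->quiesced = false;     /* resume request submission */
}

static bool my_device_drained_poll(void *opaque)
{
    MyDevice *d = opaque;
    return d->in_flight > 0; /* still busy: keep being polled */
}

static const BlockDevOps my_device_block_ops = {
    .drained_begin = my_device_drained_begin,
    .drained_end   = my_device_drained_end,
    .drained_poll  = my_device_drained_poll,
};

/* Registered once with blk_set_dev_ops(d->blk, &my_device_block_ops, d). */
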
Move the function pointer declarations from the I/O Code section to the
Global State section for BlockDevOps, BdrvChildClass, and BlockDriver.
Narrow IO_OR_GS_CODE() to GLOBAL_STATE_CODE() where appropriate.
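
As an illustration (not from the diff), the narrowed annotation means these
helpers may only run in the main loop thread: GLOBAL_STATE_CODE() roughly
corresponds to assert(qemu_in_main_thread()), whereas IO_OR_GS_CODE() would
also have allowed an iothread that owns the BDS's AioContext. The function
below is a made-up caller showing the resulting contract.

static void drain_in_main_loop_example(BlockDriverState *bs)
{
    GLOBAL_STATE_CODE();    /* main loop thread only */

    bdrv_drained_begin(bs);
    /* ... global-state work, e.g. graph changes ... */
    bdrv_drained_end(bs);
}
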
The test-bdrv-drain test case calls bdrv_drain() from an IOThread. This
is now only allowed from coroutine context, so update the test case to
run in a coroutine.
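
Sketch of that pattern (hypothetical names, mirroring the test hunks
below): enter a coroutine in the iothread's AioContext and poll from the
calling thread until it signals completion.

typedef struct {
    BlockDriverState *bs;
    bool done;
} DrainJob;

static void coroutine_fn drain_job_co_entry(void *opaque)
{
    DrainJob *job = opaque;

    bdrv_drain(job->bs);    /* in coroutine context, yields to the main loop */
    job->done = true;
    aio_wait_kick();        /* wake up AIO_WAIT_WHILE_UNLOCKED() */
}

static void drain_bs_in_iothread(AioContext *iothread_ctx,
                                 BlockDriverState *bs)
{
    DrainJob job = { .bs = bs };
    Coroutine *co = qemu_coroutine_create(drain_job_co_entry, &job);

    aio_co_enter(iothread_ctx, co);            /* start in the iothread */
    AIO_WAIT_WHILE_UNLOCKED(NULL, !job.done);  /* poll until finished */
}
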
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Reviewed-by: Kevin Wolf <kwolf@redhat.com>
Message-Id: <20230516190238.8401-11-stefanha@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
---
include/block/block_int-common.h | 72 +++++++++++++--------------
include/sysemu/block-backend-common.h | 25 +++++-----
block/io.c | 14 ++++--
tests/unit/test-bdrv-drain.c | 14 +++---
4 files changed, 67 insertions(+), 58 deletions(-)
diff --git a/include/block/block_int-common.h b/include/block/block_int-common.h
index 6492a1e538..b1cbc1e00c 100644
--- a/include/block/block_int-common.h
+++ b/include/block/block_int-common.h
@@ -363,6 +363,21 @@ struct BlockDriver {
void (*bdrv_attach_aio_context)(BlockDriverState *bs,
AioContext *new_context);
+ /**
+ * bdrv_drain_begin is called if implemented in the beginning of a
+ * drain operation to drain and stop any internal sources of requests in
+ * the driver.
+ * bdrv_drain_end is called if implemented at the end of the drain.
+ *
+ * They should be used by the driver to e.g. manage scheduled I/O
+ * requests, or toggle an internal state. After the end of the drain new
+ * requests will continue normally.
+ *
+ * Implementations of both functions must not call aio_poll().
+ */
+ void (*bdrv_drain_begin)(BlockDriverState *bs);
+ void (*bdrv_drain_end)(BlockDriverState *bs);
+
/**
* Try to get @bs's logical and physical block size.
* On success, store them in @bsz and return zero.
@@ -758,21 +773,6 @@ struct BlockDriver {
void coroutine_fn GRAPH_RDLOCK_PTR (*bdrv_co_io_unplug)(
BlockDriverState *bs);
- /**
- * bdrv_drain_begin is called if implemented in the beginning of a
- * drain operation to drain and stop any internal sources of requests in
- * the driver.
- * bdrv_drain_end is called if implemented at the end of the drain.
- *
- * They should be used by the driver to e.g. manage scheduled I/O
- * requests, or toggle an internal state. After the end of the drain new
- * requests will continue normally.
- *
- * Implementations of both functions must not call aio_poll().
- */
- void (*bdrv_drain_begin)(BlockDriverState *bs);
- void (*bdrv_drain_end)(BlockDriverState *bs);
-
bool (*bdrv_supports_persistent_dirty_bitmap)(BlockDriverState *bs);
bool coroutine_fn GRAPH_RDLOCK_PTR (*bdrv_co_can_store_new_dirty_bitmap)(
@@ -955,6 +955,27 @@ struct BdrvChildClass {
void GRAPH_WRLOCK_PTR (*attach)(BdrvChild *child);
void GRAPH_WRLOCK_PTR (*detach)(BdrvChild *child);
+ /*
+ * If this pair of functions is implemented, the parent doesn't issue new
+ * requests after returning from .drained_begin() until .drained_end() is
+ * called.
+ *
+ * These functions must not change the graph (and therefore also must not
+ * call aio_poll(), which could change the graph indirectly).
+ *
+ * Note that this can be nested. If drained_begin() was called twice, new
+ * I/O is allowed only after drained_end() was called twice, too.
+ */
+ void (*drained_begin)(BdrvChild *child);
+ void (*drained_end)(BdrvChild *child);
+
+ /*
+ * Returns whether the parent has pending requests for the child. This
+ * callback is polled after .drained_begin() has been called until all
+ * activity on the child has stopped.
+ */
+ bool (*drained_poll)(BdrvChild *child);
+
/*
* Notifies the parent that the filename of its child has changed (e.g.
* because the direct child was removed from the backing chain), so that it
@@ -984,27 +1005,6 @@ struct BdrvChildClass {
const char *(*get_name)(BdrvChild *child);
AioContext *(*get_parent_aio_context)(BdrvChild *child);
-
- /*
- * If this pair of functions is implemented, the parent doesn't issue new
- * requests after returning from .drained_begin() until .drained_end() is
- * called.
- *
- * These functions must not change the graph (and therefore also must not
- * call aio_poll(), which could change the graph indirectly).
- *
- * Note that this can be nested. If drained_begin() was called twice, new
- * I/O is allowed only after drained_end() was called twice, too.
- */
- void (*drained_begin)(BdrvChild *child);
- void (*drained_end)(BdrvChild *child);
-
- /*
- * Returns whether the parent has pending requests for the child. This
- * callback is polled after .drained_begin() has been called until all
- * activity on the child has stopped.
- */
- bool (*drained_poll)(BdrvChild *child);
};
extern const BdrvChildClass child_of_bds;
diff --git a/include/sysemu/block-backend-common.h b/include/sysemu/block-backend-common.h
index 2391679c56..780cea7305 100644
--- a/include/sysemu/block-backend-common.h
+++ b/include/sysemu/block-backend-common.h
@@ -59,6 +59,19 @@ typedef struct BlockDevOps {
*/
bool (*is_medium_locked)(void *opaque);
+ /*
+ * Runs when the backend receives a drain request.
+ */
+ void (*drained_begin)(void *opaque);
+ /*
+ * Runs when the backend's last drain request ends.
+ */
+ void (*drained_end)(void *opaque);
+ /*
+ * Is the device still busy?
+ */
+ bool (*drained_poll)(void *opaque);
+
/*
* I/O API functions. These functions are thread-safe.
*
@@ -76,18 +89,6 @@ typedef struct BlockDevOps {
* Runs when the size changed (e.g. monitor command block_resize)
*/
void (*resize_cb)(void *opaque);
- /*
- * Runs when the backend receives a drain request.
- */
- void (*drained_begin)(void *opaque);
- /*
- * Runs when the backend's last drain request ends.
- */
- void (*drained_end)(void *opaque);
- /*
- * Is the device still busy?
- */
- bool (*drained_poll)(void *opaque);
} BlockDevOps;
/*
diff --git a/block/io.c b/block/io.c
index 4d54fda593..fece938fd0 100644
--- a/block/io.c
+++ b/block/io.c
@@ -60,7 +60,7 @@ static void bdrv_parent_drained_begin(BlockDriverState *bs, BdrvChild *ignore)
void bdrv_parent_drained_end_single(BdrvChild *c)
{
- IO_OR_GS_CODE();
+ GLOBAL_STATE_CODE();
assert(c->quiesced_parent);
c->quiesced_parent = false;
@@ -108,7 +108,7 @@ static bool bdrv_parent_drained_poll(BlockDriverState *bs, BdrvChild *ignore,
void bdrv_parent_drained_begin_single(BdrvChild *c)
{
- IO_OR_GS_CODE();
+ GLOBAL_STATE_CODE();
assert(!c->quiesced_parent);
c->quiesced_parent = true;
@@ -247,7 +247,7 @@ typedef struct {
bool bdrv_drain_poll(BlockDriverState *bs, BdrvChild *ignore_parent,
bool ignore_bds_parents)
{
- IO_OR_GS_CODE();
+ GLOBAL_STATE_CODE();
if (bdrv_parent_drained_poll(bs, ignore_parent, ignore_bds_parents)) {
return true;
@@ -334,7 +334,8 @@ static void coroutine_fn bdrv_co_yield_to_drain(BlockDriverState *bs,
if (ctx != co_ctx) {
aio_context_release(ctx);
}
- replay_bh_schedule_oneshot_event(ctx, bdrv_co_drain_bh_cb, &data);
+ replay_bh_schedule_oneshot_event(qemu_get_aio_context(),
+ bdrv_co_drain_bh_cb, &data);
qemu_coroutine_yield();
/* If we are resumed from some other event (such as an aio completion or a
@@ -357,6 +358,8 @@ static void bdrv_do_drained_begin(BlockDriverState *bs, BdrvChild *parent,
return;
}
+ GLOBAL_STATE_CODE();
+
/* Stop things in parent-to-child order */
if (qatomic_fetch_inc(&bs->quiesce_counter) == 0) {
aio_disable_external(bdrv_get_aio_context(bs));
@@ -399,11 +402,14 @@ static void bdrv_do_drained_end(BlockDriverState *bs, BdrvChild *parent)
{
int old_quiesce_counter;
+ IO_OR_GS_CODE();
+
if (qemu_in_coroutine()) {
bdrv_co_yield_to_drain(bs, false, parent, false);
return;
}
assert(bs->quiesce_counter > 0);
+ GLOBAL_STATE_CODE();
/* Re-enable things in child-to-parent order */
old_quiesce_counter = qatomic_fetch_dec(&bs->quiesce_counter);
diff --git a/tests/unit/test-bdrv-drain.c b/tests/unit/test-bdrv-drain.c
index 08bb0f9984..d53a633577 100644
--- a/tests/unit/test-bdrv-drain.c
+++ b/tests/unit/test-bdrv-drain.c
@@ -483,19 +483,19 @@ struct test_iothread_data {
BlockDriverState *bs;
enum drain_type drain_type;
int *aio_ret;
+ bool co_done;
};
-static void test_iothread_drain_entry(void *opaque)
+static void coroutine_fn test_iothread_drain_co_entry(void *opaque)
{
struct test_iothread_data *data = opaque;
- aio_context_acquire(bdrv_get_aio_context(data->bs));
do_drain_begin(data->drain_type, data->bs);
g_assert_cmpint(*data->aio_ret, ==, 0);
do_drain_end(data->drain_type, data->bs);
- aio_context_release(bdrv_get_aio_context(data->bs));
- qemu_event_set(&done_event);
+ data->co_done = true;
+ aio_wait_kick();
}
static void test_iothread_aio_cb(void *opaque, int ret)
@@ -531,6 +531,7 @@ static void test_iothread_common(enum drain_type drain_type, int drain_thread)
BlockDriverState *bs;
BDRVTestState *s;
BlockAIOCB *acb;
+ Coroutine *co;
int aio_ret;
struct test_iothread_data data;
@@ -609,8 +610,9 @@ static void test_iothread_common(enum drain_type drain_type, int drain_thread)
}
break;
case 1:
- aio_bh_schedule_oneshot(ctx_a, test_iothread_drain_entry, &data);
- qemu_event_wait(&done_event);
+ co = qemu_coroutine_create(test_iothread_drain_co_entry, &data);
+ aio_co_enter(ctx_a, co);
+ AIO_WAIT_WHILE_UNLOCKED(NULL, !data.co_done);
break;
default:
g_assert_not_reached();
--
2.40.1