[PATCH v5 12/21] xen-block: implement BlockDevOps->drained_begin()
From: Stefan Hajnoczi
Subject: [PATCH v5 12/21] xen-block: implement BlockDevOps->drained_begin()
Date: Thu, 4 May 2023 15:53:18 -0400
Detach event channels during drained sections to stop I/O submission
from the ring. xen-block is no longer reliant on aio_disable_external()
after this patch. This will allow us to remove the
aio_disable_external() API once all other code that relies on it is
converted.
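
For illustration only (not part of this patch): a minimal sketch of how a
drained section reaches the new callbacks once the BlockDevOps below are
registered. The drain entry points are the generic block-layer APIs; exact
call sites in the tree vary.

    /* Illustrative sketch, not from this patch. */
    bdrv_drained_begin(blk_bs(blk));  /* -> xen_block_drained_begin(): the event
                                       *    channel is detached, so no new
                                       *    requests are pulled from the ring */
    /* ... caller works on the quiesced device ... */
    bdrv_drained_end(blk_bs(blk));    /* -> xen_block_drained_end(): the event
                                       *    channel is re-attached */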
Extend xen_device_set_event_channel_context() to allow ctx=NULL. The
event channel still exists but the event loop does not monitor the file
descriptor. Event channel processing can resume by calling
xen_device_set_event_channel_context() with a non-NULL ctx.
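
A condensed usage sketch (iothread_ctx is a placeholder for whichever
AioContext the caller wants to resume processing in; the function and its
signature are as in the hunk below):

    /* Suspend event channel processing: the fd is no longer monitored. */
    xen_device_set_event_channel_context(xendev, channel, NULL, &error_abort);

    /* Resume it later by handing the channel back to an AioContext. */
    xen_device_set_event_channel_context(xendev, channel, iothread_ctx,
                                         &error_abort);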
Factor out xen_device_set_event_channel_context() calls in
hw/block/dataplane/xen-block.c into attach/detach helper functions.
Incidentally, these don't require the AioContext lock because
aio_set_fd_handler() is thread-safe.
It's safer to register BlockDevOps after the dataplane instance has been
created. The BlockDevOps .drained_begin/end() callbacks depend on the
dataplane instance, so move the blk_set_dev_ops() call after
xen_block_dataplane_create().
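
The resulting order in xen_block_realize(), condensed from the hunks below:

    blockdev->dataplane =
        xen_block_dataplane_create(xendev, blk, conf->logical_block_size,
                                   blockdev->props.iothread);

    /* Registered only now, so .drained_begin/end() can rely on the dataplane */
    blk_set_dev_ops(blk, &xen_block_dev_ops, blockdev);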
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
hw/block/dataplane/xen-block.h | 2 ++
hw/block/dataplane/xen-block.c | 42 +++++++++++++++++++++++++---------
hw/block/xen-block.c | 24 ++++++++++++++++---
hw/xen/xen-bus.c | 7 ++++--
4 files changed, 59 insertions(+), 16 deletions(-)
diff --git a/hw/block/dataplane/xen-block.h b/hw/block/dataplane/xen-block.h
index 76dcd51c3d..7b8e9df09f 100644
--- a/hw/block/dataplane/xen-block.h
+++ b/hw/block/dataplane/xen-block.h
@@ -26,5 +26,7 @@ void xen_block_dataplane_start(XenBlockDataPlane *dataplane,
unsigned int protocol,
Error **errp);
void xen_block_dataplane_stop(XenBlockDataPlane *dataplane);
+void xen_block_dataplane_attach(XenBlockDataPlane *dataplane);
+void xen_block_dataplane_detach(XenBlockDataPlane *dataplane);
#endif /* HW_BLOCK_DATAPLANE_XEN_BLOCK_H */
diff --git a/hw/block/dataplane/xen-block.c b/hw/block/dataplane/xen-block.c
index d8bc39d359..2597f38805 100644
--- a/hw/block/dataplane/xen-block.c
+++ b/hw/block/dataplane/xen-block.c
@@ -664,6 +664,30 @@ void xen_block_dataplane_destroy(XenBlockDataPlane *dataplane)
g_free(dataplane);
}
+void xen_block_dataplane_detach(XenBlockDataPlane *dataplane)
+{
+ if (!dataplane || !dataplane->event_channel) {
+ return;
+ }
+
+ /* Only reason for failure is a NULL channel */
+ xen_device_set_event_channel_context(dataplane->xendev,
+ dataplane->event_channel,
+ NULL, &error_abort);
+}
+
+void xen_block_dataplane_attach(XenBlockDataPlane *dataplane)
+{
+ if (!dataplane || !dataplane->event_channel) {
+ return;
+ }
+
+ /* Only reason for failure is a NULL channel */
+ xen_device_set_event_channel_context(dataplane->xendev,
+ dataplane->event_channel,
+ dataplane->ctx, &error_abort);
+}
+
void xen_block_dataplane_stop(XenBlockDataPlane *dataplane)
{
XenDevice *xendev;
@@ -674,13 +698,11 @@ void xen_block_dataplane_stop(XenBlockDataPlane *dataplane)
xendev = dataplane->xendev;
- aio_context_acquire(dataplane->ctx);
- if (dataplane->event_channel) {
- /* Only reason for failure is a NULL channel */
- xen_device_set_event_channel_context(xendev, dataplane->event_channel,
- qemu_get_aio_context(),
- &error_abort);
+ if (!blk_in_drain(dataplane->blk)) {
+ xen_block_dataplane_detach(dataplane);
}
+
+ aio_context_acquire(dataplane->ctx);
/* Xen doesn't have multiple users for nodes, so this can't fail */
blk_set_aio_context(dataplane->blk, qemu_get_aio_context(), &error_abort);
aio_context_release(dataplane->ctx);
@@ -819,11 +841,9 @@ void xen_block_dataplane_start(XenBlockDataPlane *dataplane,
blk_set_aio_context(dataplane->blk, dataplane->ctx, NULL);
aio_context_release(old_context);
- /* Only reason for failure is a NULL channel */
- aio_context_acquire(dataplane->ctx);
- xen_device_set_event_channel_context(xendev, dataplane->event_channel,
- dataplane->ctx, &error_abort);
- aio_context_release(dataplane->ctx);
+ if (!blk_in_drain(dataplane->blk)) {
+ xen_block_dataplane_attach(dataplane);
+ }
return;
diff --git a/hw/block/xen-block.c b/hw/block/xen-block.c
index f5a744589d..f099914831 100644
--- a/hw/block/xen-block.c
+++ b/hw/block/xen-block.c
@@ -189,8 +189,26 @@ static void xen_block_resize_cb(void *opaque)
xen_device_backend_printf(xendev, "state", "%u", state);
}
+/* Suspend request handling */
+static void xen_block_drained_begin(void *opaque)
+{
+ XenBlockDevice *blockdev = opaque;
+
+ xen_block_dataplane_detach(blockdev->dataplane);
+}
+
+/* Resume request handling */
+static void xen_block_drained_end(void *opaque)
+{
+ XenBlockDevice *blockdev = opaque;
+
+ xen_block_dataplane_attach(blockdev->dataplane);
+}
+
static const BlockDevOps xen_block_dev_ops = {
- .resize_cb = xen_block_resize_cb,
+ .resize_cb = xen_block_resize_cb,
+ .drained_begin = xen_block_drained_begin,
+ .drained_end = xen_block_drained_end,
};
static void xen_block_realize(XenDevice *xendev, Error **errp)
@@ -242,8 +260,6 @@ static void xen_block_realize(XenDevice *xendev, Error **errp)
return;
}
- blk_set_dev_ops(blk, &xen_block_dev_ops, blockdev);
-
if (conf->discard_granularity == -1) {
conf->discard_granularity = conf->physical_block_size;
}
@@ -277,6 +293,8 @@ static void xen_block_realize(XenDevice *xendev, Error **errp)
blockdev->dataplane =
xen_block_dataplane_create(xendev, blk, conf->logical_block_size,
blockdev->props.iothread);
+
+ blk_set_dev_ops(blk, &xen_block_dev_ops, blockdev);
}
static void xen_block_frontend_changed(XenDevice *xendev,
diff --git a/hw/xen/xen-bus.c b/hw/xen/xen-bus.c
index c59850b1de..b8f408c9ed 100644
--- a/hw/xen/xen-bus.c
+++ b/hw/xen/xen-bus.c
@@ -846,8 +846,11 @@ void xen_device_set_event_channel_context(XenDevice *xendev,
NULL, NULL, NULL, NULL, NULL);
channel->ctx = ctx;
- aio_set_fd_handler(channel->ctx, qemu_xen_evtchn_fd(channel->xeh), true,
- xen_device_event, NULL, xen_device_poll, NULL, channel);
+ if (ctx) {
+ aio_set_fd_handler(channel->ctx, qemu_xen_evtchn_fd(channel->xeh),
+ true, xen_device_event, NULL, xen_device_poll, NULL,
+ channel);
+ }
}
XenEventChannel *xen_device_bind_event_channel(XenDevice *xendev,
--
2.40.1